/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

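/*
 * Mirror the DP dongle type reported by DC into the DRM "subconnector"
 * property, so userspace can tell e.g. a DP-to-HDMI dongle from a native
 * DP sink. Only meaningful for DisplayPort connectors with an attached sink.
 */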
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/**
 * dm_vblank_get_counter() - Get the current vertical blank counter
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: counter for vertical blanks, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        struct amdgpu_crtc *acrtc;

        if (crtc >= adev->mode_info.num_crtc)
                return 0;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
                return 0;
        }

        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
                return 0;
        }

        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
         */
        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                 &v_blank_start,
                                 &v_blank_end,
                                 &h_position,
                                 &v_position);

        *position = v_position | (h_position << 16);
        *vbl = v_blank_start | (v_blank_end << 16);

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

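/*
 * Look up the CRTC driven by the given OTG (output timing generator)
 * instance. The display IRQ sources are keyed by OTG instance, so the
 * high-irq handlers below use this to recover the CRTC. Falls back to
 * the first CRTC, with a warning, if the instance is unknown (-1).
 */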
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common_irq_params carrying the amdgpu device and the
 *                    pageflip IRQ source
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED,
                                 amdgpu_crtc->crtc_id,
                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of the vblank of this flip for flip throttling. We use
         * the cooked hw counter, as that one is incremented at the start of
         * the vblank in which the pageflip completed, so last_flip_vblank is
         * the forbidden count for queueing new pageflips if vsync + VRR is
         * enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int)!e);
}

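/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: common_irq_params carrying the amdgpu device and the
 *                    VUPDATE IRQ source
 *
 * In VRR mode, core vblank handling is deferred to this point, after the
 * end of the front-porch, so that vblank timestamps are valid. It also
 * drives below-the-range (BTR) freesync handling on pre-DCE12 ASICs.
 */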
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after the end of
                 * front-porch in VRR mode, as only then does vblank
                 * timestamping give valid results. This also delivers
                 * page-flip completion events that were queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-VRR mode, as only then will vblank timestamping give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at the start of vblank, for CRC
         * computation and below-the-range (BTR) support in VRR mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

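/*
 * DRM audio component .get_eld callback: the HDA driver calls this to
 * fetch the ELD (EDID-Like Data) for the display attached to the given
 * audio pin, so it can advertise the sink's audio capabilities.
 */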
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

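/*
 * Register the DRM audio component so the HDA driver can bind to us.
 * Audio pins start out with "disconnected" defaults; their state is later
 * reported to the audio side through amdgpu_dm_audio_eld_notify().
 */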
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

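/*
 * Copy the DMUB firmware sections and the VBIOS into their framebuffer
 * windows, program the hardware parameters, and bring up the DMUB
 * microcontroller. Returns 0 (not an error) if the ASIC has no DMUB.
 */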
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
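/*
 * Translate the GPU's memory-controller view (system aperture, AGP range
 * and GART page-table location) into the dc_phy_addr_space_config that DC
 * uses to set up the DCN address space; called for APUs during init.
 */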
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (by 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
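/*
 * Create the atomic range properties that let userspace restrict CRC
 * capture to a window of the CRTC (used by the debugfs CRC interface).
 */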
static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
{
        dm->crc_win_x_start_property =
                drm_property_create_range(adev_to_drm(dm->adev),
                                          DRM_MODE_PROP_ATOMIC,
                                          "AMD_CRC_WIN_X_START", 0, U16_MAX);
        if (!dm->crc_win_x_start_property)
                return -ENOMEM;

        dm->crc_win_y_start_property =
                drm_property_create_range(adev_to_drm(dm->adev),
                                          DRM_MODE_PROP_ATOMIC,
                                          "AMD_CRC_WIN_Y_START", 0, U16_MAX);
        if (!dm->crc_win_y_start_property)
                return -ENOMEM;

        dm->crc_win_x_end_property =
                drm_property_create_range(adev_to_drm(dm->adev),
                                          DRM_MODE_PROP_ATOMIC,
                                          "AMD_CRC_WIN_X_END", 0, U16_MAX);
        if (!dm->crc_win_x_end_property)
                return -ENOMEM;

        dm->crc_win_y_end_property =
                drm_property_create_range(adev_to_drm(dm->adev),
                                          DRM_MODE_PROP_ATOMIC,
                                          "AMD_CRC_WIN_Y_END", 0, U16_MAX);
        if (!dm->crc_win_y_end_property)
                return -ENOMEM;

        return 0;
}
#endif

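/*
 * Bring up the display manager: set up IRQ handling, create the DC
 * instance from the ASIC/vBIOS parameters, initialize DMUB and the
 * display hardware, then create the freesync, color-management, HDCP
 * and DRM/KMS state on top. On failure everything is torn down again
 * via amdgpu_dm_fini().
 */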
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        init_data.soc_bounding_box = adev->dm.soc_bounding_box;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter =
                        !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module)
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#ifdef CONFIG_DEBUG_FS
        if (create_crtc_crc_properties(&adev->dm))
                DRM_ERROR("amdgpu: failed to create crc properties.\n");
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++)
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        /* Guard against fini being reached from a failed init where the DC
         * instance was never created.
         */
        if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

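/*
 * Request the DMCU microcontroller firmware on the ASICs that need it.
 * Most ASICs don't need a separately loaded DMCU image and simply
 * return 0 here; only Raven (Picasso/Raven2) and Navi12 load one, and
 * only when firmware is loaded through the PSP.
 */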
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        dm_write_reg(adev->dm.dc->ctx, address, value);
}

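/*
 * Software-side DMUB setup: pick the firmware image for the ASIC, create
 * the DMUB service with the register accessors above, size its memory
 * regions from the firmware header, and allocate a VRAM buffer to back
 * them. The hardware itself is brought up later in dm_dmub_hw_init().
 */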
1331 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1332 {
1333         struct dmub_srv_create_params create_params;
1334         struct dmub_srv_region_params region_params;
1335         struct dmub_srv_region_info region_info;
1336         struct dmub_srv_fb_params fb_params;
1337         struct dmub_srv_fb_info *fb_info;
1338         struct dmub_srv *dmub_srv;
1339         const struct dmcub_firmware_header_v1_0 *hdr;
1340         const char *fw_name_dmub;
1341         enum dmub_asic dmub_asic;
1342         enum dmub_status status;
1343         int r;
1344
1345         switch (adev->asic_type) {
1346         case CHIP_RENOIR:
1347                 dmub_asic = DMUB_ASIC_DCN21;
1348                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1349                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1350                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1351                 break;
1352         case CHIP_SIENNA_CICHLID:
1353                 dmub_asic = DMUB_ASIC_DCN30;
1354                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1355                 break;
1356         case CHIP_NAVY_FLOUNDER:
1357                 dmub_asic = DMUB_ASIC_DCN30;
1358                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1359                 break;
1360         case CHIP_VANGOGH:
1361                 dmub_asic = DMUB_ASIC_DCN301;
1362                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1363                 break;
1364         case CHIP_DIMGREY_CAVEFISH:
1365                 dmub_asic = DMUB_ASIC_DCN302;
1366                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1367                 break;
1368
1369         default:
1370                 /* ASIC doesn't support DMUB. */
1371                 return 0;
1372         }
1373
1374         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1375         if (r) {
1376                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1377                 return 0;
1378         }
1379
1380         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1381         if (r) {
1382                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1383                 return 0;
1384         }
1385
1386         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1387
1388         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1389                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1390                         AMDGPU_UCODE_ID_DMCUB;
1391                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1392                         adev->dm.dmub_fw;
1393                 adev->firmware.fw_size +=
1394                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1395
1396                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1397                          adev->dm.dmcub_fw_version);
1398         }
1399
1400         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1401
1402         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1403         dmub_srv = adev->dm.dmub_srv;
1404
1405         if (!dmub_srv) {
1406                 DRM_ERROR("Failed to allocate DMUB service!\n");
1407                 return -ENOMEM;
1408         }
1409
1410         memset(&create_params, 0, sizeof(create_params));
1411         create_params.user_ctx = adev;
1412         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1413         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1414         create_params.asic = dmub_asic;
1415
1416         /* Create the DMUB service. */
1417         status = dmub_srv_create(dmub_srv, &create_params);
1418         if (status != DMUB_STATUS_OK) {
1419                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1420                 return -EINVAL;
1421         }
1422
1423         /* Calculate the size of all the regions for the DMUB service. */
1424         memset(&region_params, 0, sizeof(region_params));
1425
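             /*
              * The firmware's instruction-constant payload carries a PSP signing
              * header and footer; both are stripped when sizing the region below.
              */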
1426         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1427                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1428         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1429         region_params.vbios_size = adev->bios_size;
1430         region_params.fw_bss_data = region_params.bss_data_size ?
1431                 adev->dm.dmub_fw->data +
1432                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1433                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1434         region_params.fw_inst_const =
1435                 adev->dm.dmub_fw->data +
1436                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1437                 PSP_HEADER_BYTES;
1438
1439         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1440                                            &region_info);
1441
1442         if (status != DMUB_STATUS_OK) {
1443                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1444                 return -EINVAL;
1445         }
1446
1447         /*
1448          * Allocate a framebuffer based on the total size of all the regions.
1449          * TODO: Move this into GART.
1450          */
1451         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1452                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1453                                     &adev->dm.dmub_bo_gpu_addr,
1454                                     &adev->dm.dmub_bo_cpu_addr);
1455         if (r)
1456                 return r;
1457
1458         /* Rebase the regions on the framebuffer address. */
1459         memset(&fb_params, 0, sizeof(fb_params));
1460         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1461         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1462         fb_params.region_info = &region_info;
1463
1464         adev->dm.dmub_fb_info =
1465                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1466         fb_info = adev->dm.dmub_fb_info;
1467
1468         if (!fb_info) {
1469                 DRM_ERROR(
1470                         "Failed to allocate framebuffer info for DMUB service!\n");
1471                 return -ENOMEM;
1472         }
1473
1474         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1475         if (status != DMUB_STATUS_OK) {
1476                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1477                 return -EINVAL;
1478         }
1479
1480         return 0;
1481 }
1482
1483 static int dm_sw_init(void *handle)
1484 {
1485         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1486         int r;
1487
1488         r = dm_dmub_sw_init(adev);
1489         if (r)
1490                 return r;
1491
1492         return load_dmcu_fw(adev);
1493 }
1494
1495 static int dm_sw_fini(void *handle)
1496 {
1497         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1498
1499         kfree(adev->dm.dmub_fb_info);
1500         adev->dm.dmub_fb_info = NULL;
1501
1502         if (adev->dm.dmub_srv) {
1503                 dmub_srv_destroy(adev->dm.dmub_srv);
1504                 adev->dm.dmub_srv = NULL;
1505         }
1506
1507         release_firmware(adev->dm.dmub_fw);
1508         adev->dm.dmub_fw = NULL;
1509
1510         release_firmware(adev->dm.fw_dmcu);
1511         adev->dm.fw_dmcu = NULL;
1512
1513         return 0;
1514 }
1515
1516 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1517 {
1518         struct amdgpu_dm_connector *aconnector;
1519         struct drm_connector *connector;
1520         struct drm_connector_list_iter iter;
1521         int ret = 0;
1522
1523         drm_connector_list_iter_begin(dev, &iter);
1524         drm_for_each_connector_iter(connector, &iter) {
1525                 aconnector = to_amdgpu_dm_connector(connector);
1526                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1527                     aconnector->mst_mgr.aux) {
1528                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1529                                          aconnector,
1530                                          aconnector->base.base.id);
1531
1532                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1533                         if (ret < 0) {
1534                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1535                                 aconnector->dc_link->type =
1536                                         dc_connection_single;
1537                                 break;
1538                         }
1539                 }
1540         }
1541         drm_connector_list_iter_end(&iter);
1542
1543         return ret;
1544 }
1545
1546 static int dm_late_init(void *handle)
1547 {
1548         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1549
1550         struct dmcu_iram_parameters params;
1551         unsigned int linear_lut[16];
1552         int i;
1553         struct dmcu *dmcu = NULL;
1554         bool ret = true;
1555
1556         dmcu = adev->dm.dc->res_pool->dmcu;
1557
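             /* Build a 16-entry linear (identity) backlight LUT spanning 0..0xFFFF. */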
1558         for (i = 0; i < 16; i++)
1559                 linear_lut[i] = 0xFFFF * i / 15;
1560
1561         params.set = 0;
1562         params.backlight_ramping_start = 0xCCCC;
1563         params.backlight_ramping_reduction = 0xCCCCCCCC;
1564         params.backlight_lut_array_size = 16;
1565         params.backlight_lut_array = linear_lut;
1566
1567         /* Min backlight level after ABM reduction; don't allow below 1%:
1568          * 0xFFFF x 0.01 = 0x28F
1569          */
1570         params.min_abm_backlight = 0x28F;
1571
1572         /* When ABM is implemented on the DMCUB, the dmcu object will be
1573          * null and the IRAM config is programmed through DMUB instead.
1574          * ABM 2.4 and up are implemented on the DMCUB.
1575          */
1576         if (dmcu)
1577                 ret = dmcu_load_iram(dmcu, params);
1578         else if (adev->dm.dc->ctx->dmub_srv)
1579                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1580
1581         if (!ret)
1582                 return -EINVAL;
1583
1584         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1585 }
1586
1587 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1588 {
1589         struct amdgpu_dm_connector *aconnector;
1590         struct drm_connector *connector;
1591         struct drm_connector_list_iter iter;
1592         struct drm_dp_mst_topology_mgr *mgr;
1593         int ret;
1594         bool need_hotplug = false;
1595
1596         drm_connector_list_iter_begin(dev, &iter);
1597         drm_for_each_connector_iter(connector, &iter) {
1598                 aconnector = to_amdgpu_dm_connector(connector);
1599                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1600                     aconnector->mst_port)
1601                         continue;
1602
1603                 mgr = &aconnector->mst_mgr;
1604
1605                 if (suspend) {
1606                         drm_dp_mst_topology_mgr_suspend(mgr);
1607                 } else {
1608                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1609                         if (ret < 0) {
1610                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1611                                 need_hotplug = true;
1612                         }
1613                 }
1614         }
1615         drm_connector_list_iter_end(&iter);
1616
1617         if (need_hotplug)
1618                 drm_kms_helper_hotplug_event(dev);
1619 }
1620
1621 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1622 {
1623         struct smu_context *smu = &adev->smu;
1624         int ret = 0;
1625
1626         if (!is_support_sw_smu(adev))
1627                 return 0;
1628
1629         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1630          * depends on the Windows driver's dc implementation.
1631          *
1632          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
1633          * settings should be passed to smu during boot up and on resume from
1634          * s3. Boot up: dc calculates the dcn watermark clock settings within
1635          * dc_create and dcn20_resource_construct, then calls the pplib
1636          * functions below to pass the settings to smu:
1637          * smu_set_watermarks_for_clock_ranges
1638          * smu_set_watermarks_table
1639          * navi10_set_watermarks_table
1640          * smu_write_watermarks_table
1641          *
1642          * For Renoir, the clock settings of the dcn watermarks are also fixed
1643          * values. dc has implemented a different flow for the Windows driver:
1644          * dc_hardware_init / dc_set_power_state
1645          * dcn10_init_hw
1646          * notify_wm_ranges
1647          * set_wm_ranges
1648          * -- Linux
1649          * smu_set_watermarks_for_clock_ranges
1650          * renoir_set_watermarks_table
1651          * smu_write_watermarks_table
1652          *
1653          * For Linux,
1654          * dc_hardware_init -> amdgpu_dm_init
1655          * dc_set_power_state --> dm_resume
1656          *
1657          * Therefore, this function applies to Navi10/12/14 but not Renoir.
1658          */
1659         switch (adev->asic_type) {
1660         case CHIP_NAVI10:
1661         case CHIP_NAVI14:
1662         case CHIP_NAVI12:
1663                 break;
1664         default:
1665                 return 0;
1666         }
1667
1668         ret = smu_write_watermarks_table(smu);
1669         if (ret) {
1670                 DRM_ERROR("Failed to update WMTABLE!\n");
1671                 return ret;
1672         }
1673
1674         return 0;
1675 }
1676
1677 /**
1678  * dm_hw_init() - Initialize DC device
1679  * @handle: The base driver device containing the amdgpu_dm device.
1680  *
1681  * Initialize the &struct amdgpu_display_manager device. This involves calling
1682  * the initializers of each DM component, then populating the struct with them.
1683  *
1684  * Although the function implies hardware initialization, both hardware and
1685  * software are initialized here. Splitting them out to their relevant init
1686  * hooks is a future TODO item.
1687  *
1688  * Some notable things that are initialized here:
1689  *
1690  * - Display Core, both software and hardware
1691  * - DC modules that we need (freesync and color management)
1692  * - DRM software states
1693  * - Interrupt sources and handlers
1694  * - Vblank support
1695  * - Debug FS entries, if enabled
1696  */
1697 static int dm_hw_init(void *handle)
1698 {
1699         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1700         /* Create DAL display manager */
1701         amdgpu_dm_init(adev);
1702         amdgpu_dm_hpd_init(adev);
1703
1704         return 0;
1705 }
1706
1707 /**
1708  * dm_hw_fini() - Teardown DC device
1709  * @handle: The base driver device containing the amdgpu_dm device.
1710  *
1711  * Teardown components within &struct amdgpu_display_manager that require
1712  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1713  * were loaded. Also flush IRQ workqueues and disable them.
1714  */
1715 static int dm_hw_fini(void *handle)
1716 {
1717         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1718
1719         amdgpu_dm_hpd_fini(adev);
1720
1721         amdgpu_dm_irq_fini(adev);
1722         amdgpu_dm_fini(adev);
1723         return 0;
1724 }
1725
1726
1727 static int dm_enable_vblank(struct drm_crtc *crtc);
1728 static void dm_disable_vblank(struct drm_crtc *crtc);
1729
1730 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1731                                  struct dc_state *state, bool enable)
1732 {
1733         enum dc_irq_source irq_source;
1734         struct amdgpu_crtc *acrtc;
1735         int rc = -EBUSY;
1736         int i = 0;
1737
1738         for (i = 0; i < state->stream_count; i++) {
1739                 acrtc = get_crtc_by_otg_inst(
1740                                 adev, state->stream_status[i].primary_otg_inst);
1741
1742                 if (acrtc && state->stream_status[i].plane_count != 0) {
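                             /* Each OTG instance owns its own pageflip interrupt source. */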
1743                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1744                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1745                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1746                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1747                         if (rc)
1748                                 DRM_WARN("Failed to %s pflip interrupts\n",
1749                                          enable ? "enable" : "disable");
1750
1751                         if (enable) {
1752                                 rc = dm_enable_vblank(&acrtc->base);
1753                                 if (rc)
1754                                         DRM_WARN("Failed to enable vblank interrupts\n");
1755                         } else {
1756                                 dm_disable_vblank(&acrtc->base);
1757                         }
1758
1759                 }
1760         }
1761
1762 }
1763
1764 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1765 {
1766         struct dc_state *context = NULL;
1767         enum dc_status res = DC_ERROR_UNEXPECTED;
1768         int i;
1769         struct dc_stream_state *del_streams[MAX_PIPES];
1770         int del_streams_count = 0;
1771
1772         memset(del_streams, 0, sizeof(del_streams));
1773
1774         context = dc_create_state(dc);
1775         if (context == NULL)
1776                 goto context_alloc_fail;
1777
1778         dc_resource_state_copy_construct_current(dc, context);
1779
1780         /* First remove from context all streams */
1781         for (i = 0; i < context->stream_count; i++) {
1782                 struct dc_stream_state *stream = context->streams[i];
1783
1784                 del_streams[del_streams_count++] = stream;
1785         }
1786
1787         /* Remove all planes for removed streams and then remove the streams */
1788         for (i = 0; i < del_streams_count; i++) {
1789                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1790                         res = DC_FAIL_DETACH_SURFACES;
1791                         goto fail;
1792                 }
1793
1794                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1795                 if (res != DC_OK)
1796                         goto fail;
1797         }
1798
1799
1800         res = dc_validate_global_state(dc, context, false);
1801
1802         if (res != DC_OK) {
1803                 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1804                 goto fail;
1805         }
1806
1807         res = dc_commit_state(dc, context);
1808
1809 fail:
1810         dc_release_state(context);
1811
1812 context_alloc_fail:
1813         return res;
1814 }
1815
1816 static int dm_suspend(void *handle)
1817 {
1818         struct amdgpu_device *adev = handle;
1819         struct amdgpu_display_manager *dm = &adev->dm;
1820         int ret = 0;
1821
1822         if (amdgpu_in_reset(adev)) {
1823                 mutex_lock(&dm->dc_lock);
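                     /* dm->dc_lock stays held across the GPU reset; dm_resume() releases it. */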
1824                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1825
1826                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1827
1828                 amdgpu_dm_commit_zero_streams(dm->dc);
1829
1830                 amdgpu_dm_irq_suspend(adev);
1831
1832                 return ret;
1833         }
1834
1835         WARN_ON(adev->dm.cached_state);
1836         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1837
1838         s3_handle_mst(adev_to_drm(adev), true);
1839
1840         amdgpu_dm_irq_suspend(adev);
1841
1842
1843         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1844
1845         return 0;
1846 }
1847
1848 static struct amdgpu_dm_connector *
1849 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1850                                              struct drm_crtc *crtc)
1851 {
1852         uint32_t i;
1853         struct drm_connector_state *new_con_state;
1854         struct drm_connector *connector;
1855         struct drm_crtc *crtc_from_state;
1856
1857         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1858                 crtc_from_state = new_con_state->crtc;
1859
1860                 if (crtc_from_state == crtc)
1861                         return to_amdgpu_dm_connector(connector);
1862         }
1863
1864         return NULL;
1865 }
1866
1867 static void emulated_link_detect(struct dc_link *link)
1868 {
1869         struct dc_sink_init_data sink_init_data = { 0 };
1870         struct display_sink_capability sink_caps = { 0 };
1871         enum dc_edid_status edid_status;
1872         struct dc_context *dc_ctx = link->ctx;
1873         struct dc_sink *sink = NULL;
1874         struct dc_sink *prev_sink = NULL;
1875
1876         link->type = dc_connection_none;
1877         prev_sink = link->local_sink;
1878
1879         if (prev_sink != NULL)
1880                 dc_sink_retain(prev_sink);
1881
1882         switch (link->connector_signal) {
1883         case SIGNAL_TYPE_HDMI_TYPE_A: {
1884                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1885                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1886                 break;
1887         }
1888
1889         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1890                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1891                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1892                 break;
1893         }
1894
1895         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1896                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1897                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1898                 break;
1899         }
1900
1901         case SIGNAL_TYPE_LVDS: {
1902                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1903                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1904                 break;
1905         }
1906
1907         case SIGNAL_TYPE_EDP: {
1908                 sink_caps.transaction_type =
1909                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1910                 sink_caps.signal = SIGNAL_TYPE_EDP;
1911                 break;
1912         }
1913
1914         case SIGNAL_TYPE_DISPLAY_PORT: {
1915                 sink_caps.transaction_type =
1916                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1917                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1918                 break;
1919         }
1920
1921         default:
1922                 DC_ERROR("Invalid connector type! signal:%d\n",
1923                         link->connector_signal);
1924                 return;
1925         }
1926
1927         sink_init_data.link = link;
1928         sink_init_data.sink_signal = sink_caps.signal;
1929
1930         sink = dc_sink_create(&sink_init_data);
1931         if (!sink) {
1932                 DC_ERROR("Failed to create sink!\n");
1933                 return;
1934         }
1935
1936         /* dc_sink_create returns a new reference */
1937         link->local_sink = sink;
1938
1939         edid_status = dm_helpers_read_local_edid(
1940                         link->ctx,
1941                         link,
1942                         sink);
1943
1944         if (edid_status != EDID_OK)
1945                 DC_ERROR("Failed to read EDID\n");
1946
1947 }
1948
1949 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1950                                      struct amdgpu_display_manager *dm)
1951 {
1952         struct {
1953                 struct dc_surface_update surface_updates[MAX_SURFACES];
1954                 struct dc_plane_info plane_infos[MAX_SURFACES];
1955                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1956                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1957                 struct dc_stream_update stream_update;
1958         } *bundle;
1959         int k, m;
1960
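             /* The update bundle is large, so allocate it on the heap rather than the stack. */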
1961         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1962
1963         if (!bundle) {
1964                 dm_error("Failed to allocate update bundle\n");
1965                 goto cleanup;
1966         }
1967
1968         for (k = 0; k < dc_state->stream_count; k++) {
1969                 bundle->stream_update.stream = dc_state->streams[k];
1970
1971                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1972                         bundle->surface_updates[m].surface =
1973                                 dc_state->stream_status[k].plane_states[m];
1974                         bundle->surface_updates[m].surface->force_full_update =
1975                                 true;
1976                 }
1977                 dc_commit_updates_for_stream(
1978                         dm->dc, bundle->surface_updates,
1979                         dc_state->stream_status[k].plane_count,
1980                         dc_state->streams[k], &bundle->stream_update, dc_state);
1981         }
1982
1983 cleanup:
1984         kfree(bundle);
1985
1986         return;
1987 }
1988
1989 static void dm_set_dpms_off(struct dc_link *link)
1990 {
1991         struct dc_stream_state *stream_state;
1992         struct amdgpu_dm_connector *aconnector = link->priv;
1993         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1994         struct dc_stream_update stream_update;
1995         bool dpms_off = true;
1996
1997         memset(&stream_update, 0, sizeof(stream_update));
1998         stream_update.dpms_off = &dpms_off;
1999
2000         mutex_lock(&adev->dm.dc_lock);
2001         stream_state = dc_stream_find_from_link(link);
2002
2003         if (stream_state == NULL) {
2004                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2005                 mutex_unlock(&adev->dm.dc_lock);
2006                 return;
2007         }
2008
2009         stream_update.stream = stream_state;
2010         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2011                                      stream_state, &stream_update,
2012                                      stream_state->ctx->dc->current_state);
2013         mutex_unlock(&adev->dm.dc_lock);
2014 }
2015
2016 static int dm_resume(void *handle)
2017 {
2018         struct amdgpu_device *adev = handle;
2019         struct drm_device *ddev = adev_to_drm(adev);
2020         struct amdgpu_display_manager *dm = &adev->dm;
2021         struct amdgpu_dm_connector *aconnector;
2022         struct drm_connector *connector;
2023         struct drm_connector_list_iter iter;
2024         struct drm_crtc *crtc;
2025         struct drm_crtc_state *new_crtc_state;
2026         struct dm_crtc_state *dm_new_crtc_state;
2027         struct drm_plane *plane;
2028         struct drm_plane_state *new_plane_state;
2029         struct dm_plane_state *dm_new_plane_state;
2030         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2031         enum dc_connection_type new_connection_type = dc_connection_none;
2032         struct dc_state *dc_state;
2033         int i, r, j;
2034
2035         if (amdgpu_in_reset(adev)) {
2036                 dc_state = dm->cached_dc_state;
2037
2038                 r = dm_dmub_hw_init(adev);
2039                 if (r)
2040                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2041
2042                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2043                 dc_resume(dm->dc);
2044
2045                 amdgpu_dm_irq_resume_early(adev);
2046
2047                 for (i = 0; i < dc_state->stream_count; i++) {
2048                         dc_state->streams[i]->mode_changed = true;
2049                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2050                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2051                                         = 0xffffffff;
2052                         }
2053                 }
2054
2055                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2056
2057                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2058
2059                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2060
2061                 dc_release_state(dm->cached_dc_state);
2062                 dm->cached_dc_state = NULL;
2063
2064                 amdgpu_dm_irq_resume_late(adev);
2065
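                     /* Release the dc_lock taken in dm_suspend() when the GPU reset began. */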
2066                 mutex_unlock(&dm->dc_lock);
2067
2068                 return 0;
2069         }
2070         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2071         dc_release_state(dm_state->context);
2072         dm_state->context = dc_create_state(dm->dc);
2073         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2074         dc_resource_state_construct(dm->dc, dm_state->context);
2075
2076         /* Before powering on DC we need to re-initialize DMUB. */
2077         r = dm_dmub_hw_init(adev);
2078         if (r)
2079                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2080
2081         /* power on hardware */
2082         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2083
2084         /* program HPD filter */
2085         dc_resume(dm->dc);
2086
2087         /*
2088          * early enable HPD Rx IRQ, should be done before set mode as short
2089          * pulse interrupts are used for MST
2090          */
2091         amdgpu_dm_irq_resume_early(adev);
2092
2093         /* On resume we need to rewrite the MSTM control bits to enable MST */
2094         s3_handle_mst(ddev, false);
2095
2096         /* Do detection */
2097         drm_connector_list_iter_begin(ddev, &iter);
2098         drm_for_each_connector_iter(connector, &iter) {
2099                 aconnector = to_amdgpu_dm_connector(connector);
2100
2101                 /*
2102                  * Skip already-created MST connectors encountered during this
2103                  * traversal; the MST topology manager handles their detection.
2104                  */
2105                 if (aconnector->mst_port)
2106                         continue;
2107
2108                 mutex_lock(&aconnector->hpd_lock);
2109                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2110                         DRM_ERROR("KMS: Failed to detect connector\n");
2111
2112                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2113                         emulated_link_detect(aconnector->dc_link);
2114                 else
2115                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2116
2117                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2118                         aconnector->fake_enable = false;
2119
2120                 if (aconnector->dc_sink)
2121                         dc_sink_release(aconnector->dc_sink);
2122                 aconnector->dc_sink = NULL;
2123                 amdgpu_dm_update_connector_after_detect(aconnector);
2124                 mutex_unlock(&aconnector->hpd_lock);
2125         }
2126         drm_connector_list_iter_end(&iter);
2127
2128         /* Force mode set in atomic commit */
2129         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2130                 new_crtc_state->active_changed = true;
2131
2132         /*
2133          * atomic_check is expected to create the dc states. We need to release
2134          * them here, since they were duplicated as part of the suspend
2135          * procedure.
2136          */
2137         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2138                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2139                 if (dm_new_crtc_state->stream) {
2140                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2141                         dc_stream_release(dm_new_crtc_state->stream);
2142                         dm_new_crtc_state->stream = NULL;
2143                 }
2144         }
2145
2146         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2147                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2148                 if (dm_new_plane_state->dc_state) {
2149                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2150                         dc_plane_state_release(dm_new_plane_state->dc_state);
2151                         dm_new_plane_state->dc_state = NULL;
2152                 }
2153         }
2154
2155         drm_atomic_helper_resume(ddev, dm->cached_state);
2156
2157         dm->cached_state = NULL;
2158
2159         amdgpu_dm_irq_resume_late(adev);
2160
2161         amdgpu_dm_smu_write_watermarks_table(adev);
2162
2163         return 0;
2164 }
2165
2166 /**
2167  * DOC: DM Lifecycle
2168  *
2169  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2170  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2171  * the base driver's device list to be initialized and torn down accordingly.
2172  *
2173  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2174  */
2175
2176 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2177         .name = "dm",
2178         .early_init = dm_early_init,
2179         .late_init = dm_late_init,
2180         .sw_init = dm_sw_init,
2181         .sw_fini = dm_sw_fini,
2182         .hw_init = dm_hw_init,
2183         .hw_fini = dm_hw_fini,
2184         .suspend = dm_suspend,
2185         .resume = dm_resume,
2186         .is_idle = dm_is_idle,
2187         .wait_for_idle = dm_wait_for_idle,
2188         .check_soft_reset = dm_check_soft_reset,
2189         .soft_reset = dm_soft_reset,
2190         .set_clockgating_state = dm_set_clockgating_state,
2191         .set_powergating_state = dm_set_powergating_state,
2192 };
2193
2194 const struct amdgpu_ip_block_version dm_ip_block =
2195 {
2196         .type = AMD_IP_BLOCK_TYPE_DCE,
2197         .major = 1,
2198         .minor = 0,
2199         .rev = 0,
2200         .funcs = &amdgpu_dm_funcs,
2201 };
2202
2203
2204 /**
2205  * DOC: atomic
2206  *
2207  * *WIP*
2208  */
2209
2210 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2211         .fb_create = amdgpu_display_user_framebuffer_create,
2212         .get_format_info = amd_get_format_info,
2213         .output_poll_changed = drm_fb_helper_output_poll_changed,
2214         .atomic_check = amdgpu_dm_atomic_check,
2215         .atomic_commit = amdgpu_dm_atomic_commit,
2216 };
2217
2218 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2219         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2220 };
2221
2222 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2223 {
2224         u32 max_cll, min_cll, max, min, q, r;
2225         struct amdgpu_dm_backlight_caps *caps;
2226         struct amdgpu_display_manager *dm;
2227         struct drm_connector *conn_base;
2228         struct amdgpu_device *adev;
2229         struct dc_link *link = NULL;
2230         static const u8 pre_computed_values[] = {
2231                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2232                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2233
2234         if (!aconnector || !aconnector->dc_link)
2235                 return;
2236
2237         link = aconnector->dc_link;
2238         if (link->connector_signal != SIGNAL_TYPE_EDP)
2239                 return;
2240
2241         conn_base = &aconnector->base;
2242         adev = drm_to_adev(conn_base->dev);
2243         dm = &adev->dm;
2244         caps = &dm->backlight_caps;
2245         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2246         caps->aux_support = false;
2247         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2248         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2249
2250         if (caps->ext_caps->bits.oled == 1 ||
2251             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2252             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2253                 caps->aux_support = true;
2254
2255         /* From the specification (CTA-861-G), the maximum luminance is
2256          * calculated as:
2257          *      Luminance = 50 * 2**(CV/32)
2258          * where CV is a one-byte value.
2259          * Evaluating this expression directly would require floating-point
2260          * precision; to avoid that, we take advantage of the fact that CV is
2261          * divided by a constant. By Euclid's division algorithm, CV can be
2262          * written as CV = 32*q + r. Substituting into the luminance
2263          * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2264          * pre-compute the values of 50*(2**(r/32)). The table was generated
2265          * with the following Ruby line:
2266          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2267          * The results of the above expression can be verified against
2268          * pre_computed_values.
2269          */
2270         q = max_cll >> 5;
2271         r = max_cll % 32;
2272         max = (1 << q) * pre_computed_values[r];
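             /*
              * Worked example (hypothetical value): max_cll = 65 gives q = 2 and
              * r = 1, so max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
              * matching 50 * 2**(65/32) ~= 204.4 from the formula above.
              */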
2273
2274         // min luminance: maxLum * (CV/255)^2 / 100
2275         q = DIV_ROUND_CLOSEST(min_cll, 255);
2276         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2277
2278         caps->aux_max_input_signal = max;
2279         caps->aux_min_input_signal = min;
2280 }
2281
2282 void amdgpu_dm_update_connector_after_detect(
2283                 struct amdgpu_dm_connector *aconnector)
2284 {
2285         struct drm_connector *connector = &aconnector->base;
2286         struct drm_device *dev = connector->dev;
2287         struct dc_sink *sink;
2288
2289         /* MST handled by drm_mst framework */
2290         if (aconnector->mst_mgr.mst_state)
2291                 return;
2292
2293         sink = aconnector->dc_link->local_sink;
2294         if (sink)
2295                 dc_sink_retain(sink);
2296
2297         /*
2298          * An EDID-managed connector gets its first update only in the
2299          * mode_valid hook; the sink is then set to either the fake or the
2300          * physical sink, depending on link status. Skip if already done during boot.
2301          */
2302         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2303                         && aconnector->dc_em_sink) {
2304
2305                 /*
2306                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2307                  * fake a stream, since connector->sink is NULL on resume.
2308                  */
2309                 mutex_lock(&dev->mode_config.mutex);
2310
2311                 if (sink) {
2312                         if (aconnector->dc_sink) {
2313                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2314                                 /*
2315                                  * The retain and release below bump the sink's
2316                                  * refcount: the link no longer points to it after
2317                                  * disconnect, so the next crtc-to-connector reshuffle
2318                                  * by UMD would otherwise drop the last dc_sink reference.
2319                                  */
2320                                 dc_sink_release(aconnector->dc_sink);
2321                         }
2322                         aconnector->dc_sink = sink;
2323                         dc_sink_retain(aconnector->dc_sink);
2324                         amdgpu_dm_update_freesync_caps(connector,
2325                                         aconnector->edid);
2326                 } else {
2327                         amdgpu_dm_update_freesync_caps(connector, NULL);
2328                         if (!aconnector->dc_sink) {
2329                                 aconnector->dc_sink = aconnector->dc_em_sink;
2330                                 dc_sink_retain(aconnector->dc_sink);
2331                         }
2332                 }
2333
2334                 mutex_unlock(&dev->mode_config.mutex);
2335
2336                 if (sink)
2337                         dc_sink_release(sink);
2338                 return;
2339         }
2340
2341         /*
2342          * TODO: temporary guard to look for proper fix
2343          * if this sink is MST sink, we should not do anything
2344          */
2345         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2346                 dc_sink_release(sink);
2347                 return;
2348         }
2349
2350         if (aconnector->dc_sink == sink) {
2351                 /*
2352                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2353                  * Do nothing!!
2354                  */
2355                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2356                                 aconnector->connector_id);
2357                 if (sink)
2358                         dc_sink_release(sink);
2359                 return;
2360         }
2361
2362         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2363                 aconnector->connector_id, aconnector->dc_sink, sink);
2364
2365         mutex_lock(&dev->mode_config.mutex);
2366
2367         /*
2368          * 1. Update status of the drm connector
2369          * 2. Send an event and let userspace tell us what to do
2370          */
2371         if (sink) {
2372                 /*
2373                  * TODO: check if we still need the S3 mode update workaround.
2374                  * If yes, put it here.
2375                  */
2376                 if (aconnector->dc_sink)
2377                         amdgpu_dm_update_freesync_caps(connector, NULL);
2378
2379                 aconnector->dc_sink = sink;
2380                 dc_sink_retain(aconnector->dc_sink);
2381                 if (sink->dc_edid.length == 0) {
2382                         aconnector->edid = NULL;
2383                         if (aconnector->dc_link->aux_mode) {
2384                                 drm_dp_cec_unset_edid(
2385                                         &aconnector->dm_dp_aux.aux);
2386                         }
2387                 } else {
2388                         aconnector->edid =
2389                                 (struct edid *)sink->dc_edid.raw_edid;
2390
2391                         drm_connector_update_edid_property(connector,
2392                                                            aconnector->edid);
2393                         drm_add_edid_modes(connector, aconnector->edid);
2394
2395                         if (aconnector->dc_link->aux_mode)
2396                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2397                                                     aconnector->edid);
2398                 }
2399
2400                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2401                 update_connector_ext_caps(aconnector);
2402         } else {
2403                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2404                 amdgpu_dm_update_freesync_caps(connector, NULL);
2405                 drm_connector_update_edid_property(connector, NULL);
2406                 aconnector->num_modes = 0;
2407                 dc_sink_release(aconnector->dc_sink);
2408                 aconnector->dc_sink = NULL;
2409                 aconnector->edid = NULL;
2410 #ifdef CONFIG_DRM_AMD_DC_HDCP
2411                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2412                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2413                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2414 #endif
2415         }
2416
2417         mutex_unlock(&dev->mode_config.mutex);
2418
2419         update_subconnector_property(aconnector);
2420
2421         if (sink)
2422                 dc_sink_release(sink);
2423 }
2424
2425 static void handle_hpd_irq(void *param)
2426 {
2427         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2428         struct drm_connector *connector = &aconnector->base;
2429         struct drm_device *dev = connector->dev;
2430         enum dc_connection_type new_connection_type = dc_connection_none;
2431 #ifdef CONFIG_DRM_AMD_DC_HDCP
2432         struct amdgpu_device *adev = drm_to_adev(dev);
2433         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2434 #endif
2435
2436         /*
2437          * In case of failure or MST there is no need to update the connector
2438          * status or notify the OS, since (for MST) this is done in MST's own context.
2439          */
2440         mutex_lock(&aconnector->hpd_lock);
2441
2442 #ifdef CONFIG_DRM_AMD_DC_HDCP
2443         if (adev->dm.hdcp_workqueue) {
2444                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2445                 dm_con_state->update_hdcp = true;
2446         }
2447 #endif
2448         if (aconnector->fake_enable)
2449                 aconnector->fake_enable = false;
2450
2451         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2452                 DRM_ERROR("KMS: Failed to detect connector\n");
2453
2454         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2455                 emulated_link_detect(aconnector->dc_link);
2456
2457
2458                 drm_modeset_lock_all(dev);
2459                 dm_restore_drm_connector_state(dev, connector);
2460                 drm_modeset_unlock_all(dev);
2461
2462                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2463                         drm_kms_helper_hotplug_event(dev);
2464
2465         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2466                 if (new_connection_type == dc_connection_none &&
2467                     aconnector->dc_link->type == dc_connection_none)
2468                         dm_set_dpms_off(aconnector->dc_link);
2469
2470                 amdgpu_dm_update_connector_after_detect(aconnector);
2471
2472                 drm_modeset_lock_all(dev);
2473                 dm_restore_drm_connector_state(dev, connector);
2474                 drm_modeset_unlock_all(dev);
2475
2476                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2477                         drm_kms_helper_hotplug_event(dev);
2478         }
2479         mutex_unlock(&aconnector->hpd_lock);
2480
2481 }
2482
2483 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2484 {
2485         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2486         uint8_t dret;
2487         bool new_irq_handled = false;
2488         int dpcd_addr;
2489         int dpcd_bytes_to_read;
2490
2491         const int max_process_count = 30;
2492         int process_count = 0;
2493
2494         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2495
2496         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2497                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2498                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2499                 dpcd_addr = DP_SINK_COUNT;
2500         } else {
2501                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2502                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2503                 dpcd_addr = DP_SINK_COUNT_ESI;
2504         }
2505
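             /*
              * Read the sink's IRQ vector; an MST sink can raise back-to-back
              * requests, so keep servicing until it deasserts (bounded by
              * max_process_count).
              */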
2506         dret = drm_dp_dpcd_read(
2507                 &aconnector->dm_dp_aux.aux,
2508                 dpcd_addr,
2509                 esi,
2510                 dpcd_bytes_to_read);
2511
2512         while (dret == dpcd_bytes_to_read &&
2513                 process_count < max_process_count) {
2514                 uint8_t retry;
2515                 dret = 0;
2516
2517                 process_count++;
2518
2519                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2520                 /* handle HPD short pulse irq */
2521                 if (aconnector->mst_mgr.mst_state)
2522                         drm_dp_mst_hpd_irq(
2523                                 &aconnector->mst_mgr,
2524                                 esi,
2525                                 &new_irq_handled);
2526
2527                 if (new_irq_handled) {
2528                         /* ACK at DPCD to notify the downstream device */
2529                         const int ack_dpcd_bytes_to_write =
2530                                 dpcd_bytes_to_read - 1;
2531
2532                         for (retry = 0; retry < 3; retry++) {
2533                                 uint8_t wret;
2534
2535                                 wret = drm_dp_dpcd_write(
2536                                         &aconnector->dm_dp_aux.aux,
2537                                         dpcd_addr + 1,
2538                                         &esi[1],
2539                                         ack_dpcd_bytes_to_write);
2540                                 if (wret == ack_dpcd_bytes_to_write)
2541                                         break;
2542                         }
2543
2544                         /* check if there is new irq to be handled */
2545                         dret = drm_dp_dpcd_read(
2546                                 &aconnector->dm_dp_aux.aux,
2547                                 dpcd_addr,
2548                                 esi,
2549                                 dpcd_bytes_to_read);
2550
2551                         new_irq_handled = false;
2552                 } else {
2553                         break;
2554                 }
2555         }
2556
2557         if (process_count == max_process_count)
2558                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2559 }
2560
2561 static void handle_hpd_rx_irq(void *param)
2562 {
2563         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2564         struct drm_connector *connector = &aconnector->base;
2565         struct drm_device *dev = connector->dev;
2566         struct dc_link *dc_link = aconnector->dc_link;
2567         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2568         bool result = false;
2569         enum dc_connection_type new_connection_type = dc_connection_none;
2570         struct amdgpu_device *adev = drm_to_adev(dev);
2571         union hpd_irq_data hpd_irq_data;
2572
2573         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2574
2575         /*
2576          * TODO: Temporarily hold the mutex to protect the HPD interrupt from a
2577          * GPIO conflict; once an i2c helper is implemented, this mutex should
2578          * be retired.
2579          */
2580         if (dc_link->type != dc_connection_mst_branch)
2581                 mutex_lock(&aconnector->hpd_lock);
2582
2583         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2584
2585         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2586                 (dc_link->type == dc_connection_mst_branch)) {
2587                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2588                         result = true;
2589                         dm_handle_hpd_rx_irq(aconnector);
2590                         goto out;
2591                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2592                         result = false;
2593                         dm_handle_hpd_rx_irq(aconnector);
2594                         goto out;
2595                 }
2596         }
2597
2598         mutex_lock(&adev->dm.dc_lock);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2601 #else
2602         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2603 #endif
2604         mutex_unlock(&adev->dm.dc_lock);
2605
2606 out:
2607         if (result && !is_mst_root_connector) {
2608                 /* Downstream Port status changed. */
2609                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2610                         DRM_ERROR("KMS: Failed to detect connector\n");
2611
2612                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2613                         emulated_link_detect(dc_link);
2614
2615                         if (aconnector->fake_enable)
2616                                 aconnector->fake_enable = false;
2617
2618                         amdgpu_dm_update_connector_after_detect(aconnector);
2619
2620
2621                         drm_modeset_lock_all(dev);
2622                         dm_restore_drm_connector_state(dev, connector);
2623                         drm_modeset_unlock_all(dev);
2624
2625                         drm_kms_helper_hotplug_event(dev);
2626                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2627
2628                         if (aconnector->fake_enable)
2629                                 aconnector->fake_enable = false;
2630
2631                         amdgpu_dm_update_connector_after_detect(aconnector);
2632
2633
2634                         drm_modeset_lock_all(dev);
2635                         dm_restore_drm_connector_state(dev, connector);
2636                         drm_modeset_unlock_all(dev);
2637
2638                         drm_kms_helper_hotplug_event(dev);
2639                 }
2640         }
2641 #ifdef CONFIG_DRM_AMD_DC_HDCP
2642         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2643                 if (adev->dm.hdcp_workqueue)
2644                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2645         }
2646 #endif
2647
2648         if (dc_link->type != dc_connection_mst_branch) {
2649                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2650                 mutex_unlock(&aconnector->hpd_lock);
2651         }
2652 }
2653
2654 static void register_hpd_handlers(struct amdgpu_device *adev)
2655 {
2656         struct drm_device *dev = adev_to_drm(adev);
2657         struct drm_connector *connector;
2658         struct amdgpu_dm_connector *aconnector;
2659         const struct dc_link *dc_link;
2660         struct dc_interrupt_params int_params = {0};
2661
2662         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2663         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2664
2665         list_for_each_entry(connector,
2666                         &dev->mode_config.connector_list, head) {
2667
2668                 aconnector = to_amdgpu_dm_connector(connector);
2669                 dc_link = aconnector->dc_link;
2670
2671                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2672                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2673                         int_params.irq_source = dc_link->irq_source_hpd;
2674
2675                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2676                                         handle_hpd_irq,
2677                                         (void *) aconnector);
2678                 }
2679
2680                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2681
2682                         /* Also register for DP short pulse (hpd_rx). */
2683                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2684                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2685
2686                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2687                                         handle_hpd_rx_irq,
2688                                         (void *) aconnector);
2689                 }
2690         }
2691 }
2692
2693 #if defined(CONFIG_DRM_AMD_DC_SI)
2694 /* Register IRQ sources and initialize IRQ callbacks */
2695 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2696 {
2697         struct dc *dc = adev->dm.dc;
2698         struct common_irq_params *c_irq_params;
2699         struct dc_interrupt_params int_params = {0};
2700         int r;
2701         int i;
2702         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2703
2704         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2705         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2706
2707         /*
2708          * Actions of amdgpu_irq_add_id():
2709          * 1. Register a set() function with base driver.
2710          *    Base driver will call set() function to enable/disable an
2711          *    interrupt in DC hardware.
2712          * 2. Register amdgpu_dm_irq_handler().
2713          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2714          *    coming from DC hardware.
2715          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2716          *    for acknowledging and handling. */
2717
2718         /* Use VBLANK interrupt */
2719         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2720                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2721                 if (r) {
2722                         DRM_ERROR("Failed to add crtc irq id!\n");
2723                         return r;
2724                 }
2725
2726                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2727                 int_params.irq_source =
2728                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2729
2730                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2731
2732                 c_irq_params->adev = adev;
2733                 c_irq_params->irq_src = int_params.irq_source;
2734
2735                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2736                                 dm_crtc_high_irq, c_irq_params);
2737         }
2738
2739         /* Use GRPH_PFLIP interrupt */
2740         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2741                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2742                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2743                 if (r) {
2744                         DRM_ERROR("Failed to add page flip irq id!\n");
2745                         return r;
2746                 }
2747
2748                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2749                 int_params.irq_source =
2750                         dc_interrupt_to_irq_source(dc, i, 0);
2751
2752                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2753
2754                 c_irq_params->adev = adev;
2755                 c_irq_params->irq_src = int_params.irq_source;
2756
2757                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2758                                 dm_pflip_high_irq, c_irq_params);
2759
2760         }
2761
2762         /* HPD */
2763         r = amdgpu_irq_add_id(adev, client_id,
2764                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2765         if (r) {
2766                 DRM_ERROR("Failed to add hpd irq id!\n");
2767                 return r;
2768         }
2769
2770         register_hpd_handlers(adev);
2771
2772         return 0;
2773 }
2774 #endif
2775
2776 /* Register IRQ sources and initialize IRQ callbacks */
2777 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2778 {
2779         struct dc *dc = adev->dm.dc;
2780         struct common_irq_params *c_irq_params;
2781         struct dc_interrupt_params int_params = {0};
2782         int r;
2783         int i;
2784         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2785
2786         if (adev->asic_type >= CHIP_VEGA10)
2787                 client_id = SOC15_IH_CLIENTID_DCE;
2788
2789         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2790         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2791
2792         /*
2793          * Actions of amdgpu_irq_add_id():
2794          * 1. Register a set() function with base driver.
2795          *    Base driver will call set() function to enable/disable an
2796          *    interrupt in DC hardware.
2797          * 2. Register amdgpu_dm_irq_handler().
2798          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2799          *    coming from DC hardware.
2800          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2801          *    for acknowledging and handling.
2802          */
2803         /* Use VBLANK interrupt */
2804         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2805                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2806                 if (r) {
2807                         DRM_ERROR("Failed to add crtc irq id!\n");
2808                         return r;
2809                 }
2810
2811                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2812                 int_params.irq_source =
2813                         dc_interrupt_to_irq_source(dc, i, 0);
2814
2815                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2816
2817                 c_irq_params->adev = adev;
2818                 c_irq_params->irq_src = int_params.irq_source;
2819
2820                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2821                                 dm_crtc_high_irq, c_irq_params);
2822         }
2823
2824         /* Use VUPDATE interrupt */
2825         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2826                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2827                 if (r) {
2828                         DRM_ERROR("Failed to add vupdate irq id!\n");
2829                         return r;
2830                 }
2831
2832                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2833                 int_params.irq_source =
2834                         dc_interrupt_to_irq_source(dc, i, 0);
2835
2836                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2837
2838                 c_irq_params->adev = adev;
2839                 c_irq_params->irq_src = int_params.irq_source;
2840
2841                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2842                                 dm_vupdate_high_irq, c_irq_params);
2843         }
2844
2845         /* Use GRPH_PFLIP interrupt */
2846         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2847                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2848                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2849                 if (r) {
2850                         DRM_ERROR("Failed to add page flip irq id!\n");
2851                         return r;
2852                 }
2853
2854                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2855                 int_params.irq_source =
2856                         dc_interrupt_to_irq_source(dc, i, 0);
2857
2858                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2859
2860                 c_irq_params->adev = adev;
2861                 c_irq_params->irq_src = int_params.irq_source;
2862
2863                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2864                                 dm_pflip_high_irq, c_irq_params);
2866         }
2867
2868         /* HPD */
2869         r = amdgpu_irq_add_id(adev, client_id,
2870                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2871         if (r) {
2872                 DRM_ERROR("Failed to add hpd irq id!\n");
2873                 return r;
2874         }
2875
2876         register_hpd_handlers(adev);
2877
2878         return 0;
2879 }
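/*
 * The register_irq_handlers() variants in this file all repeat the same
 * loop body: add the source id with the base driver, translate it to a
 * DC irq source, stash the per-irq parameters, then hook up the handler.
 * A minimal sketch of that shared shape (the helper name and exact
 * signature are illustrative only, not an existing function):
 *
 *	static int register_one_dm_irq(struct amdgpu_device *adev,
 *				       unsigned int client_id,
 *				       unsigned int src_id,
 *				       struct amdgpu_irq_src *irq_src,
 *				       struct common_irq_params *params,
 *				       void (*handler)(void *))
 *	{
 *		struct dc_interrupt_params int_params = {0};
 *		int r = amdgpu_irq_add_id(adev, client_id, src_id, irq_src);
 *
 *		if (r)
 *			return r;
 *
 *		int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
 *		int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
 *		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 *		int_params.irq_source = dc_interrupt_to_irq_source(adev->dm.dc,
 *								   src_id, 0);
 *
 *		params->adev = adev;
 *		params->irq_src = int_params.irq_source;
 *
 *		amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *						 handler, params);
 *		return 0;
 *	}
 */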
2880
2881 #if defined(CONFIG_DRM_AMD_DC_DCN)
2882 /* Register IRQ sources and initialize IRQ callbacks */
2883 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2884 {
2885         struct dc *dc = adev->dm.dc;
2886         struct common_irq_params *c_irq_params;
2887         struct dc_interrupt_params int_params = {0};
2888         int r;
2889         int i;
2890
2891         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2892         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2893
2894         /*
2895          * Actions of amdgpu_irq_add_id():
2896          * 1. Register a set() function with base driver.
2897          *    Base driver will call set() function to enable/disable an
2898          *    interrupt in DC hardware.
2899          * 2. Register amdgpu_dm_irq_handler().
2900          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2901          *    coming from DC hardware.
2902          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2903          *    for acknowledging and handling.
2904          */
2905
2906         /* Use VSTARTUP interrupt */
2907         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2908                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2909                         i++) {
2910                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2911
2912                 if (r) {
2913                         DRM_ERROR("Failed to add crtc irq id!\n");
2914                         return r;
2915                 }
2916
2917                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2918                 int_params.irq_source =
2919                         dc_interrupt_to_irq_source(dc, i, 0);
2920
2921                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2922
2923                 c_irq_params->adev = adev;
2924                 c_irq_params->irq_src = int_params.irq_source;
2925
2926                 amdgpu_dm_irq_register_interrupt(
2927                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2928         }
2929
2930         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2931          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2932          * to trigger at end of each vblank, regardless of state of the lock,
2933          * matching DCE behaviour.
2934          */
2935         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2936              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2937              i++) {
2938                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2939
2940                 if (r) {
2941                         DRM_ERROR("Failed to add vupdate irq id!\n");
2942                         return r;
2943                 }
2944
2945                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2946                 int_params.irq_source =
2947                         dc_interrupt_to_irq_source(dc, i, 0);
2948
2949                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2950
2951                 c_irq_params->adev = adev;
2952                 c_irq_params->irq_src = int_params.irq_source;
2953
2954                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2955                                 dm_vupdate_high_irq, c_irq_params);
2956         }
2957
2958         /* Use GRPH_PFLIP interrupt */
2959         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2960                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2961                         i++) {
2962                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2963                 if (r) {
2964                         DRM_ERROR("Failed to add page flip irq id!\n");
2965                         return r;
2966                 }
2967
2968                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2969                 int_params.irq_source =
2970                         dc_interrupt_to_irq_source(dc, i, 0);
2971
2972                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2973
2974                 c_irq_params->adev = adev;
2975                 c_irq_params->irq_src = int_params.irq_source;
2976
2977                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2978                                 dm_pflip_high_irq, c_irq_params);
2980         }
2981
2982         /* HPD */
2983         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2984                         &adev->hpd_irq);
2985         if (r) {
2986                 DRM_ERROR("Failed to add hpd irq id!\n");
2987                 return r;
2988         }
2989
2990         register_hpd_handlers(adev);
2991
2992         return 0;
2993 }
2994 #endif
2995
2996 /*
2997  * Acquires the lock for the atomic state object and returns
2998  * the new atomic state.
2999  *
3000  * This should only be called during atomic check.
3001  */
3002 static int dm_atomic_get_state(struct drm_atomic_state *state,
3003                                struct dm_atomic_state **dm_state)
3004 {
3005         struct drm_device *dev = state->dev;
3006         struct amdgpu_device *adev = drm_to_adev(dev);
3007         struct amdgpu_display_manager *dm = &adev->dm;
3008         struct drm_private_state *priv_state;
3009
3010         if (*dm_state)
3011                 return 0;
3012
3013         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3014         if (IS_ERR(priv_state))
3015                 return PTR_ERR(priv_state);
3016
3017         *dm_state = to_dm_atomic_state(priv_state);
3018
3019         return 0;
3020 }
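/*
 * Typical call pattern during atomic check (a sketch; the local names
 * are illustrative): callers keep a NULL-initialized pointer and fetch
 * the state lazily, so the private-obj lock is only taken the first
 * time a check path actually needs the DM state.
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context now belongs to this atomic transaction
 */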
3021
3022 static struct dm_atomic_state *
3023 dm_atomic_get_new_state(struct drm_atomic_state *state)
3024 {
3025         struct drm_device *dev = state->dev;
3026         struct amdgpu_device *adev = drm_to_adev(dev);
3027         struct amdgpu_display_manager *dm = &adev->dm;
3028         struct drm_private_obj *obj;
3029         struct drm_private_state *new_obj_state;
3030         int i;
3031
3032         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3033                 if (obj->funcs == dm->atomic_obj.funcs)
3034                         return to_dm_atomic_state(new_obj_state);
3035         }
3036
3037         return NULL;
3038 }
3039
3040 static struct drm_private_state *
3041 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3042 {
3043         struct dm_atomic_state *old_state, *new_state;
3044
3045         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3046         if (!new_state)
3047                 return NULL;
3048
3049         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3050
3051         old_state = to_dm_atomic_state(obj->state);
3052
3053         if (old_state && old_state->context)
3054                 new_state->context = dc_copy_state(old_state->context);
3055
3056         if (!new_state->context) {
3057                 kfree(new_state);
3058                 return NULL;
3059         }
3060
3061         return &new_state->base;
3062 }
3063
3064 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3065                                     struct drm_private_state *state)
3066 {
3067         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3068
3069         if (dm_state && dm_state->context)
3070                 dc_release_state(dm_state->context);
3071
3072         kfree(dm_state);
3073 }
3074
3075 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3076         .atomic_duplicate_state = dm_atomic_duplicate_state,
3077         .atomic_destroy_state = dm_atomic_destroy_state,
3078 };
3079
3080 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3081 {
3082         struct dm_atomic_state *state;
3083         int r;
3084
3085         adev->mode_info.mode_config_initialized = true;
3086
3087         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3088         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3089
3090         adev_to_drm(adev)->mode_config.max_width = 16384;
3091         adev_to_drm(adev)->mode_config.max_height = 16384;
3092
3093         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3094         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3095         /* indicates support for immediate flip */
3096         adev_to_drm(adev)->mode_config.async_page_flip = true;
3097
3098         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3099
3100         state = kzalloc(sizeof(*state), GFP_KERNEL);
3101         if (!state)
3102                 return -ENOMEM;
3103
3104         state->context = dc_create_state(adev->dm.dc);
3105         if (!state->context) {
3106                 kfree(state);
3107                 return -ENOMEM;
3108         }
3109
3110         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3111
3112         drm_atomic_private_obj_init(adev_to_drm(adev),
3113                                     &adev->dm.atomic_obj,
3114                                     &state->base,
3115                                     &dm_atomic_state_funcs);
3116
3117         r = amdgpu_display_modeset_create_props(adev);
3118         if (r) {
3119                 dc_release_state(state->context);
3120                 kfree(state);
3121                 return r;
3122         }
3123
3124         r = amdgpu_dm_audio_init(adev);
3125         if (r) {
3126                 dc_release_state(state->context);
3127                 kfree(state);
3128                 return r;
3129         }
3130
3131         return 0;
3132 }
3133
3134 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3135 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3136 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3137
3138 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3139         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3140
3141 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3142 {
3143 #if defined(CONFIG_ACPI)
3144         struct amdgpu_dm_backlight_caps caps;
3145
3146         memset(&caps, 0, sizeof(caps));
3147
3148         if (dm->backlight_caps.caps_valid)
3149                 return;
3150
3151         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3152         if (caps.caps_valid) {
3153                 dm->backlight_caps.caps_valid = true;
3154                 if (caps.aux_support)
3155                         return;
3156                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3157                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3158         } else {
3159                 dm->backlight_caps.min_input_signal =
3160                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3161                 dm->backlight_caps.max_input_signal =
3162                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3163         }
3164 #else
3165         if (dm->backlight_caps.aux_support)
3166                 return;
3167
3168         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3169         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3170 #endif
3171 }
3172
3173 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3174 {
3175         bool rc;
3176
3177         if (!link)
3178                 return 1;
3179
3180         rc = dc_link_set_backlight_level_nits(link, true, brightness,
3181                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3182
3183         return rc ? 0 : 1;
3184 }
3185
3186 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3187                                 unsigned *min, unsigned *max)
3188 {
3189         if (!caps)
3190                 return 0;
3191
3192         if (caps->aux_support) {
3193                 // Firmware limits are in nits, DC API wants millinits.
3194                 *max = 1000 * caps->aux_max_input_signal;
3195                 *min = 1000 * caps->aux_min_input_signal;
3196         } else {
3197                 // Firmware limits are 8-bit, PWM control is 16-bit.
3198                 *max = 0x101 * caps->max_input_signal;
3199                 *min = 0x101 * caps->min_input_signal;
3200         }
3201         return 1;
3202 }
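/*
 * Worked example for the PWM branch above: multiplying an 8-bit
 * firmware limit by 0x101 replicates the byte (0xab becomes 0xabab),
 * so a max_input_signal of 255 expands to 255 * 0x101 = 0xffff and the
 * full 16-bit PWM range is covered without floating point.
 */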
3203
3204 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3205                                         uint32_t brightness)
3206 {
3207         unsigned min, max;
3208
3209         if (!get_brightness_range(caps, &min, &max))
3210                 return brightness;
3211
3212         // Rescale 0..255 to min..max
3213         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3214                                        AMDGPU_MAX_BL_LEVEL);
3215 }
3216
3217 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3218                                       uint32_t brightness)
3219 {
3220         unsigned min, max;
3221
3222         if (!get_brightness_range(caps, &min, &max))
3223                 return brightness;
3224
3225         if (brightness < min)
3226                 return 0;
3227         // Rescale min..max to 0..255
3228         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3229                                  max - min);
3230 }
3231
3232 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3233 {
3234         struct amdgpu_display_manager *dm = bl_get_data(bd);
3235         struct amdgpu_dm_backlight_caps caps;
3236         struct dc_link *link = NULL;
3237         u32 brightness;
3238         bool rc;
3239
3240         amdgpu_dm_update_backlight_caps(dm);
3241         caps = dm->backlight_caps;
3242
3243         link = (struct dc_link *)dm->backlight_link;
3244
3245         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3246         // Change brightness based on AUX property
3247         if (caps.aux_support)
3248                 return set_backlight_via_aux(link, brightness);
3249
3250         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3251
3252         return rc ? 0 : 1;
3253 }
3254
3255 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3256 {
3257         struct amdgpu_display_manager *dm = bl_get_data(bd);
3258         int ret = dc_link_get_backlight_level(dm->backlight_link);
3259
3260         if (ret == DC_ERROR_UNEXPECTED)
3261                 return bd->props.brightness;
3262         return convert_brightness_to_user(&dm->backlight_caps, ret);
3263 }
3264
3265 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3266         .options = BL_CORE_SUSPENDRESUME,
3267         .get_brightness = amdgpu_dm_backlight_get_brightness,
3268         .update_status  = amdgpu_dm_backlight_update_status,
3269 };
3270
3271 static void
3272 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3273 {
3274         char bl_name[16];
3275         struct backlight_properties props = { 0 };
3276
3277         amdgpu_dm_update_backlight_caps(dm);
3278
3279         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3280         props.brightness = AMDGPU_MAX_BL_LEVEL;
3281         props.type = BACKLIGHT_RAW;
3282
3283         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3284                  adev_to_drm(dm->adev)->primary->index);
3285
3286         dm->backlight_dev = backlight_device_register(bl_name,
3287                                                       adev_to_drm(dm->adev)->dev,
3288                                                       dm,
3289                                                       &amdgpu_dm_backlight_ops,
3290                                                       &props);
3291
3292         if (IS_ERR(dm->backlight_dev))
3293                 DRM_ERROR("DM: Backlight registration failed!\n");
3294         else
3295                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3296 }
3297
3298 #endif
3299
3300 static int initialize_plane(struct amdgpu_display_manager *dm,
3301                             struct amdgpu_mode_info *mode_info, int plane_id,
3302                             enum drm_plane_type plane_type,
3303                             const struct dc_plane_cap *plane_cap)
3304 {
3305         struct drm_plane *plane;
3306         unsigned long possible_crtcs;
3307         int ret = 0;
3308
3309         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3310         if (!plane) {
3311                 DRM_ERROR("KMS: Failed to allocate plane\n");
3312                 return -ENOMEM;
3313         }
3314         plane->type = plane_type;
3315
3316         /*
3317          * HACK: IGT tests expect that the primary plane for a CRTC
3318          * can only have one possible CRTC. Only expose support for
3319          * all CRTCs if the plane is not going to be used as a primary
3320          * plane for a CRTC - like overlay or underlay planes.
3321          */
3322         possible_crtcs = 1 << plane_id;
3323         if (plane_id >= dm->dc->caps.max_streams)
3324                 possible_crtcs = 0xff;
3325
3326         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3327
3328         if (ret) {
3329                 DRM_ERROR("KMS: Failed to initialize plane\n");
3330                 kfree(plane);
3331                 return ret;
3332         }
3333
3334         if (mode_info)
3335                 mode_info->planes[plane_id] = plane;
3336
3337         return ret;
3338 }
3339
3341 static void register_backlight_device(struct amdgpu_display_manager *dm,
3342                                       struct dc_link *link)
3343 {
3344 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3345         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3346
3347         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3348             link->type != dc_connection_none) {
3349                 /*
3350                  * Even if registration failed, we should continue with
3351                  * DM initialization because not having a backlight control
3352                  * is better than a black screen.
3353                  */
3354                 amdgpu_dm_register_backlight_device(dm);
3355
3356                 if (dm->backlight_dev)
3357                         dm->backlight_link = link;
3358         }
3359 #endif
3360 }
3361
3363 /*
3364  * In this architecture, the association
3365  * connector -> encoder -> crtc
3366  * is not really required. The crtc and connector will hold the
3367  * display_index as an abstraction to use with the DAL component.
3368  *
3369  * Returns 0 on success
3370  */
3371 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3372 {
3373         struct amdgpu_display_manager *dm = &adev->dm;
3374         int32_t i;
3375         struct amdgpu_dm_connector *aconnector = NULL;
3376         struct amdgpu_encoder *aencoder = NULL;
3377         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3378         uint32_t link_cnt;
3379         int32_t primary_planes;
3380         enum dc_connection_type new_connection_type = dc_connection_none;
3381         const struct dc_plane_cap *plane;
3382
3383         dm->display_indexes_num = dm->dc->caps.max_streams;
3384         /* Update the actual number of CRTCs in use */
3385         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3386
3387         link_cnt = dm->dc->caps.max_links;
3388         if (amdgpu_dm_mode_config_init(dm->adev)) {
3389                 DRM_ERROR("DM: Failed to initialize mode config\n");
3390                 return -EINVAL;
3391         }
3392
3393         /* There is one primary plane per CRTC */
3394         primary_planes = dm->dc->caps.max_streams;
3395         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3396
3397         /*
3398          * Initialize primary planes, implicit planes for legacy IOCTLs.
3399          * Order is reversed to match iteration order in atomic check.
3400          */
3401         for (i = (primary_planes - 1); i >= 0; i--) {
3402                 plane = &dm->dc->caps.planes[i];
3403
3404                 if (initialize_plane(dm, mode_info, i,
3405                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3406                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3407                         goto fail;
3408                 }
3409         }
3410
3411         /*
3412          * Initialize overlay planes, index starting after primary planes.
3413          * These planes have a higher DRM index than the primary planes since
3414          * they should be considered as having a higher z-order.
3415          * Order is reversed to match iteration order in atomic check.
3416          *
3417          * Only support DCN for now, and only expose one so we don't encourage
3418          * userspace to use up all the pipes.
3419          */
3420         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3421                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3422
3423                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3424                         continue;
3425
3426                 if (!plane->blends_with_above || !plane->blends_with_below)
3427                         continue;
3428
3429                 if (!plane->pixel_format_support.argb8888)
3430                         continue;
3431
3432                 if (initialize_plane(dm, NULL, primary_planes + i,
3433                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3434                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3435                         goto fail;
3436                 }
3437
3438                 /* Only create one overlay plane. */
3439                 break;
3440         }
3441
3442         for (i = 0; i < dm->dc->caps.max_streams; i++)
3443                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3444                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3445                         goto fail;
3446                 }
3447
3448         /* Loop over all connectors on the board */
3449         for (i = 0; i < link_cnt; i++) {
3450                 struct dc_link *link = NULL;
3451
3452                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3453                         DRM_ERROR(
3454                                 "KMS: Cannot support more than %d display indexes\n",
3455                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3456                         continue;
3457                 }
3458
3459                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3460                 if (!aconnector)
3461                         goto fail;
3462
3463                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3464                 if (!aencoder)
3465                         goto fail;
3466
3467                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3468                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3469                         goto fail;
3470                 }
3471
3472                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3473                         DRM_ERROR("KMS: Failed to initialize connector\n");
3474                         goto fail;
3475                 }
3476
3477                 link = dc_get_link_at_index(dm->dc, i);
3478
3479                 if (!dc_link_detect_sink(link, &new_connection_type))
3480                         DRM_ERROR("KMS: Failed to detect connector\n");
3481
3482                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3483                         emulated_link_detect(link);
3484                         amdgpu_dm_update_connector_after_detect(aconnector);
3485
3486                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3487                         amdgpu_dm_update_connector_after_detect(aconnector);
3488                         register_backlight_device(dm, link);
3489                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3490                                 amdgpu_dm_set_psr_caps(link);
3491                 }
3493
3494         }
3495
3496         /* Software is initialized. Now we can register interrupt handlers. */
3497         switch (adev->asic_type) {
3498 #if defined(CONFIG_DRM_AMD_DC_SI)
3499         case CHIP_TAHITI:
3500         case CHIP_PITCAIRN:
3501         case CHIP_VERDE:
3502         case CHIP_OLAND:
3503                 if (dce60_register_irq_handlers(dm->adev)) {
3504                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3505                         goto fail;
3506                 }
3507                 break;
3508 #endif
3509         case CHIP_BONAIRE:
3510         case CHIP_HAWAII:
3511         case CHIP_KAVERI:
3512         case CHIP_KABINI:
3513         case CHIP_MULLINS:
3514         case CHIP_TONGA:
3515         case CHIP_FIJI:
3516         case CHIP_CARRIZO:
3517         case CHIP_STONEY:
3518         case CHIP_POLARIS11:
3519         case CHIP_POLARIS10:
3520         case CHIP_POLARIS12:
3521         case CHIP_VEGAM:
3522         case CHIP_VEGA10:
3523         case CHIP_VEGA12:
3524         case CHIP_VEGA20:
3525                 if (dce110_register_irq_handlers(dm->adev)) {
3526                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3527                         goto fail;
3528                 }
3529                 break;
3530 #if defined(CONFIG_DRM_AMD_DC_DCN)
3531         case CHIP_RAVEN:
3532         case CHIP_NAVI12:
3533         case CHIP_NAVI10:
3534         case CHIP_NAVI14:
3535         case CHIP_RENOIR:
3536         case CHIP_SIENNA_CICHLID:
3537         case CHIP_NAVY_FLOUNDER:
3538         case CHIP_DIMGREY_CAVEFISH:
3539         case CHIP_VANGOGH:
3540                 if (dcn10_register_irq_handlers(dm->adev)) {
3541                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3542                         goto fail;
3543                 }
3544                 break;
3545 #endif
3546         default:
3547                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3548                 goto fail;
3549         }
3550
3551         return 0;
3552 fail:
3553         kfree(aencoder);
3554         kfree(aconnector);
3555
3556         return -EINVAL;
3557 }
3558
3559 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3560 {
3561         drm_mode_config_cleanup(dm->ddev);
3562         drm_atomic_private_obj_fini(&dm->atomic_obj);
3564 }
3565
3566 /******************************************************************************
3567  * amdgpu_display_funcs functions
3568  *****************************************************************************/
3569
3570 /*
3571  * dm_bandwidth_update - program display watermarks
3572  *
3573  * @adev: amdgpu_device pointer
3574  *
3575  * Calculate and program the display watermarks and line buffer allocation.
3576  */
3577 static void dm_bandwidth_update(struct amdgpu_device *adev)
3578 {
3579         /* TODO: implement later */
3580 }
3581
3582 static const struct amdgpu_display_funcs dm_display_funcs = {
3583         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3584         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3585         .backlight_set_level = NULL, /* never called for DC */
3586         .backlight_get_level = NULL, /* never called for DC */
3587         .hpd_sense = NULL,/* called unconditionally */
3588         .hpd_set_polarity = NULL, /* called unconditionally */
3589         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3590         .page_flip_get_scanoutpos =
3591                 dm_crtc_get_scanoutpos,/* called unconditionally */
3592         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3593         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3594 };
3595
3596 #if defined(CONFIG_DEBUG_KERNEL_DC)
3597
3598 static ssize_t s3_debug_store(struct device *device,
3599                               struct device_attribute *attr,
3600                               const char *buf,
3601                               size_t count)
3602 {
3603         int ret;
3604         int s3_state;
3605         struct drm_device *drm_dev = dev_get_drvdata(device);
3606         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3607
3608         ret = kstrtoint(buf, 0, &s3_state);
3609
3610         if (ret == 0) {
3611                 if (s3_state) {
3612                         dm_resume(adev);
3613                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3614                 } else
3615                         dm_suspend(adev);
3616         }
3617
3618         return ret == 0 ? count : 0;
3619 }
3620
3621 DEVICE_ATTR_WO(s3_debug);
3622
3623 #endif
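/*
 * With CONFIG_DEBUG_KERNEL_DC set this gives a manual suspend/resume
 * knob from userspace, e.g. (the card0 path is illustrative):
 *
 *	echo 0 > /sys/class/drm/card0/device/s3_debug	# dm_suspend()
 *	echo 1 > /sys/class/drm/card0/device/s3_debug	# dm_resume() + hotplug event
 */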
3624
3625 static int dm_early_init(void *handle)
3626 {
3627         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3628
3629         switch (adev->asic_type) {
3630 #if defined(CONFIG_DRM_AMD_DC_SI)
3631         case CHIP_TAHITI:
3632         case CHIP_PITCAIRN:
3633         case CHIP_VERDE:
3634                 adev->mode_info.num_crtc = 6;
3635                 adev->mode_info.num_hpd = 6;
3636                 adev->mode_info.num_dig = 6;
3637                 break;
3638         case CHIP_OLAND:
3639                 adev->mode_info.num_crtc = 2;
3640                 adev->mode_info.num_hpd = 2;
3641                 adev->mode_info.num_dig = 2;
3642                 break;
3643 #endif
3644         case CHIP_BONAIRE:
3645         case CHIP_HAWAII:
3646                 adev->mode_info.num_crtc = 6;
3647                 adev->mode_info.num_hpd = 6;
3648                 adev->mode_info.num_dig = 6;
3649                 break;
3650         case CHIP_KAVERI:
3651                 adev->mode_info.num_crtc = 4;
3652                 adev->mode_info.num_hpd = 6;
3653                 adev->mode_info.num_dig = 7;
3654                 break;
3655         case CHIP_KABINI:
3656         case CHIP_MULLINS:
3657                 adev->mode_info.num_crtc = 2;
3658                 adev->mode_info.num_hpd = 6;
3659                 adev->mode_info.num_dig = 6;
3660                 break;
3661         case CHIP_FIJI:
3662         case CHIP_TONGA:
3663                 adev->mode_info.num_crtc = 6;
3664                 adev->mode_info.num_hpd = 6;
3665                 adev->mode_info.num_dig = 7;
3666                 break;
3667         case CHIP_CARRIZO:
3668                 adev->mode_info.num_crtc = 3;
3669                 adev->mode_info.num_hpd = 6;
3670                 adev->mode_info.num_dig = 9;
3671                 break;
3672         case CHIP_STONEY:
3673                 adev->mode_info.num_crtc = 2;
3674                 adev->mode_info.num_hpd = 6;
3675                 adev->mode_info.num_dig = 9;
3676                 break;
3677         case CHIP_POLARIS11:
3678         case CHIP_POLARIS12:
3679                 adev->mode_info.num_crtc = 5;
3680                 adev->mode_info.num_hpd = 5;
3681                 adev->mode_info.num_dig = 5;
3682                 break;
3683         case CHIP_POLARIS10:
3684         case CHIP_VEGAM:
3685                 adev->mode_info.num_crtc = 6;
3686                 adev->mode_info.num_hpd = 6;
3687                 adev->mode_info.num_dig = 6;
3688                 break;
3689         case CHIP_VEGA10:
3690         case CHIP_VEGA12:
3691         case CHIP_VEGA20:
3692                 adev->mode_info.num_crtc = 6;
3693                 adev->mode_info.num_hpd = 6;
3694                 adev->mode_info.num_dig = 6;
3695                 break;
3696 #if defined(CONFIG_DRM_AMD_DC_DCN)
3697         case CHIP_RAVEN:
3698         case CHIP_RENOIR:
3699         case CHIP_VANGOGH:
3700                 adev->mode_info.num_crtc = 4;
3701                 adev->mode_info.num_hpd = 4;
3702                 adev->mode_info.num_dig = 4;
3703                 break;
3704         case CHIP_NAVI10:
3705         case CHIP_NAVI12:
3706         case CHIP_SIENNA_CICHLID:
3707         case CHIP_NAVY_FLOUNDER:
3708                 adev->mode_info.num_crtc = 6;
3709                 adev->mode_info.num_hpd = 6;
3710                 adev->mode_info.num_dig = 6;
3711                 break;
3712         case CHIP_NAVI14:
3713         case CHIP_DIMGREY_CAVEFISH:
3714                 adev->mode_info.num_crtc = 5;
3715                 adev->mode_info.num_hpd = 5;
3716                 adev->mode_info.num_dig = 5;
3717                 break;
3718 #endif
3719         default:
3720                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3721                 return -EINVAL;
3722         }
3723
3724         amdgpu_dm_set_irq_funcs(adev);
3725
3726         if (adev->mode_info.funcs == NULL)
3727                 adev->mode_info.funcs = &dm_display_funcs;
3728
3729         /*
3730          * Note: Do NOT change adev->audio_endpt_rreg and
3731          * adev->audio_endpt_wreg because they are initialised in
3732          * amdgpu_device_init()
3733          */
3734 #if defined(CONFIG_DEBUG_KERNEL_DC)
3735         device_create_file(
3736                 adev_to_drm(adev)->dev,
3737                 &dev_attr_s3_debug);
3738 #endif
3739
3740         return 0;
3741 }
3742
3743 static bool modeset_required(struct drm_crtc_state *crtc_state,
3744                              struct dc_stream_state *new_stream,
3745                              struct dc_stream_state *old_stream)
3746 {
3747         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3748 }
3749
3750 static bool modereset_required(struct drm_crtc_state *crtc_state)
3751 {
3752         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3753 }
3754
3755 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3756 {
3757         drm_encoder_cleanup(encoder);
3758         kfree(encoder);
3759 }
3760
3761 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3762         .destroy = amdgpu_dm_encoder_destroy,
3763 };
3764
3766 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3767                                 struct dc_scaling_info *scaling_info)
3768 {
3769         int scale_w, scale_h;
3770
3771         memset(scaling_info, 0, sizeof(*scaling_info));
3772
3773         /* Source is fixed 16.16 but we ignore mantissa for now... */
3774         scaling_info->src_rect.x = state->src_x >> 16;
3775         scaling_info->src_rect.y = state->src_y >> 16;
3776
3777         scaling_info->src_rect.width = state->src_w >> 16;
3778         if (scaling_info->src_rect.width == 0)
3779                 return -EINVAL;
3780
3781         scaling_info->src_rect.height = state->src_h >> 16;
3782         if (scaling_info->src_rect.height == 0)
3783                 return -EINVAL;
3784
3785         scaling_info->dst_rect.x = state->crtc_x;
3786         scaling_info->dst_rect.y = state->crtc_y;
3787
3788         if (state->crtc_w == 0)
3789                 return -EINVAL;
3790
3791         scaling_info->dst_rect.width = state->crtc_w;
3792
3793         if (state->crtc_h == 0)
3794                 return -EINVAL;
3795
3796         scaling_info->dst_rect.height = state->crtc_h;
3797
3798         /* DRM doesn't specify clipping on destination output. */
3799         scaling_info->clip_rect = scaling_info->dst_rect;
3800
3801         /* TODO: Validate scaling per-format with DC plane caps */
3802         scale_w = scaling_info->dst_rect.width * 1000 /
3803                   scaling_info->src_rect.width;
3804
3805         if (scale_w < 250 || scale_w > 16000)
3806                 return -EINVAL;
3807
3808         scale_h = scaling_info->dst_rect.height * 1000 /
3809                   scaling_info->src_rect.height;
3810
3811         if (scale_h < 250 || scale_h > 16000)
3812                 return -EINVAL;
3813
3814         /*
3815          * The "scaling_quality" can be ignored for now, quality = 0 has DC
3816          * assume reasonable defaults based on the format.
3817          */
3818
3819         return 0;
3820 }
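/*
 * Example of the limits above: a plane with src_w = 1920 << 16 and
 * crtc_w = 960 gives scale_w = 960 * 1000 / 1920 = 500, i.e. a 2x
 * downscale, well inside the accepted 250..16000 window (up to 4x
 * downscale and 16x upscale per axis).
 */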
3821
3822 static void
3823 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3824                                  uint64_t tiling_flags)
3825 {
3826         /* Fill GFX8 params */
3827         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3828                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3829
3830                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3831                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3832                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3833                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3834                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3835
3836                 /* XXX fix me for VI */
3837                 tiling_info->gfx8.num_banks = num_banks;
3838                 tiling_info->gfx8.array_mode =
3839                                 DC_ARRAY_2D_TILED_THIN1;
3840                 tiling_info->gfx8.tile_split = tile_split;
3841                 tiling_info->gfx8.bank_width = bankw;
3842                 tiling_info->gfx8.bank_height = bankh;
3843                 tiling_info->gfx8.tile_aspect = mtaspect;
3844                 tiling_info->gfx8.tile_mode =
3845                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3846         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3847                         == DC_ARRAY_1D_TILED_THIN1) {
3848                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3849         }
3850
3851         tiling_info->gfx8.pipe_config =
3852                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3853 }
3854
3855 static void
3856 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3857                                   union dc_tiling_info *tiling_info)
3858 {
3859         tiling_info->gfx9.num_pipes =
3860                 adev->gfx.config.gb_addr_config_fields.num_pipes;
3861         tiling_info->gfx9.num_banks =
3862                 adev->gfx.config.gb_addr_config_fields.num_banks;
3863         tiling_info->gfx9.pipe_interleave =
3864                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3865         tiling_info->gfx9.num_shader_engines =
3866                 adev->gfx.config.gb_addr_config_fields.num_se;
3867         tiling_info->gfx9.max_compressed_frags =
3868                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3869         tiling_info->gfx9.num_rb_per_se =
3870                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3871         tiling_info->gfx9.shaderEnable = 1;
3872         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3873             adev->asic_type == CHIP_NAVY_FLOUNDER ||
3874             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3875             adev->asic_type == CHIP_VANGOGH)
3876                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3877 }
3878
3879 static int
3880 validate_dcc(struct amdgpu_device *adev,
3881              const enum surface_pixel_format format,
3882              const enum dc_rotation_angle rotation,
3883              const union dc_tiling_info *tiling_info,
3884              const struct dc_plane_dcc_param *dcc,
3885              const struct dc_plane_address *address,
3886              const struct plane_size *plane_size)
3887 {
3888         struct dc *dc = adev->dm.dc;
3889         struct dc_dcc_surface_param input;
3890         struct dc_surface_dcc_cap output;
3891
3892         memset(&input, 0, sizeof(input));
3893         memset(&output, 0, sizeof(output));
3894
3895         if (!dcc->enable)
3896                 return 0;
3897
3898         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3899             !dc->cap_funcs.get_dcc_compression_cap)
3900                 return -EINVAL;
3901
3902         input.format = format;
3903         input.surface_size.width = plane_size->surface_size.width;
3904         input.surface_size.height = plane_size->surface_size.height;
3905         input.swizzle_mode = tiling_info->gfx9.swizzle;
3906
3907         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3908                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3909         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3910                 input.scan = SCAN_DIRECTION_VERTICAL;
3911
3912         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3913                 return -EINVAL;
3914
3915         if (!output.capable)
3916                 return -EINVAL;
3917
3918         if (dcc->independent_64b_blks == 0 &&
3919             output.grph.rgb.independent_64b_blks != 0)
3920                 return -EINVAL;
3921
3922         return 0;
3923 }
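/*
 * Note on the final check above: a DCC framebuffer without independent
 * 64B blocks is only rejected when the DC caps report that the hardware
 * requires them; the opposite case (the framebuffer has them but the
 * hardware does not need them) is harmless and accepted.
 */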
3924
3925 static bool
3926 modifier_has_dcc(uint64_t modifier)
3927 {
3928         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3929 }
3930
3931 static unsigned
3932 modifier_gfx9_swizzle_mode(uint64_t modifier)
3933 {
3934         if (modifier == DRM_FORMAT_MOD_LINEAR)
3935                 return 0;
3936
3937         return AMD_FMT_MOD_GET(TILE, modifier);
3938 }
3939
3940 static const struct drm_format_info *
3941 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3942 {
3943         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3944 }
3945
3946 static void
3947 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3948                                     union dc_tiling_info *tiling_info,
3949                                     uint64_t modifier)
3950 {
3951         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3952         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3953         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3954         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3955
3956         fill_gfx9_tiling_info_from_device(adev, tiling_info);
3957
3958         if (!IS_AMD_FMT_MOD(modifier))
3959                 return;
3960
3961         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3962         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3963
3964         if (adev->family >= AMDGPU_FAMILY_NV) {
3965                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3966         } else {
3967                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3968
3969                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3970         }
3971 }
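/*
 * Example of the decode above (values illustrative): a modifier with
 * PIPE_XOR_BITS = 5 gives pipes_log2 = min(4, 5) = 4, so num_pipes =
 * 16, and the remaining xor bit is attributed to the shader engines:
 * num_shader_engines = 1 << (5 - 4) = 2.
 */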
3972
3973 enum dm_micro_swizzle {
3974         MICRO_SWIZZLE_Z = 0,
3975         MICRO_SWIZZLE_S = 1,
3976         MICRO_SWIZZLE_D = 2,
3977         MICRO_SWIZZLE_R = 3
3978 };
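/*
 * The micro swizzle is the low two bits of the gfx9+ swizzle mode, e.g.
 * AMD_FMT_MOD_TILE_GFX9_64K_S_X (25) & 3 == MICRO_SWIZZLE_S and
 * AMD_FMT_MOD_TILE_GFX9_64K_D (10) & 3 == MICRO_SWIZZLE_D; this is the
 * value dm_plane_format_mod_supported() below keys its per-format
 * restrictions on.
 */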
3979
3980 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3981                                           uint32_t format,
3982                                           uint64_t modifier)
3983 {
3984         struct amdgpu_device *adev = drm_to_adev(plane->dev);
3985         const struct drm_format_info *info = drm_format_info(format);
3986
3987         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3988
3989         if (!info)
3990                 return false;
3991
3992         /*
3993          * We always have to allow this modifier, because core DRM still
3994          * checks LINEAR support if userspace does not provide modifiers.
3995          */
3996         if (modifier == DRM_FORMAT_MOD_LINEAR)
3997                 return true;
3998
3999         /*
4000          * The arbitrary tiling support for multiplane formats has not been hooked
4001          * up.
4002          */
4003         if (info->num_planes > 1)
4004                 return false;
4005
4006         /*
4007          * For D swizzle the canonical modifier depends on the bpp, so check
4008          * it here.
4009          */
4010         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4011             adev->family >= AMDGPU_FAMILY_NV) {
4012                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4013                         return false;
4014         }
4015
4016         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4017             info->cpp[0] < 8)
4018                 return false;
4019
4020         if (modifier_has_dcc(modifier)) {
4021                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4022                 if (info->cpp[0] != 4)
4023                         return false;
4024         }
4025
4026         return true;
4027 }
4028
4029 static void
4030 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4031 {
4032         if (!*mods)
4033                 return;
4034
4035         if (*cap - *size < 1) {
4036                 uint64_t new_cap = *cap * 2;
4037                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4038
4039                 if (!new_mods) {
4040                         kfree(*mods);
4041                         *mods = NULL;
4042                         return;
4043                 }
4044
4045                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4046                 kfree(*mods);
4047                 *mods = new_mods;
4048                 *cap = new_cap;
4049         }
4050
4051         (*mods)[*size] = mod;
4052         *size += 1;
4053 }
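/*
 * On allocation failure add_modifier() frees the list and sets *mods to
 * NULL, which turns every subsequent call into a no-op; the callers
 * below can therefore build the whole set and check for NULL once at
 * the end instead of after every append.
 */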
4054
4055 static void
4056 add_gfx9_modifiers(const struct amdgpu_device *adev,
4057                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4058 {
4059         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4060         int pipe_xor_bits = min(8, pipes +
4061                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4062         int bank_xor_bits = min(8 - pipe_xor_bits,
4063                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4064         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4065                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4066
4068         if (adev->family == AMDGPU_FAMILY_RV) {
4069                 /* Raven2 and later */
4070                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4071
4072                 /*
4073                  * No _D DCC swizzles yet because we only allow 32bpp, which
4074                  * doesn't support _D on DCN
4075                  */
4076
4077                 if (has_constant_encode) {
4078                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4079                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4080                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4081                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4082                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4083                                     AMD_FMT_MOD_SET(DCC, 1) |
4084                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4085                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4086                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4087                 }
4088
4089                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4090                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4091                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4092                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4093                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4094                             AMD_FMT_MOD_SET(DCC, 1) |
4095                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4096                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4097                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4098
4099                 if (has_constant_encode) {
4100                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4101                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4102                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4103                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4104                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4105                                     AMD_FMT_MOD_SET(DCC, 1) |
4106                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4107                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4108                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4110                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4111                                     AMD_FMT_MOD_SET(RB, rb) |
4112                                     AMD_FMT_MOD_SET(PIPE, pipes));
4113                 }
4114
4115                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4116                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4117                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4118                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4119                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4120                             AMD_FMT_MOD_SET(DCC, 1) |
4121                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4122                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4123                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4124                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4125                             AMD_FMT_MOD_SET(RB, rb) |
4126                             AMD_FMT_MOD_SET(PIPE, pipes));
4127         }
4128
4129         /*
4130          * Only supported for 64bpp on Raven, will be filtered on format in
4131          * dm_plane_format_mod_supported.
4132          */
4133         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4134                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4135                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4136                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4137                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4138
4139         if (adev->family == AMDGPU_FAMILY_RV) {
4140                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4141                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4142                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4143                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4144                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4145         }
4146
4147         /*
4148          * Only supported for 64bpp on Raven, will be filtered on format in
4149          * dm_plane_format_mod_supported.
4150          */
4151         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4152                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4153                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4154
4155         if (adev->family == AMDGPU_FAMILY_RV) {
4156                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4157                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4158                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4159         }
4160 }
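/*
 * Each entry is just an OR of AMD_FMT_MOD bitfields, and decoding is
 * the symmetric AMD_FMT_MOD_GET. A small sketch (the local variable is
 * illustrative):
 *
 *	uint64_t mod = AMD_FMT_MOD |
 *		AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *		AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 *		AMD_FMT_MOD_SET(DCC, 1);
 *
 *	AMD_FMT_MOD_GET(TILE, mod);		// AMD_FMT_MOD_TILE_GFX9_64K_S_X
 *	modifier_has_dcc(mod);			// true
 *	modifier_gfx9_swizzle_mode(mod) & 3;	// MICRO_SWIZZLE_S
 */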
4161
4162 static void
4163 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4164                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4165 {
4166         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4167
4168         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4169                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4170                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4171                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4172                     AMD_FMT_MOD_SET(DCC, 1) |
4173                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4174                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4175                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4176
4177         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4178                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4179                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4180                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4181                     AMD_FMT_MOD_SET(DCC, 1) |
4182                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4183                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4184                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4185                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4186
4187         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4188                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4189                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4190                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4191
4192         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4193                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4194                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4195                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4196
4198         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4199         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4200                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4201                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4202
4203         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4204                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4205                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4206 }
4207
4208 static void
4209 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4210                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4211 {
4212         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4213         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4214
4215         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4216                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4217                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4218                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4219                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4220                     AMD_FMT_MOD_SET(DCC, 1) |
4221                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4222                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4223                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4224                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4225
4226         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4228                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4229                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4230                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4231                     AMD_FMT_MOD_SET(DCC, 1) |
4232                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4233                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4234                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4235                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4236                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4237
4238         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4239                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4240                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4241                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4242                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4243
4244         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4245                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4246                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4247                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4248                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4249
4250         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4251         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4252                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4253                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4254
4255         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4256                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4257                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4258 }
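
/*
 * Illustrative sketch, never compiled (hence #if 0): the modifier words
 * assembled above are plain 64-bit bitfields, so AMD_FMT_MOD_GET(), the
 * same macro used elsewhere in this file for DCC_INDEPENDENT_64B, takes
 * each field back out. The function name below is hypothetical.
 */
#if 0
static void example_decode_modifier(void)
{
        uint64_t mod = AMD_FMT_MOD |
                       AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
                       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS);

        WARN_ON(AMD_FMT_MOD_GET(TILE, mod) != AMD_FMT_MOD_TILE_GFX9_64K_R_X);
        WARN_ON(AMD_FMT_MOD_GET(TILE_VERSION, mod) != AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS);
}
#endif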
4259
4260 static int
4261 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4262 {
4263         uint64_t size = 0, capacity = 128;
4264         *mods = NULL;
4265
4266         /* We have not hooked up any pre-GFX9 modifiers. */
4267         if (adev->family < AMDGPU_FAMILY_AI)
4268                 return 0;
4269
4270         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
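        /*
         * A failed allocation is not fatal here: the add_modifier() calls
         * below are expected to tolerate a NULL list, and the -ENOMEM
         * checks further down report the failure to the caller.
         */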
4271
4272         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4273                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4274                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4275                 return *mods ? 0 : -ENOMEM;
4276         }
4277
4278         switch (adev->family) {
4279         case AMDGPU_FAMILY_AI:
4280         case AMDGPU_FAMILY_RV:
4281                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4282                 break;
4283         case AMDGPU_FAMILY_NV:
4284         case AMDGPU_FAMILY_VGH:
4285                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4286                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4287                 else
4288                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4289                 break;
4290         }
4291
4292         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4293
4294         /* INVALID marks the end of the list. */
4295         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4296
4297         if (!*mods)
4298                 return -ENOMEM;
4299
4300         return 0;
4301 }
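
/*
 * Illustrative sketch, never compiled (hence #if 0): the array returned by
 * get_plane_modifiers() is terminated with DRM_FORMAT_MOD_INVALID, so a
 * caller holding only the pointer can recover the count. The helper name
 * below is hypothetical.
 */
#if 0
static unsigned int example_count_modifiers(const uint64_t *mods)
{
        unsigned int n = 0;

        /* DRM_FORMAT_MOD_LINEAR is counted; the INVALID terminator is not. */
        while (mods && mods[n] != DRM_FORMAT_MOD_INVALID)
                n++;

        return n;
}
#endif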
4302
4303 static int
4304 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4305                                           const struct amdgpu_framebuffer *afb,
4306                                           const enum surface_pixel_format format,
4307                                           const enum dc_rotation_angle rotation,
4308                                           const struct plane_size *plane_size,
4309                                           union dc_tiling_info *tiling_info,
4310                                           struct dc_plane_dcc_param *dcc,
4311                                           struct dc_plane_address *address,
4312                                           const bool force_disable_dcc)
4313 {
4314         const uint64_t modifier = afb->base.modifier;
4315         int ret;
4316
4317         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4318         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4319
4320         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4321                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4322
4323                 dcc->enable = 1;
4324                 dcc->meta_pitch = afb->base.pitches[1];
4325                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4326
4327                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4328                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4329         }
4330
4331         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4332         if (ret)
4333                 return ret;
4334
4335         return 0;
4336 }
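
/*
 * Worked example for the address split above: lower_32_bits() and
 * upper_32_bits() simply halve the 64-bit GPU address of the DCC metadata
 * surface. A hypothetical dcc_address of 0x0000000123456000 yields
 * meta_addr.low_part = 0x23456000 and meta_addr.high_part = 0x00000001.
 */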
4337
4338 static int
4339 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4340                              const struct amdgpu_framebuffer *afb,
4341                              const enum surface_pixel_format format,
4342                              const enum dc_rotation_angle rotation,
4343                              const uint64_t tiling_flags,
4344                              union dc_tiling_info *tiling_info,
4345                              struct plane_size *plane_size,
4346                              struct dc_plane_dcc_param *dcc,
4347                              struct dc_plane_address *address,
4348                              bool tmz_surface,
4349                              bool force_disable_dcc)
4350 {
4351         const struct drm_framebuffer *fb = &afb->base;
4352         int ret;
4353
4354         memset(tiling_info, 0, sizeof(*tiling_info));
4355         memset(plane_size, 0, sizeof(*plane_size));
4356         memset(dcc, 0, sizeof(*dcc));
4357         memset(address, 0, sizeof(*address));
4358
4359         address->tmz_surface = tmz_surface;
4360
4361         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4362                 uint64_t addr = afb->address + fb->offsets[0];
4363
4364                 plane_size->surface_size.x = 0;
4365                 plane_size->surface_size.y = 0;
4366                 plane_size->surface_size.width = fb->width;
4367                 plane_size->surface_size.height = fb->height;
4368                 plane_size->surface_pitch =
4369                         fb->pitches[0] / fb->format->cpp[0];
4370
4371                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4372                 address->grph.addr.low_part = lower_32_bits(addr);
4373                 address->grph.addr.high_part = upper_32_bits(addr);
4374         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4375                 uint64_t luma_addr = afb->address + fb->offsets[0];
4376                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4377
4378                 plane_size->surface_size.x = 0;
4379                 plane_size->surface_size.y = 0;
4380                 plane_size->surface_size.width = fb->width;
4381                 plane_size->surface_size.height = fb->height;
4382                 plane_size->surface_pitch =
4383                         fb->pitches[0] / fb->format->cpp[0];
4384
4385                 plane_size->chroma_size.x = 0;
4386                 plane_size->chroma_size.y = 0;
4387                 /* TODO: set these based on surface format */
4388                 plane_size->chroma_size.width = fb->width / 2;
4389                 plane_size->chroma_size.height = fb->height / 2;
4390
4391                 plane_size->chroma_pitch =
4392                         fb->pitches[1] / fb->format->cpp[1];
4393
4394                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4395                 address->video_progressive.luma_addr.low_part =
4396                         lower_32_bits(luma_addr);
4397                 address->video_progressive.luma_addr.high_part =
4398                         upper_32_bits(luma_addr);
4399                 address->video_progressive.chroma_addr.low_part =
4400                         lower_32_bits(chroma_addr);
4401                 address->video_progressive.chroma_addr.high_part =
4402                         upper_32_bits(chroma_addr);
4403         }
4404
4405         if (adev->family >= AMDGPU_FAMILY_AI) {
4406                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4407                                                                 rotation, plane_size,
4408                                                                 tiling_info, dcc,
4409                                                                 address,
4410                                                                 force_disable_dcc);
4411                 if (ret)
4412                         return ret;
4413         } else {
4414                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4415         }
4416
4417         return 0;
4418 }
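
/*
 * Worked example, assuming a typical linear NV12 layout: for a 1920x1080
 * NV12 framebuffer with fb->pitches[0] = 1920, the luma plane sits at
 * fb->offsets[0] and the interleaved CbCr plane at fb->offsets[1]. With
 * cpp[0] = 1 and cpp[1] = 2, the code above computes surface_pitch =
 * 1920 / 1 = 1920 and chroma_pitch = 1920 / 2 = 960, and the chroma_size
 * defaults give 960x540 (width / 2, height / 2).
 */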
4419
4420 static void
4421 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4422                                bool *per_pixel_alpha, bool *global_alpha,
4423                                int *global_alpha_value)
4424 {
4425         *per_pixel_alpha = false;
4426         *global_alpha = false;
4427         *global_alpha_value = 0xff;
4428
4429         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4430                 return;
4431
4432         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4433                 static const uint32_t alpha_formats[] = {
4434                         DRM_FORMAT_ARGB8888,
4435                         DRM_FORMAT_RGBA8888,
4436                         DRM_FORMAT_ABGR8888,
4437                 };
4438                 uint32_t format = plane_state->fb->format->format;
4439                 unsigned int i;
4440
4441                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4442                         if (format == alpha_formats[i]) {
4443                                 *per_pixel_alpha = true;
4444                                 break;
4445                         }
4446                 }
4447         }
4448
4449         if (plane_state->alpha < 0xffff) {
4450                 *global_alpha = true;
4451                 *global_alpha_value = plane_state->alpha >> 8;
4452         }
4453 }
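
/*
 * Worked example: plane_state->alpha is the 16-bit DRM alpha property, so
 * a half-transparent overlay with alpha = 0x8000 takes the branch above
 * and yields *global_alpha = true and *global_alpha_value = 0x8000 >> 8 =
 * 0x80, while a fully opaque plane (0xffff) keeps the defaults set at the
 * top of the function.
 */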
4454
4455 static int
4456 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4457                             const enum surface_pixel_format format,
4458                             enum dc_color_space *color_space)
4459 {
4460         bool full_range;
4461
4462         *color_space = COLOR_SPACE_SRGB;
4463
4464         /* DRM color properties only affect non-RGB formats. */
4465         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4466                 return 0;
4467
4468         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4469
4470         switch (plane_state->color_encoding) {
4471         case DRM_COLOR_YCBCR_BT601:
4472                 if (full_range)
4473                         *color_space = COLOR_SPACE_YCBCR601;
4474                 else
4475                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4476                 break;
4477
4478         case DRM_COLOR_YCBCR_BT709:
4479                 if (full_range)
4480                         *color_space = COLOR_SPACE_YCBCR709;
4481                 else
4482                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4483                 break;
4484
4485         case DRM_COLOR_YCBCR_BT2020:
4486                 if (full_range)
4487                         *color_space = COLOR_SPACE_2020_YCBCR;
4488                 else
4489                         return -EINVAL;
4490                 break;
4491
4492         default:
4493                 return -EINVAL;
4494         }
4495
4496         return 0;
4497 }
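
/*
 * Worked example: an NV12 plane (a format at or above
 * SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) with color_encoding =
 * DRM_COLOR_YCBCR_BT709 and color_range = DRM_COLOR_YCBCR_LIMITED_RANGE
 * resolves to COLOR_SPACE_YCBCR709_LIMITED above, whereas any RGB format
 * returns early with the COLOR_SPACE_SRGB default.
 */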
4498
4499 static int
4500 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4501                             const struct drm_plane_state *plane_state,
4502                             const uint64_t tiling_flags,
4503                             struct dc_plane_info *plane_info,
4504                             struct dc_plane_address *address,
4505                             bool tmz_surface,
4506                             bool force_disable_dcc)
4507 {
4508         const struct drm_framebuffer *fb = plane_state->fb;
4509         const struct amdgpu_framebuffer *afb =
4510                 to_amdgpu_framebuffer(plane_state->fb);
4511         struct drm_format_name_buf format_name;
4512         int ret;
4513
4514         memset(plane_info, 0, sizeof(*plane_info));
4515
4516         switch (fb->format->format) {
4517         case DRM_FORMAT_C8:
4518                 plane_info->format =
4519                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4520                 break;
4521         case DRM_FORMAT_RGB565:
4522                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4523                 break;
4524         case DRM_FORMAT_XRGB8888:
4525         case DRM_FORMAT_ARGB8888:
4526                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4527                 break;
4528         case DRM_FORMAT_XRGB2101010:
4529         case DRM_FORMAT_ARGB2101010:
4530                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4531                 break;
4532         case DRM_FORMAT_XBGR2101010:
4533         case DRM_FORMAT_ABGR2101010:
4534                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4535                 break;
4536         case DRM_FORMAT_XBGR8888:
4537         case DRM_FORMAT_ABGR8888:
4538                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4539                 break;
4540         case DRM_FORMAT_NV21:
4541                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4542                 break;
4543         case DRM_FORMAT_NV12:
4544                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4545                 break;
4546         case DRM_FORMAT_P010:
4547                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4548                 break;
4549         case DRM_FORMAT_XRGB16161616F:
4550         case DRM_FORMAT_ARGB16161616F:
4551                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4552                 break;
4553         case DRM_FORMAT_XBGR16161616F:
4554         case DRM_FORMAT_ABGR16161616F:
4555                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4556                 break;
4557         default:
4558                 DRM_ERROR(
4559                         "Unsupported screen format %s\n",
4560                         drm_get_format_name(fb->format->format, &format_name));
4561                 return -EINVAL;
4562         }
4563
4564         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4565         case DRM_MODE_ROTATE_0:
4566                 plane_info->rotation = ROTATION_ANGLE_0;
4567                 break;
4568         case DRM_MODE_ROTATE_90:
4569                 plane_info->rotation = ROTATION_ANGLE_90;
4570                 break;
4571         case DRM_MODE_ROTATE_180:
4572                 plane_info->rotation = ROTATION_ANGLE_180;
4573                 break;
4574         case DRM_MODE_ROTATE_270:
4575                 plane_info->rotation = ROTATION_ANGLE_270;
4576                 break;
4577         default:
4578                 plane_info->rotation = ROTATION_ANGLE_0;
4579                 break;
4580         }
4581
4582         plane_info->visible = true;
4583         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4584
4585         plane_info->layer_index = 0;
4586
4587         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4588                                           &plane_info->color_space);
4589         if (ret)
4590                 return ret;
4591
4592         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4593                                            plane_info->rotation, tiling_flags,
4594                                            &plane_info->tiling_info,
4595                                            &plane_info->plane_size,
4596                                            &plane_info->dcc, address, tmz_surface,
4597                                            force_disable_dcc);
4598         if (ret)
4599                 return ret;
4600
4601         fill_blending_from_plane_state(
4602                 plane_state, &plane_info->per_pixel_alpha,
4603                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4604
4605         return 0;
4606 }
4607
4608 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4609                                     struct dc_plane_state *dc_plane_state,
4610                                     struct drm_plane_state *plane_state,
4611                                     struct drm_crtc_state *crtc_state)
4612 {
4613         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4614         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4615         struct dc_scaling_info scaling_info;
4616         struct dc_plane_info plane_info;
4617         int ret;
4618         bool force_disable_dcc = false;
4619
4620         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4621         if (ret)
4622                 return ret;
4623
4624         dc_plane_state->src_rect = scaling_info.src_rect;
4625         dc_plane_state->dst_rect = scaling_info.dst_rect;
4626         dc_plane_state->clip_rect = scaling_info.clip_rect;
4627         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4628
4629         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4630         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4631                                           afb->tiling_flags,
4632                                           &plane_info,
4633                                           &dc_plane_state->address,
4634                                           afb->tmz_surface,
4635                                           force_disable_dcc);
4636         if (ret)
4637                 return ret;
4638
4639         dc_plane_state->format = plane_info.format;
4640         dc_plane_state->color_space = plane_info.color_space;
4642         dc_plane_state->plane_size = plane_info.plane_size;
4643         dc_plane_state->rotation = plane_info.rotation;
4644         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4645         dc_plane_state->stereo_format = plane_info.stereo_format;
4646         dc_plane_state->tiling_info = plane_info.tiling_info;
4647         dc_plane_state->visible = plane_info.visible;
4648         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4649         dc_plane_state->global_alpha = plane_info.global_alpha;
4650         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4651         dc_plane_state->dcc = plane_info.dcc;
4652         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4653
4654         /*
4655          * Always set input transfer function, since plane state is refreshed
4656          * every time.
4657          */
4658         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4659         if (ret)
4660                 return ret;
4661
4662         return 0;
4663 }
4664
4665 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4666                                            const struct dm_connector_state *dm_state,
4667                                            struct dc_stream_state *stream)
4668 {
4669         enum amdgpu_rmx_type rmx_type;
4670
4671         struct rect src = { 0 }; /* viewport in composition space */
4672         struct rect dst = { 0 }; /* stream addressable area */
4673
4674         /* no mode. nothing to be done */
4675         if (!mode)
4676                 return;
4677
4678         /* Full screen scaling by default */
4679         src.width = mode->hdisplay;
4680         src.height = mode->vdisplay;
4681         dst.width = stream->timing.h_addressable;
4682         dst.height = stream->timing.v_addressable;
4683
4684         if (dm_state) {
4685                 rmx_type = dm_state->scaling;
4686                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4687                         if (src.width * dst.height <
4688                                         src.height * dst.width) {
4689                                 /* height needs less upscaling/more downscaling */
4690                                 dst.width = src.width *
4691                                                 dst.height / src.height;
4692                         } else {
4693                                 /* width needs less upscaling/more downscaling */
4694                                 dst.height = src.height *
4695                                                 dst.width / src.width;
4696                         }
4697                 } else if (rmx_type == RMX_CENTER) {
4698                         dst = src;
4699                 }
4700
4701                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4702                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4703
4704                 if (dm_state->underscan_enable) {
4705                         dst.x += dm_state->underscan_hborder / 2;
4706                         dst.y += dm_state->underscan_vborder / 2;
4707                         dst.width -= dm_state->underscan_hborder;
4708                         dst.height -= dm_state->underscan_vborder;
4709                 }
4710         }
4711
4712         stream->src = src;
4713         stream->dst = dst;
4714
4715         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4716                         dst.x, dst.y, dst.width, dst.height);
4717
4718 }
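
/*
 * Worked example of the RMX_ASPECT math above: scaling a 1920x1080 source
 * onto a 1280x1024 addressable area gives src.width * dst.height =
 * 1966080 and src.height * dst.width = 1382400, so the else branch runs
 * and dst.height = 1080 * 1280 / 1920 = 720. The result is a 1280x720
 * image centered with dst.y = (1024 - 720) / 2 = 152, i.e. letterboxed.
 */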
4719
4720 static enum dc_color_depth
4721 convert_color_depth_from_display_info(const struct drm_connector *connector,
4722                                       bool is_y420, int requested_bpc)
4723 {
4724         uint8_t bpc;
4725
4726         if (is_y420) {
4727                 bpc = 8;
4728
4729                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4730                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4731                         bpc = 16;
4732                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4733                         bpc = 12;
4734                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4735                         bpc = 10;
4736         } else {
4737                 bpc = (uint8_t)connector->display_info.bpc;
4738                 /* Assume 8 bpc by default if no bpc is specified. */
4739                 bpc = bpc ? bpc : 8;
4740         }
4741
4742         if (requested_bpc > 0) {
4743                 /*
4744                  * Cap display bpc based on the user requested value.
4745                  *
4746                  * The value for state->max_bpc may not be correctly updated
4747                  * depending on when the connector gets added to the state
4748                  * or if this was called outside of atomic check, so it
4749                  * can't be used directly.
4750                  */
4751                 bpc = min_t(u8, bpc, requested_bpc);
4752
4753                 /* Round down to the nearest even number. */
4754                 bpc = bpc - (bpc & 1);
4755         }
4756
4757         switch (bpc) {
4758         case 0:
4759                 /*
4760                  * Temporary workaround: DRM doesn't parse color depth for
4761                  * EDID revisions before 1.4.
4762                  * TODO: Fix EDID parsing.
4763                  */
4764                 return COLOR_DEPTH_888;
4765         case 6:
4766                 return COLOR_DEPTH_666;
4767         case 8:
4768                 return COLOR_DEPTH_888;
4769         case 10:
4770                 return COLOR_DEPTH_101010;
4771         case 12:
4772                 return COLOR_DEPTH_121212;
4773         case 14:
4774                 return COLOR_DEPTH_141414;
4775         case 16:
4776                 return COLOR_DEPTH_161616;
4777         default:
4778                 return COLOR_DEPTH_UNDEFINED;
4779         }
4780 }
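
/*
 * Worked example: a sink reporting bpc = 12 with a user requested_bpc of
 * 11 is first capped, min(12, 11) = 11, then rounded down to the nearest
 * even value, 11 - (11 & 1) = 10, and therefore resolves to
 * COLOR_DEPTH_101010.
 */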
4781
4782 static enum dc_aspect_ratio
4783 get_aspect_ratio(const struct drm_display_mode *mode_in)
4784 {
4785         /* 1-1 mapping, since both enums follow the HDMI spec. */
4786         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4787 }
4788
4789 static enum dc_color_space
4790 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4791 {
4792         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4793
4794         switch (dc_crtc_timing->pixel_encoding) {
4795         case PIXEL_ENCODING_YCBCR422:
4796         case PIXEL_ENCODING_YCBCR444:
4797         case PIXEL_ENCODING_YCBCR420:
4798         {
4799                 /*
4800                  * According to the HDMI spec, 27.03 MHz (270300 in 100 Hz
4801                  * units) is the separation point between HDTV and SDTV; we
4802                  * use YCbCr709 above it and YCbCr601 below it.
4803                  */
4804                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4805                         if (dc_crtc_timing->flags.Y_ONLY)
4806                                 color_space =
4807                                         COLOR_SPACE_YCBCR709_LIMITED;
4808                         else
4809                                 color_space = COLOR_SPACE_YCBCR709;
4810                 } else {
4811                         if (dc_crtc_timing->flags.Y_ONLY)
4812                                 color_space =
4813                                         COLOR_SPACE_YCBCR601_LIMITED;
4814                         else
4815                                 color_space = COLOR_SPACE_YCBCR601;
4816                 }
4817
4818         }
4819         break;
4820         case PIXEL_ENCODING_RGB:
4821                 color_space = COLOR_SPACE_SRGB;
4822                 break;
4823
4824         default:
4825                 WARN_ON(1);
4826                 break;
4827         }
4828
4829         return color_space;
4830 }
4831
4832 static bool adjust_colour_depth_from_display_info(
4833         struct dc_crtc_timing *timing_out,
4834         const struct drm_display_info *info)
4835 {
4836         enum dc_color_depth depth = timing_out->display_color_depth;
4837         int normalized_clk;
4838         do {
4839                 normalized_clk = timing_out->pix_clk_100hz / 10;
4840                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4841                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4842                         normalized_clk /= 2;
4843                 /* Adjusting pix clock following on HDMI spec based on colour depth */
4844                 switch (depth) {
4845                 case COLOR_DEPTH_888:
4846                         break;
4847                 case COLOR_DEPTH_101010:
4848                         normalized_clk = (normalized_clk * 30) / 24;
4849                         break;
4850                 case COLOR_DEPTH_121212:
4851                         normalized_clk = (normalized_clk * 36) / 24;
4852                         break;
4853                 case COLOR_DEPTH_161616:
4854                         normalized_clk = (normalized_clk * 48) / 24;
4855                         break;
4856                 default:
4857                         /* The above depths are the only ones valid for HDMI. */
4858                         return false;
4859                 }
4860                 if (normalized_clk <= info->max_tmds_clock) {
4861                         timing_out->display_color_depth = depth;
4862                         return true;
4863                 }
4864         } while (--depth > COLOR_DEPTH_666);
4865         return false;
4866 }
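
/*
 * Worked example: for 4k60 (pix_clk_100hz = 5940000, so normalized_clk
 * starts at 594000 kHz) on a sink with max_tmds_clock = 600000 kHz, RGB
 * needs 891000 kHz at 12 bpc and 742500 kHz at 10 bpc, both too high, so
 * the loop settles on 8 bpc (594000 kHz). With YCbCr 4:2:0 the clock
 * halves to 297000 kHz and even 12 bpc (445500 kHz) fits.
 */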
4867
4868 static void fill_stream_properties_from_drm_display_mode(
4869         struct dc_stream_state *stream,
4870         const struct drm_display_mode *mode_in,
4871         const struct drm_connector *connector,
4872         const struct drm_connector_state *connector_state,
4873         const struct dc_stream_state *old_stream,
4874         int requested_bpc)
4875 {
4876         struct dc_crtc_timing *timing_out = &stream->timing;
4877         const struct drm_display_info *info = &connector->display_info;
4878         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4879         struct hdmi_vendor_infoframe hv_frame;
4880         struct hdmi_avi_infoframe avi_frame;
4881
4882         memset(&hv_frame, 0, sizeof(hv_frame));
4883         memset(&avi_frame, 0, sizeof(avi_frame));
4884
4885         timing_out->h_border_left = 0;
4886         timing_out->h_border_right = 0;
4887         timing_out->v_border_top = 0;
4888         timing_out->v_border_bottom = 0;
4889         /* TODO: un-hardcode */
4890         if (drm_mode_is_420_only(info, mode_in)
4891                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4892                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4893         else if (drm_mode_is_420_also(info, mode_in)
4894                         && aconnector->force_yuv420_output)
4895                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4896         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4897                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4898                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4899         else
4900                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4901
4902         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4903         timing_out->display_color_depth = convert_color_depth_from_display_info(
4904                 connector,
4905                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4906                 requested_bpc);
4907         timing_out->scan_type = SCANNING_TYPE_NODATA;
4908         timing_out->hdmi_vic = 0;
4909
4910         if (old_stream) {
4911                 timing_out->vic = old_stream->timing.vic;
4912                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4913                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4914         } else {
4915                 timing_out->vic = drm_match_cea_mode(mode_in);
4916                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4917                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4918                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4919                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4920         }
4921
4922         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4923                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4924                 timing_out->vic = avi_frame.video_code;
4925                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4926                 timing_out->hdmi_vic = hv_frame.vic;
4927         }
4928
4929         timing_out->h_addressable = mode_in->crtc_hdisplay;
4930         timing_out->h_total = mode_in->crtc_htotal;
4931         timing_out->h_sync_width =
4932                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4933         timing_out->h_front_porch =
4934                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4935         timing_out->v_total = mode_in->crtc_vtotal;
4936         timing_out->v_addressable = mode_in->crtc_vdisplay;
4937         timing_out->v_front_porch =
4938                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4939         timing_out->v_sync_width =
4940                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4941         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4942         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4943
4944         stream->output_color_space = get_output_color_space(timing_out);
4945
4946         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4947         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4948         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4949                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4950                     drm_mode_is_420_also(info, mode_in) &&
4951                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4952                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4953                         adjust_colour_depth_from_display_info(timing_out, info);
4954                 }
4955         }
4956 }
4957
4958 static void fill_audio_info(struct audio_info *audio_info,
4959                             const struct drm_connector *drm_connector,
4960                             const struct dc_sink *dc_sink)
4961 {
4962         int i = 0;
4963         int cea_revision = 0;
4964         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4965
4966         audio_info->manufacture_id = edid_caps->manufacturer_id;
4967         audio_info->product_id = edid_caps->product_id;
4968
4969         cea_revision = drm_connector->display_info.cea_rev;
4970
4971         strscpy(audio_info->display_name,
4972                 edid_caps->display_name,
4973                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4974
4975         if (cea_revision >= 3) {
4976                 audio_info->mode_count = edid_caps->audio_mode_count;
4977
4978                 for (i = 0; i < audio_info->mode_count; ++i) {
4979                         audio_info->modes[i].format_code =
4980                                         (enum audio_format_code)
4981                                         (edid_caps->audio_modes[i].format_code);
4982                         audio_info->modes[i].channel_count =
4983                                         edid_caps->audio_modes[i].channel_count;
4984                         audio_info->modes[i].sample_rates.all =
4985                                         edid_caps->audio_modes[i].sample_rate;
4986                         audio_info->modes[i].sample_size =
4987                                         edid_caps->audio_modes[i].sample_size;
4988                 }
4989         }
4990
4991         audio_info->flags.all = edid_caps->speaker_flags;
4992
4993         /* TODO: We only check for the progressive mode, check for interlace mode too */
4994         if (drm_connector->latency_present[0]) {
4995                 audio_info->video_latency = drm_connector->video_latency[0];
4996                 audio_info->audio_latency = drm_connector->audio_latency[0];
4997         }
4998
4999         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5000
5001 }
5002
5003 static void
5004 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5005                                       struct drm_display_mode *dst_mode)
5006 {
5007         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5008         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5009         dst_mode->crtc_clock = src_mode->crtc_clock;
5010         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5011         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5012         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5013         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5014         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5015         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5016         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5017         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5018         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5019         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5020         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5021 }
5022
5023 static void
5024 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5025                                         const struct drm_display_mode *native_mode,
5026                                         bool scale_enabled)
5027 {
5028         if (scale_enabled) {
5029                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5030         } else if (native_mode->clock == drm_mode->clock &&
5031                         native_mode->htotal == drm_mode->htotal &&
5032                         native_mode->vtotal == drm_mode->vtotal) {
5033                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5034         } else {
5035                 /* no scaling and not an amdgpu-inserted mode, no need to patch */
5036         }
5037 }
5038
5039 static struct dc_sink *
5040 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5041 {
5042         struct dc_sink_init_data sink_init_data = { 0 };
5043         struct dc_sink *sink = NULL;
5044         sink_init_data.link = aconnector->dc_link;
5045         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5046
5047         sink = dc_sink_create(&sink_init_data);
5048         if (!sink) {
5049                 DRM_ERROR("Failed to create sink!\n");
5050                 return NULL;
5051         }
5052         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5053
5054         return sink;
5055 }
5056
5057 static void set_multisync_trigger_params(
5058                 struct dc_stream_state *stream)
5059 {
5060         if (stream->triggered_crtc_reset.enabled) {
5061                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5062                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5063         }
5064 }
5065
5066 static void set_master_stream(struct dc_stream_state *stream_set[],
5067                               int stream_count)
5068 {
5069         int j, highest_rfr = 0, master_stream = 0;
5070
5071         for (j = 0;  j < stream_count; j++) {
5072                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5073                         int refresh_rate = 0;
5074
5075                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5076                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5077                         if (refresh_rate > highest_rfr) {
5078                                 highest_rfr = refresh_rate;
5079                                 master_stream = j;
5080                         }
5081                 }
5082         }
5083         for (j = 0;  j < stream_count; j++) {
5084                 if (stream_set[j])
5085                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5086         }
5087 }
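
/*
 * Worked example of the refresh-rate math above: a 1080p60 stream has
 * pix_clk_100hz = 1485000 with h_total = 2200 and v_total = 1125, so
 * (1485000 * 100) / (2200 * 1125) = 60. Among the trigger-enabled streams
 * the highest such value wins and its index becomes master_stream.
 */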
5088
5089 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5090 {
5091         int i = 0;
5092
5093         if (context->stream_count < 2)
5094                 return;
5095         for (i = 0; i < context->stream_count ; i++) {
5096                 if (!context->streams[i])
5097                         continue;
5098                 /*
5099                  * TODO: add a function to read AMD VSDB bits and set
5100                  * crtc_sync_master.multi_sync_enabled flag
5101                  * For now it's set to false
5102                  */
5103                 set_multisync_trigger_params(context->streams[i]);
5104         }
5105         set_master_stream(context->streams, context->stream_count);
5106 }
5107
5108 static struct dc_stream_state *
5109 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5110                        const struct drm_display_mode *drm_mode,
5111                        const struct dm_connector_state *dm_state,
5112                        const struct dc_stream_state *old_stream,
5113                        int requested_bpc)
5114 {
5115         struct drm_display_mode *preferred_mode = NULL;
5116         struct drm_connector *drm_connector;
5117         const struct drm_connector_state *con_state =
5118                 dm_state ? &dm_state->base : NULL;
5119         struct dc_stream_state *stream = NULL;
5120         struct drm_display_mode mode = *drm_mode;
5121         bool native_mode_found = false;
5122         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5123         int mode_refresh;
5124         int preferred_refresh = 0;
5125 #if defined(CONFIG_DRM_AMD_DC_DCN)
5126         struct dsc_dec_dpcd_caps dsc_caps;
5127 #endif
5128         uint32_t link_bandwidth_kbps;
5129
5130         struct dc_sink *sink = NULL;
5131         if (aconnector == NULL) {
5132                 DRM_ERROR("aconnector is NULL!\n");
5133                 return stream;
5134         }
5135
5136         drm_connector = &aconnector->base;
5137
5138         if (!aconnector->dc_sink) {
5139                 sink = create_fake_sink(aconnector);
5140                 if (!sink)
5141                         return stream;
5142         } else {
5143                 sink = aconnector->dc_sink;
5144                 dc_sink_retain(sink);
5145         }
5146
5147         stream = dc_create_stream_for_sink(sink);
5148
5149         if (stream == NULL) {
5150                 DRM_ERROR("Failed to create stream for sink!\n");
5151                 goto finish;
5152         }
5153
5154         stream->dm_stream_context = aconnector;
5155
5156         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5157                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5158
5159         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5160                 /* Search for preferred mode */
5161                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5162                         native_mode_found = true;
5163                         break;
5164                 }
5165         }
5166         if (!native_mode_found)
5167                 preferred_mode = list_first_entry_or_null(
5168                                 &aconnector->base.modes,
5169                                 struct drm_display_mode,
5170                                 head);
5171
5172         mode_refresh = drm_mode_vrefresh(&mode);
5173
5174         if (preferred_mode == NULL) {
5175                 /*
5176                  * This may not be an error; the use case is when we have no
5177                  * usermode calls to reset and set the mode upon hotplug. In
5178                  * this case, we call set mode ourselves to restore the
5179                  * previous mode, and the mode list may not be filled in yet.
5180                  */
5181                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5182         } else {
5183                 decide_crtc_timing_for_drm_display_mode(
5184                                 &mode, preferred_mode,
5185                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5186                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5187         }
5188
5189         if (!dm_state)
5190                 drm_mode_set_crtcinfo(&mode, 0);
5191
5192         /*
5193          * If scaling is enabled and the refresh rate didn't change,
5194          * we copy the vic and polarities of the old timings.
5195          */
5196         if (!scale || mode_refresh != preferred_refresh)
5197                 fill_stream_properties_from_drm_display_mode(stream,
5198                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
5199         else
5200                 fill_stream_properties_from_drm_display_mode(stream,
5201                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5202
5203         stream->timing.flags.DSC = 0;
5204
5205         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5206 #if defined(CONFIG_DRM_AMD_DC_DCN)
5207                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5208                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5209                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5210                                       &dsc_caps);
5211 #endif
5212                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5213                                                              dc_link_get_link_cap(aconnector->dc_link));
5214
5215 #if defined(CONFIG_DRM_AMD_DC_DCN)
5216                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5217                         /* Set DSC policy according to dsc_clock_en */
5218                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5219                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5220
5221                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5222                                                   &dsc_caps,
5223                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5224                                                   0,
5225                                                   link_bandwidth_kbps,
5226                                                   &stream->timing,
5227                                                   &stream->timing.dsc_cfg))
5228                                 stream->timing.flags.DSC = 1;
5229                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5230                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5231                                 stream->timing.flags.DSC = 1;
5232
5233                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5234                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5235
5236                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5237                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5238
5239                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5240                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5241                 }
5242 #endif
5243         }
5244
5245         update_stream_scaling_settings(&mode, dm_state, stream);
5246
5247         fill_audio_info(
5248                 &stream->audio_info,
5249                 drm_connector,
5250                 sink);
5251
5252         update_stream_signal(stream, sink);
5253
5254         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5255                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5256
5257         if (stream->link->psr_settings.psr_feature_enabled) {
5258                 /*
5259                  * Decide whether the stream supports VSC SDP colorimetry
5260                  * before building the VSC info packet.
5261                  */
5262                 stream->use_vsc_sdp_for_colorimetry = false;
5263                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5264                         stream->use_vsc_sdp_for_colorimetry =
5265                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5266                 } else {
5267                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5268                                 stream->use_vsc_sdp_for_colorimetry = true;
5269                 }
5270                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5271         }
5272 finish:
5273         dc_sink_release(sink);
5274
5275         return stream;
5276 }
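
/*
 * Worked example, using standard DisplayPort figures rather than anything
 * computed here: for the link_bandwidth_kbps input above, a 4-lane HBR2
 * link carries 4 * 5.4 Gb/s * 8/10 (8b/10b coding) = 17.28 Gb/s of
 * payload, roughly 17280000 kbps, which is the budget DSC is weighed
 * against in dc_dsc_compute_config().
 */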
5277
5278 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5279 {
5280         drm_crtc_cleanup(crtc);
5281         kfree(crtc);
5282 }
5283
5284 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5285                                   struct drm_crtc_state *state)
5286 {
5287         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5288
5289         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5290         if (cur->stream)
5291                 dc_stream_release(cur->stream);
5292
5293
5294         __drm_atomic_helper_crtc_destroy_state(state);
5295
5296
5297         kfree(state);
5298 }
5299
5300 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5301 {
5302         struct dm_crtc_state *state;
5303
5304         if (crtc->state)
5305                 dm_crtc_destroy_state(crtc, crtc->state);
5306
5307         state = kzalloc(sizeof(*state), GFP_KERNEL);
5308         if (WARN_ON(!state))
5309                 return;
5310
5311         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5312 }
5313
5314 static struct drm_crtc_state *
5315 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5316 {
5317         struct dm_crtc_state *state, *cur;
5318
5319         if (WARN_ON(!crtc->state))
5320                 return NULL;
5321
5322         cur = to_dm_crtc_state(crtc->state);
5323
5324         state = kzalloc(sizeof(*state), GFP_KERNEL);
5325         if (!state)
5326                 return NULL;
5327
5328         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5329
5330         if (cur->stream) {
5331                 state->stream = cur->stream;
5332                 dc_stream_retain(state->stream);
5333         }
5334
5335         state->active_planes = cur->active_planes;
5336         state->vrr_infopacket = cur->vrr_infopacket;
5337         state->abm_level = cur->abm_level;
5338         state->vrr_supported = cur->vrr_supported;
5339         state->freesync_config = cur->freesync_config;
5340         state->crc_src = cur->crc_src;
5341         state->cm_has_degamma = cur->cm_has_degamma;
5342         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5343 #ifdef CONFIG_DEBUG_FS
5344         state->crc_window = cur->crc_window;
5345 #endif
5346         /* TODO: Duplicate dc_stream after the stream object is flattened */
5347
5348         return &state->base;
5349 }
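
/*
 * Note on refcounting: the dc_stream_retain() in dm_crtc_duplicate_state()
 * pairs with the dc_stream_release() in dm_crtc_destroy_state() above, so
 * every duplicated CRTC state holds its own reference on the stream for as
 * long as the state object lives.
 */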
5350
5351 #ifdef CONFIG_DEBUG_FS
5352 int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5353                                             struct drm_crtc_state *crtc_state,
5354                                             struct drm_property *property,
5355                                             uint64_t val)
5356 {
5357         struct drm_device *dev = crtc->dev;
5358         struct amdgpu_device *adev = drm_to_adev(dev);
5359         struct dm_crtc_state *dm_new_state =
5360                 to_dm_crtc_state(crtc_state);
5361
5362         if (property == adev->dm.crc_win_x_start_property)
5363                 dm_new_state->crc_window.x_start = val;
5364         else if (property == adev->dm.crc_win_y_start_property)
5365                 dm_new_state->crc_window.y_start = val;
5366         else if (property == adev->dm.crc_win_x_end_property)
5367                 dm_new_state->crc_window.x_end = val;
5368         else if (property == adev->dm.crc_win_y_end_property)
5369                 dm_new_state->crc_window.y_end = val;
5370         else
5371                 return -EINVAL;
5372
5373         return 0;
5374 }
5375
5376 int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5377                                             const struct drm_crtc_state *state,
5378                                             struct drm_property *property,
5379                                             uint64_t *val)
5380 {
5381         struct drm_device *dev = crtc->dev;
5382         struct amdgpu_device *adev = drm_to_adev(dev);
5383         struct dm_crtc_state *dm_state =
5384                 to_dm_crtc_state(state);
5385
5386         if (property == adev->dm.crc_win_x_start_property)
5387                 *val = dm_state->crc_window.x_start;
5388         else if (property == adev->dm.crc_win_y_start_property)
5389                 *val = dm_state->crc_window.y_start;
5390         else if (property == adev->dm.crc_win_x_end_property)
5391                 *val = dm_state->crc_window.x_end;
5392         else if (property == adev->dm.crc_win_y_end_property)
5393                 *val = dm_state->crc_window.y_end;
5394         else
5395                 return -EINVAL;
5396
5397         return 0;
5398 }
5399 #endif
5400
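/*
 * Enable/disable the VUPDATE interrupt for this CRTC in DC. VUPDATE is
 * only needed in addition to VBLANK while variable refresh rate is
 * active (see dm_set_vblank() below); the IRQ source is indexed by the
 * OTG instance backing the CRTC.
 */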
5401 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5402 {
5403         enum dc_irq_source irq_source;
5404         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5405         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5406         int rc;
5407
5408         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5409
5410         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5411
5412         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5413                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5414         return rc;
5415 }
5416
5417 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5418 {
5419         enum dc_irq_source irq_source;
5420         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5421         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5422         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5423         int rc = 0;
5424
5425         if (enable) {
5426                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5427                 if (amdgpu_dm_vrr_active(acrtc_state))
5428                         rc = dm_set_vupdate_irq(crtc, true);
5429         } else {
5430                 /* vblank irq off -> vupdate irq off */
5431                 rc = dm_set_vupdate_irq(crtc, false);
5432         }
5433
5434         if (rc)
5435                 return rc;
5436
5437         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5438         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5439 }
5440
5441 static int dm_enable_vblank(struct drm_crtc *crtc)
5442 {
5443         return dm_set_vblank(crtc, true);
5444 }
5445
5446 static void dm_disable_vblank(struct drm_crtc *crtc)
5447 {
5448         dm_set_vblank(crtc, false);
5449 }
5450
5451 /* Implements only the options currently available for the driver */
5452 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5453         .reset = dm_crtc_reset_state,
5454         .destroy = amdgpu_dm_crtc_destroy,
5455         .gamma_set = drm_atomic_helper_legacy_gamma_set,
5456         .set_config = drm_atomic_helper_set_config,
5457         .page_flip = drm_atomic_helper_page_flip,
5458         .atomic_duplicate_state = dm_crtc_duplicate_state,
5459         .atomic_destroy_state = dm_crtc_destroy_state,
5460         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5461         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5462         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5463         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5464         .enable_vblank = dm_enable_vblank,
5465         .disable_vblank = dm_disable_vblank,
5466         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5467 #ifdef CONFIG_DEBUG_FS
5468         .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5469         .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5470 #endif
5471 };
5472
5473 static enum drm_connector_status
5474 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5475 {
5476         bool connected;
5477         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5478
5479         /*
5480          * Notes:
5481          * 1. This interface is NOT called in context of HPD irq.
5482          * 2. This interface *is called* in the context of a user-mode ioctl,
5483          * which makes it a bad place for *any* MST-related activity.
5484          */
5485
5486         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5487             !aconnector->fake_enable)
5488                 connected = (aconnector->dc_sink != NULL);
5489         else
5490                 connected = (aconnector->base.force == DRM_FORCE_ON);
5491
5492         update_subconnector_property(aconnector);
5493
5494         return (connected ? connector_status_connected :
5495                         connector_status_disconnected);
5496 }
5497
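/*
 * Map the driver-private connector properties onto dm_connector_state.
 * Userspace reaches this through the standard atomic property ioctl; a
 * hypothetical libdrm sequence (the property and connector ids here are
 * placeholders the client would discover at runtime) would look like:
 *
 *     drmModeAtomicReq *req = drmModeAtomicAlloc();
 *     drmModeAtomicAddProperty(req, connector_id, scaling_prop_id,
 *                              DRM_MODE_SCALE_ASPECT);
 *     drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
 *     drmModeAtomicFree(req);
 *
 * Re-setting an identical scaling mode is treated as a no-op.
 */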
5498 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5499                                             struct drm_connector_state *connector_state,
5500                                             struct drm_property *property,
5501                                             uint64_t val)
5502 {
5503         struct drm_device *dev = connector->dev;
5504         struct amdgpu_device *adev = drm_to_adev(dev);
5505         struct dm_connector_state *dm_old_state =
5506                 to_dm_connector_state(connector->state);
5507         struct dm_connector_state *dm_new_state =
5508                 to_dm_connector_state(connector_state);
5509
5510         int ret = -EINVAL;
5511
5512         if (property == dev->mode_config.scaling_mode_property) {
5513                 enum amdgpu_rmx_type rmx_type;
5514
5515                 switch (val) {
5516                 case DRM_MODE_SCALE_CENTER:
5517                         rmx_type = RMX_CENTER;
5518                         break;
5519                 case DRM_MODE_SCALE_ASPECT:
5520                         rmx_type = RMX_ASPECT;
5521                         break;
5522                 case DRM_MODE_SCALE_FULLSCREEN:
5523                         rmx_type = RMX_FULL;
5524                         break;
5525                 case DRM_MODE_SCALE_NONE:
5526                 default:
5527                         rmx_type = RMX_OFF;
5528                         break;
5529                 }
5530
5531                 if (dm_old_state->scaling == rmx_type)
5532                         return 0;
5533
5534                 dm_new_state->scaling = rmx_type;
5535                 ret = 0;
5536         } else if (property == adev->mode_info.underscan_hborder_property) {
5537                 dm_new_state->underscan_hborder = val;
5538                 ret = 0;
5539         } else if (property == adev->mode_info.underscan_vborder_property) {
5540                 dm_new_state->underscan_vborder = val;
5541                 ret = 0;
5542         } else if (property == adev->mode_info.underscan_property) {
5543                 dm_new_state->underscan_enable = val;
5544                 ret = 0;
5545         } else if (property == adev->mode_info.abm_level_property) {
5546                 dm_new_state->abm_level = val;
5547                 ret = 0;
5548         }
5549
5550         return ret;
5551 }
5552
5553 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5554                                             const struct drm_connector_state *state,
5555                                             struct drm_property *property,
5556                                             uint64_t *val)
5557 {
5558         struct drm_device *dev = connector->dev;
5559         struct amdgpu_device *adev = drm_to_adev(dev);
5560         struct dm_connector_state *dm_state =
5561                 to_dm_connector_state(state);
5562         int ret = -EINVAL;
5563
5564         if (property == dev->mode_config.scaling_mode_property) {
5565                 switch (dm_state->scaling) {
5566                 case RMX_CENTER:
5567                         *val = DRM_MODE_SCALE_CENTER;
5568                         break;
5569                 case RMX_ASPECT:
5570                         *val = DRM_MODE_SCALE_ASPECT;
5571                         break;
5572                 case RMX_FULL:
5573                         *val = DRM_MODE_SCALE_FULLSCREEN;
5574                         break;
5575                 case RMX_OFF:
5576                 default:
5577                         *val = DRM_MODE_SCALE_NONE;
5578                         break;
5579                 }
5580                 ret = 0;
5581         } else if (property == adev->mode_info.underscan_hborder_property) {
5582                 *val = dm_state->underscan_hborder;
5583                 ret = 0;
5584         } else if (property == adev->mode_info.underscan_vborder_property) {
5585                 *val = dm_state->underscan_vborder;
5586                 ret = 0;
5587         } else if (property == adev->mode_info.underscan_property) {
5588                 *val = dm_state->underscan_enable;
5589                 ret = 0;
5590         } else if (property == adev->mode_info.abm_level_property) {
5591                 *val = dm_state->abm_level;
5592                 ret = 0;
5593         }
5594
5595         return ret;
5596 }
5597
5598 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5599 {
5600         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5601
5602         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5603 }
5604
5605 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5606 {
5607         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5608         const struct dc_link *link = aconnector->dc_link;
5609         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5610         struct amdgpu_display_manager *dm = &adev->dm;
5611
5612         /*
5613          * Call only if mst_mgr was initialized before, since it's not done
5614          * for all connector types.
5615          */
5616         if (aconnector->mst_mgr.dev)
5617                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5618
5619 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5620         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5621
5622         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5623             link->type != dc_connection_none &&
5624             dm->backlight_dev) {
5625                 backlight_device_unregister(dm->backlight_dev);
5626                 dm->backlight_dev = NULL;
5627         }
5628 #endif
5629
5630         if (aconnector->dc_em_sink)
5631                 dc_sink_release(aconnector->dc_em_sink);
5632         aconnector->dc_em_sink = NULL;
5633         if (aconnector->dc_sink)
5634                 dc_sink_release(aconnector->dc_sink);
5635         aconnector->dc_sink = NULL;
5636
5637         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5638         drm_connector_unregister(connector);
5639         drm_connector_cleanup(connector);
5640         if (aconnector->i2c) {
5641                 i2c_del_adapter(&aconnector->i2c->base);
5642                 kfree(aconnector->i2c);
5643         }
5644         kfree(aconnector->dm_dp_aux.aux.name);
5645
5646         kfree(connector);
5647 }
5648
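/*
 * Reset the connector to its default software state: free the current
 * state and install a fresh one with scaling off, underscan disabled,
 * 8 as the maximum requested bpc, and the configured ABM level applied
 * to eDP panels only.
 */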
5649 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5650 {
5651         struct dm_connector_state *state =
5652                 to_dm_connector_state(connector->state);
5653
5654         if (connector->state)
5655                 __drm_atomic_helper_connector_destroy_state(connector->state);
5656
5657         kfree(state);
5658
5659         state = kzalloc(sizeof(*state), GFP_KERNEL);
5660
5661         if (state) {
5662                 state->scaling = RMX_OFF;
5663                 state->underscan_enable = false;
5664                 state->underscan_hborder = 0;
5665                 state->underscan_vborder = 0;
5666                 state->base.max_requested_bpc = 8;
5667                 state->vcpi_slots = 0;
5668                 state->pbn = 0;
5669                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5670                         state->abm_level = amdgpu_dm_abm_level;
5671
5672                 __drm_atomic_helper_connector_reset(connector, &state->base);
5673         }
5674 }
5675
5676 struct drm_connector_state *
5677 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5678 {
5679         struct dm_connector_state *state =
5680                 to_dm_connector_state(connector->state);
5681
5682         struct dm_connector_state *new_state =
5683                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5684
5685         if (!new_state)
5686                 return NULL;
5687
5688         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5689
5690         new_state->freesync_capable = state->freesync_capable;
5691         new_state->abm_level = state->abm_level;
5692         new_state->scaling = state->scaling;
5693         new_state->underscan_enable = state->underscan_enable;
5694         new_state->underscan_hborder = state->underscan_hborder;
5695         new_state->underscan_vborder = state->underscan_vborder;
5696         new_state->vcpi_slots = state->vcpi_slots;
5697         new_state->pbn = state->pbn;
5698         return &new_state->base;
5699 }
5700
5701 static int
5702 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5703 {
5704         struct amdgpu_dm_connector *amdgpu_dm_connector =
5705                 to_amdgpu_dm_connector(connector);
5706         int r;
5707
5708         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5709             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5710                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5711                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5712                 if (r)
5713                         return r;
5714         }
5715
5716 #if defined(CONFIG_DEBUG_FS)
5717         connector_debugfs_init(amdgpu_dm_connector);
5718 #endif
5719
5720         return 0;
5721 }
5722
5723 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5724         .reset = amdgpu_dm_connector_funcs_reset,
5725         .detect = amdgpu_dm_connector_detect,
5726         .fill_modes = drm_helper_probe_single_connector_modes,
5727         .destroy = amdgpu_dm_connector_destroy,
5728         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5729         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5730         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5731         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5732         .late_register = amdgpu_dm_connector_late_register,
5733         .early_unregister = amdgpu_dm_connector_unregister
5734 };
5735
5736 static int get_modes(struct drm_connector *connector)
5737 {
5738         return amdgpu_dm_connector_get_modes(connector);
5739 }
5740
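/*
 * Build an emulated (virtual) sink from the user-supplied EDID blob.
 * This backs the "force connector on" path: with no physical sink
 * attached, DC still needs a sink object to validate timings against,
 * so the whole firmware EDID ((extensions + 1) * 128-byte blocks) is
 * handed to dc_link_add_remote_sink().
 */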
5741 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5742 {
5743         struct dc_sink_init_data init_params = {
5744                         .link = aconnector->dc_link,
5745                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5746         };
5747         struct edid *edid;
5748
5749         if (!aconnector->base.edid_blob_ptr) {
5750                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5751                                 aconnector->base.name);
5752
5753                 aconnector->base.force = DRM_FORCE_OFF;
5754                 aconnector->base.override_edid = false;
5755                 return;
5756         }
5757
5758         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5759
5760         aconnector->edid = edid;
5761
5762         aconnector->dc_em_sink = dc_link_add_remote_sink(
5763                 aconnector->dc_link,
5764                 (uint8_t *)edid,
5765                 (edid->extensions + 1) * EDID_LENGTH,
5766                 &init_params);
5767
5768         if (aconnector->base.force == DRM_FORCE_ON) {
5769                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5770                                       aconnector->dc_link->local_sink :
5771                                       aconnector->dc_em_sink;
5772                 dc_sink_retain(aconnector->dc_sink);
5773         }
5774 }
5775
5776 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5777 {
5778         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5779
5780         /*
5781          * In case of a headless boot with force-on for a DP managed connector,
5782          * those settings have to be != 0 to get an initial modeset.
5783          */
5784         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5785                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5786                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5787         }
5788
5790         aconnector->base.override_edid = true;
5791         create_eml_sink(aconnector);
5792 }
5793
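/*
 * Create a dc_stream_state for the sink and validate it with DC,
 * stepping the color depth down on failure. Starting from the
 * connector's max_requested_bpc (default 8), each failed validation
 * retries with bpc reduced by 2, bottoming out at 6 bpc; e.g.
 * 10 -> 8 -> 6 for a mode that only fits at reduced depth.
 */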
5794 static struct dc_stream_state *
5795 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5796                                 const struct drm_display_mode *drm_mode,
5797                                 const struct dm_connector_state *dm_state,
5798                                 const struct dc_stream_state *old_stream)
5799 {
5800         struct drm_connector *connector = &aconnector->base;
5801         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5802         struct dc_stream_state *stream;
5803         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5804         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5805         enum dc_status dc_result = DC_OK;
5806
5807         do {
5808                 stream = create_stream_for_sink(aconnector, drm_mode,
5809                                                 dm_state, old_stream,
5810                                                 requested_bpc);
5811                 if (stream == NULL) {
5812                         DRM_ERROR("Failed to create stream for sink!\n");
5813                         break;
5814                 }
5815
5816                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5817
5818                 if (dc_result != DC_OK) {
5819                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5820                                       drm_mode->hdisplay,
5821                                       drm_mode->vdisplay,
5822                                       drm_mode->clock,
5823                                       dc_result,
5824                                       dc_status_to_str(dc_result));
5825
5826                         dc_stream_release(stream);
5827                         stream = NULL;
5828                         requested_bpc -= 2; /* lower bpc to retry validation */
5829                 }
5830
5831         } while (stream == NULL && requested_bpc >= 6);
5832
5833         return stream;
5834 }
5835
5836 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5837                                    struct drm_display_mode *mode)
5838 {
5839         int result = MODE_ERROR;
5840         struct dc_sink *dc_sink;
5841         /* TODO: Unhardcode stream count */
5842         struct dc_stream_state *stream;
5843         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5844
5845         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5846                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5847                 return result;
5848
5849         /*
5850          * Only run this the first time mode_valid is called, to initialize
5851          * EDID mgmt.
5852          */
5853         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5854                 !aconnector->dc_em_sink)
5855                 handle_edid_mgmt(aconnector);
5856
5857         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5858
5859         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5860                                 aconnector->base.force != DRM_FORCE_ON) {
5861                 DRM_ERROR("dc_sink is NULL!\n");
5862                 goto fail;
5863         }
5864
5865         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5866         if (stream) {
5867                 dc_stream_release(stream);
5868                 result = MODE_OK;
5869         }
5870
5871 fail:
5872         /* TODO: error handling */
5873         return result;
5874 }
5875
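/*
 * Translate the connector's HDR output metadata blob into a DC info
 * packet. The DRM core packs it as an HDMI Dynamic Range and Mastering
 * (type 0x87) infoframe: a 4-byte header plus 26 bytes of static
 * metadata. HDMI sinks get the native infoframe header; DP/eDP sinks
 * get the same payload wrapped in an SDP header instead.
 */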
5876 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5877                                 struct dc_info_packet *out)
5878 {
5879         struct hdmi_drm_infoframe frame;
5880         unsigned char buf[30]; /* 26 + 4 */
5881         ssize_t len;
5882         int ret, i;
5883
5884         memset(out, 0, sizeof(*out));
5885
5886         if (!state->hdr_output_metadata)
5887                 return 0;
5888
5889         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5890         if (ret)
5891                 return ret;
5892
5893         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5894         if (len < 0)
5895                 return (int)len;
5896
5897         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5898         if (len != 30)
5899                 return -EINVAL;
5900
5901         /* Prepare the infopacket for DC. */
5902         switch (state->connector->connector_type) {
5903         case DRM_MODE_CONNECTOR_HDMIA:
5904                 out->hb0 = 0x87; /* type */
5905                 out->hb1 = 0x01; /* version */
5906                 out->hb2 = 0x1A; /* length */
5907                 out->sb[0] = buf[3]; /* checksum */
5908                 i = 1;
5909                 break;
5910
5911         case DRM_MODE_CONNECTOR_DisplayPort:
5912         case DRM_MODE_CONNECTOR_eDP:
5913                 out->hb0 = 0x00; /* sdp id, zero */
5914                 out->hb1 = 0x87; /* type */
5915                 out->hb2 = 0x1D; /* payload len - 1 */
5916                 out->hb3 = (0x13 << 2); /* sdp version */
5917                 out->sb[0] = 0x01; /* version */
5918                 out->sb[1] = 0x1A; /* length */
5919                 i = 2;
5920                 break;
5921
5922         default:
5923                 return -EINVAL;
5924         }
5925
5926         memcpy(&out->sb[i], &buf[4], 26);
5927         out->valid = true;
5928
5929         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5930                        sizeof(out->sb), false);
5931
5932         return 0;
5933 }
5934
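/*
 * Returns true when the HDR metadata changed between the two states:
 * either the blob appeared/disappeared, its length changed, or both
 * blobs exist with equal length but different contents.
 */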
5935 static bool
5936 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5937                           const struct drm_connector_state *new_state)
5938 {
5939         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5940         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5941
5942         if (old_blob != new_blob) {
5943                 if (old_blob && new_blob &&
5944                     old_blob->length == new_blob->length)
5945                         return memcmp(old_blob->data, new_blob->data,
5946                                       old_blob->length);
5947
5948                 return true;
5949         }
5950
5951         return false;
5952 }
5953
5954 static int
5955 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5956                                  struct drm_atomic_state *state)
5957 {
5958         struct drm_connector_state *new_con_state =
5959                 drm_atomic_get_new_connector_state(state, conn);
5960         struct drm_connector_state *old_con_state =
5961                 drm_atomic_get_old_connector_state(state, conn);
5962         struct drm_crtc *crtc = new_con_state->crtc;
5963         struct drm_crtc_state *new_crtc_state;
5964         int ret;
5965
5966         trace_amdgpu_dm_connector_atomic_check(new_con_state);
5967
5968         if (!crtc)
5969                 return 0;
5970
5971         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5972                 struct dc_info_packet hdr_infopacket;
5973
5974                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5975                 if (ret)
5976                         return ret;
5977
5978                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5979                 if (IS_ERR(new_crtc_state))
5980                         return PTR_ERR(new_crtc_state);
5981
5982                 /*
5983                  * DC considers the stream backends changed if the
5984                  * static metadata changes. Forcing the modeset also
5985                  * gives a simple way for userspace to switch from
5986                  * 8bpc to 10bpc when setting the metadata to enter
5987                  * or exit HDR.
5988                  *
5989                  * Changing the static metadata after it's been
5990                  * set is permissible, however. So only force a
5991                  * modeset if we're entering or exiting HDR.
5992                  */
5993                 new_crtc_state->mode_changed =
5994                         !old_con_state->hdr_output_metadata ||
5995                         !new_con_state->hdr_output_metadata;
5996         }
5997
5998         return 0;
5999 }
6000
6001 static const struct drm_connector_helper_funcs
6002 amdgpu_dm_connector_helper_funcs = {
6003         /*
6004          * If hotplugging a second, bigger display in FB Con mode, bigger-resolution
6005          * modes will be filtered out by drm_mode_validate_size(), and those modes
6006          * are missing after the user starts lightdm. So we need to renew the modes
6007          * list in the get_modes callback, not just return the modes count.
6008          */
6009         .get_modes = get_modes,
6010         .mode_valid = amdgpu_dm_connector_mode_valid,
6011         .atomic_check = amdgpu_dm_connector_atomic_check,
6012 };
6013
6014 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6015 {
6016 }
6017
6018 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6019 {
6020         struct drm_atomic_state *state = new_crtc_state->state;
6021         struct drm_plane *plane;
6022         int num_active = 0;
6023
6024         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6025                 struct drm_plane_state *new_plane_state;
6026
6027                 /* Cursor planes are "fake". */
6028                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6029                         continue;
6030
6031                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6032
6033                 if (!new_plane_state) {
6034                         /*
6035                          * The plane is enabled on the CRTC and hasn't changed
6036                          * state. This means that it previously passed
6037                          * validation and is therefore enabled.
6038                          */
6039                         num_active += 1;
6040                         continue;
6041                 }
6042
6043                 /* We need a framebuffer to be considered enabled. */
6044                 num_active += (new_plane_state->fb != NULL);
6045         }
6046
6047         return num_active;
6048 }
6049
6050 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6051                                          struct drm_crtc_state *new_crtc_state)
6052 {
6053         struct dm_crtc_state *dm_new_crtc_state =
6054                 to_dm_crtc_state(new_crtc_state);
6055
6056         dm_new_crtc_state->active_planes = 0;
6057
6058         if (!dm_new_crtc_state->stream)
6059                 return;
6060
6061         dm_new_crtc_state->active_planes =
6062                 count_crtc_active_planes(new_crtc_state);
6063 }
6064
6065 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6066                                        struct drm_atomic_state *state)
6067 {
6068         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6069                                                                           crtc);
6070         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6071         struct dc *dc = adev->dm.dc;
6072         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6073         int ret = -EINVAL;
6074
6075         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6076
6077         dm_update_crtc_active_planes(crtc, crtc_state);
6078
6079         if (unlikely(!dm_crtc_state->stream &&
6080                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6081                 WARN_ON(1);
6082                 return ret;
6083         }
6084
6085         /*
6086          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6087          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6088          * planes are disabled, which is not supported by the hardware. And there is legacy
6089          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6090          */
6091         if (crtc_state->enable &&
6092             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6093                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6094                 return -EINVAL;
6095         }
6096
6097         /* In some use cases, like reset, no stream is attached */
6098         if (!dm_crtc_state->stream)
6099                 return 0;
6100
6101         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6102                 return 0;
6103
6104         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6105         return ret;
6106 }
6107
6108 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6109                                       const struct drm_display_mode *mode,
6110                                       struct drm_display_mode *adjusted_mode)
6111 {
6112         return true;
6113 }
6114
6115 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6116         .disable = dm_crtc_helper_disable,
6117         .atomic_check = dm_crtc_helper_atomic_check,
6118         .mode_fixup = dm_crtc_helper_mode_fixup,
6119         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6120 };
6121
6122 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6123 {
6125 }
6126
6127 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6128 {
6129         switch (display_color_depth) {
6130         case COLOR_DEPTH_666:
6131                 return 6;
6132         case COLOR_DEPTH_888:
6133                 return 8;
6134         case COLOR_DEPTH_101010:
6135                 return 10;
6136         case COLOR_DEPTH_121212:
6137                 return 12;
6138         case COLOR_DEPTH_141414:
6139                 return 14;
6140         case COLOR_DEPTH_161616:
6141                 return 16;
6142         default:
6143                 break;
6144         }
6145         return 0;
6146 }
6147
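/*
 * For MST connectors, size the payload before commit: derive the
 * stream's bandwidth in PBN (Payload Bandwidth Number, a 54/64-MB/s
 * unit from the DP MST spec) and reserve VCPI time slots in the atomic
 * state. As a rough guide, drm_dp_calc_pbn_mode() computes
 *
 *     pbn ~= clock_kHz * bpp / 8 / 1000 * (64 / 54) * 1.006
 *
 * including the 0.6% margin required for non-DSC streams; a 1080p60
 * mode (148500 kHz) at 24 bpp comes out around 532 PBN. A negative
 * slot count from drm_dp_atomic_find_vcpi_slots() fails the check.
 */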
6148 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6149                                           struct drm_crtc_state *crtc_state,
6150                                           struct drm_connector_state *conn_state)
6151 {
6152         struct drm_atomic_state *state = crtc_state->state;
6153         struct drm_connector *connector = conn_state->connector;
6154         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6155         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6156         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6157         struct drm_dp_mst_topology_mgr *mst_mgr;
6158         struct drm_dp_mst_port *mst_port;
6159         enum dc_color_depth color_depth;
6160         int clock, bpp = 0;
6161         bool is_y420 = false;
6162
6163         if (!aconnector->port || !aconnector->dc_sink)
6164                 return 0;
6165
6166         mst_port = aconnector->port;
6167         mst_mgr = &aconnector->mst_port->mst_mgr;
6168
6169         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6170                 return 0;
6171
6172         if (!state->duplicated) {
6173                 int max_bpc = conn_state->max_requested_bpc;
6174                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6175                                 aconnector->force_yuv420_output;
6176                 color_depth = convert_color_depth_from_display_info(connector,
6177                                                                     is_y420,
6178                                                                     max_bpc);
6179                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6180                 clock = adjusted_mode->clock;
6181                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6182         }
6183         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6184                                                                            mst_mgr,
6185                                                                            mst_port,
6186                                                                            dm_new_connector_state->pbn,
6187                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6188         if (dm_new_connector_state->vcpi_slots < 0) {
6189                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6190                 return dm_new_connector_state->vcpi_slots;
6191         }
6192         return 0;
6193 }
6194
6195 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6196         .disable = dm_encoder_helper_disable,
6197         .atomic_check = dm_encoder_helper_atomic_check
6198 };
6199
6200 #if defined(CONFIG_DRM_AMD_DC_DCN)
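/*
 * Revisit the VCPI allocations once DC has decided which MST streams
 * get DSC. For compressed streams the PBN must be recomputed from the
 * DSC target rate (dsc_cfg.bits_per_pixel, which DC appears to keep in
 * 1/16-bpp units, matching what drm_dp_calc_pbn_mode() expects when its
 * dsc argument is true) rather than from the uncompressed depth;
 * streams without DSC simply have it disabled on their port, reusing
 * the PBN already stored in the connector state.
 */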
6201 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6202                                             struct dc_state *dc_state)
6203 {
6204         struct dc_stream_state *stream = NULL;
6205         struct drm_connector *connector;
6206         struct drm_connector_state *new_con_state, *old_con_state;
6207         struct amdgpu_dm_connector *aconnector;
6208         struct dm_connector_state *dm_conn_state;
6209         int i, j, clock, bpp;
6210         int vcpi, pbn_div, pbn = 0;
6211
6212         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6213
6214                 aconnector = to_amdgpu_dm_connector(connector);
6215
6216                 if (!aconnector->port)
6217                         continue;
6218
6219                 if (!new_con_state || !new_con_state->crtc)
6220                         continue;
6221
6222                 dm_conn_state = to_dm_connector_state(new_con_state);
6223
6224                 for (j = 0; j < dc_state->stream_count; j++) {
6225                         stream = dc_state->streams[j];
6226                         if (!stream)
6227                                 continue;
6228
6229                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6230                                 break;
6231
6232                         stream = NULL;
6233                 }
6234
6235                 if (!stream)
6236                         continue;
6237
6238                 if (stream->timing.flags.DSC != 1) {
6239                         drm_dp_mst_atomic_enable_dsc(state,
6240                                                      aconnector->port,
6241                                                      dm_conn_state->pbn,
6242                                                      0,
6243                                                      false);
6244                         continue;
6245                 }
6246
6247                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6248                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6249                 clock = stream->timing.pix_clk_100hz / 10;
6250                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6251                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6252                                                     aconnector->port,
6253                                                     pbn, pbn_div,
6254                                                     true);
6255                 if (vcpi < 0)
6256                         return vcpi;
6257
6258                 dm_conn_state->pbn = pbn;
6259                 dm_conn_state->vcpi_slots = vcpi;
6260         }
6261         return 0;
6262 }
6263 #endif
6264
6265 static void dm_drm_plane_reset(struct drm_plane *plane)
6266 {
6267         struct dm_plane_state *amdgpu_state = NULL;
6268
6269         if (plane->state)
6270                 plane->funcs->atomic_destroy_state(plane, plane->state);
6271
6272         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6273         WARN_ON(amdgpu_state == NULL);
6274
6275         if (amdgpu_state)
6276                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6277 }
6278
6279 static struct drm_plane_state *
6280 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6281 {
6282         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6283
6284         old_dm_plane_state = to_dm_plane_state(plane->state);
6285         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6286         if (!dm_plane_state)
6287                 return NULL;
6288
6289         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6290
6291         if (old_dm_plane_state->dc_state) {
6292                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6293                 dc_plane_state_retain(dm_plane_state->dc_state);
6294         }
6295
6296         return &dm_plane_state->base;
6297 }
6298
6299 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6300                                 struct drm_plane_state *state)
6301 {
6302         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6303
6304         if (dm_plane_state->dc_state)
6305                 dc_plane_state_release(dm_plane_state->dc_state);
6306
6307         drm_atomic_helper_plane_destroy_state(plane, state);
6308 }
6309
6310 static const struct drm_plane_funcs dm_plane_funcs = {
6311         .update_plane   = drm_atomic_helper_update_plane,
6312         .disable_plane  = drm_atomic_helper_disable_plane,
6313         .destroy        = drm_primary_helper_destroy,
6314         .reset = dm_drm_plane_reset,
6315         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6316         .atomic_destroy_state = dm_drm_plane_destroy_state,
6317         .format_mod_supported = dm_plane_format_mod_supported,
6318 };
6319
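/*
 * Pin the framebuffer's backing BO before the commit touches it:
 * reserve the buffer through TTM, pin it into a scanout-capable domain
 * (VRAM for cursors, any display-supported domain otherwise), bind a
 * GART mapping, and record the resulting GPU address in the
 * amdgpu_framebuffer. The extra amdgpu_bo_ref() taken here is dropped
 * by cleanup_fb() once the plane is done with the buffer.
 */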
6320 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6321                                       struct drm_plane_state *new_state)
6322 {
6323         struct amdgpu_framebuffer *afb;
6324         struct drm_gem_object *obj;
6325         struct amdgpu_device *adev;
6326         struct amdgpu_bo *rbo;
6327         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6328         struct list_head list;
6329         struct ttm_validate_buffer tv;
6330         struct ww_acquire_ctx ticket;
6331         uint32_t domain;
6332         int r;
6333
6334         if (!new_state->fb) {
6335                 DRM_DEBUG_DRIVER("No FB bound\n");
6336                 return 0;
6337         }
6338
6339         afb = to_amdgpu_framebuffer(new_state->fb);
6340         obj = new_state->fb->obj[0];
6341         rbo = gem_to_amdgpu_bo(obj);
6342         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6343         INIT_LIST_HEAD(&list);
6344
6345         tv.bo = &rbo->tbo;
6346         tv.num_shared = 1;
6347         list_add(&tv.head, &list);
6348
6349         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6350         if (r) {
6351                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6352                 return r;
6353         }
6354
6355         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6356                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6357         else
6358                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6359
6360         r = amdgpu_bo_pin(rbo, domain);
6361         if (unlikely(r != 0)) {
6362                 if (r != -ERESTARTSYS)
6363                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6364                 ttm_eu_backoff_reservation(&ticket, &list);
6365                 return r;
6366         }
6367
6368         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6369         if (unlikely(r != 0)) {
6370                 amdgpu_bo_unpin(rbo);
6371                 ttm_eu_backoff_reservation(&ticket, &list);
6372                 DRM_ERROR("%p bind failed\n", rbo);
6373                 return r;
6374         }
6375
6376         ttm_eu_backoff_reservation(&ticket, &list);
6377
6378         afb->address = amdgpu_bo_gpu_offset(rbo);
6379
6380         amdgpu_bo_ref(rbo);
6381
6382         /*
6383          * We don't do surface updates on planes that have been newly created,
6384          * but we also don't have the afb->address during atomic check.
6385          *
6386          * Fill in buffer attributes depending on the address here, but only on
6387          * newly created planes since they're not being used by DC yet and this
6388          * won't modify global state.
6389          */
6390         dm_plane_state_old = to_dm_plane_state(plane->state);
6391         dm_plane_state_new = to_dm_plane_state(new_state);
6392
6393         if (dm_plane_state_new->dc_state &&
6394             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6395                 struct dc_plane_state *plane_state =
6396                         dm_plane_state_new->dc_state;
6397                 bool force_disable_dcc = !plane_state->dcc.enable;
6398
6399                 fill_plane_buffer_attributes(
6400                         adev, afb, plane_state->format, plane_state->rotation,
6401                         afb->tiling_flags,
6402                         &plane_state->tiling_info, &plane_state->plane_size,
6403                         &plane_state->dcc, &plane_state->address,
6404                         afb->tmz_surface, force_disable_dcc);
6405         }
6406
6407         return 0;
6408 }
6409
6410 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6411                                        struct drm_plane_state *old_state)
6412 {
6413         struct amdgpu_bo *rbo;
6414         int r;
6415
6416         if (!old_state->fb)
6417                 return;
6418
6419         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6420         r = amdgpu_bo_reserve(rbo, false);
6421         if (unlikely(r)) {
6422                 DRM_ERROR("failed to reserve rbo before unpin\n");
6423                 return;
6424         }
6425
6426         amdgpu_bo_unpin(rbo);
6427         amdgpu_bo_unreserve(rbo);
6428         amdgpu_bo_unref(&rbo);
6429 }
6430
6431 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6432                                        struct drm_crtc_state *new_crtc_state)
6433 {
6434         int max_downscale = 0;
6435         int max_upscale = INT_MAX;
6436
6437         /* TODO: These should be checked against DC plane caps */
6438         return drm_atomic_helper_check_plane_state(
6439                 state, new_crtc_state, max_downscale, max_upscale, true, true);
6440 }
6441
6442 static int dm_plane_atomic_check(struct drm_plane *plane,
6443                                  struct drm_plane_state *state)
6444 {
6445         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6446         struct dc *dc = adev->dm.dc;
6447         struct dm_plane_state *dm_plane_state;
6448         struct dc_scaling_info scaling_info;
6449         struct drm_crtc_state *new_crtc_state;
6450         int ret;
6451
6452         trace_amdgpu_dm_plane_atomic_check(state);
6453
6454         dm_plane_state = to_dm_plane_state(state);
6455
6456         if (!dm_plane_state->dc_state)
6457                 return 0;
6458
6459         new_crtc_state =
6460                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6461         if (!new_crtc_state)
6462                 return -EINVAL;
6463
6464         ret = dm_plane_helper_check_state(state, new_crtc_state);
6465         if (ret)
6466                 return ret;
6467
6468         ret = fill_dc_scaling_info(state, &scaling_info);
6469         if (ret)
6470                 return ret;
6471
6472         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6473                 return 0;
6474
6475         return -EINVAL;
6476 }
6477
6478 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6479                                        struct drm_plane_state *new_plane_state)
6480 {
6481         /* Only support async updates on cursor planes. */
6482         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6483                 return -EINVAL;
6484
6485         return 0;
6486 }
6487
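/*
 * Fast path for cursor moves: instead of a full atomic commit, copy the
 * new position/size into the current plane state, swap in the new fb,
 * and program the cursor immediately via handle_cursor_update(). The
 * async_check above restricts this path to cursor planes.
 */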
6488 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6489                                          struct drm_plane_state *new_state)
6490 {
6491         struct drm_plane_state *old_state =
6492                 drm_atomic_get_old_plane_state(new_state->state, plane);
6493
6494         trace_amdgpu_dm_atomic_update_cursor(new_state);
6495
6496         swap(plane->state->fb, new_state->fb);
6497
6498         plane->state->src_x = new_state->src_x;
6499         plane->state->src_y = new_state->src_y;
6500         plane->state->src_w = new_state->src_w;
6501         plane->state->src_h = new_state->src_h;
6502         plane->state->crtc_x = new_state->crtc_x;
6503         plane->state->crtc_y = new_state->crtc_y;
6504         plane->state->crtc_w = new_state->crtc_w;
6505         plane->state->crtc_h = new_state->crtc_h;
6506
6507         handle_cursor_update(plane, old_state);
6508 }
6509
6510 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6511         .prepare_fb = dm_plane_helper_prepare_fb,
6512         .cleanup_fb = dm_plane_helper_cleanup_fb,
6513         .atomic_check = dm_plane_atomic_check,
6514         .atomic_async_check = dm_plane_atomic_async_check,
6515         .atomic_async_update = dm_plane_atomic_async_update
6516 };
6517
6518 /*
6519  * TODO: these are currently initialized to rgb formats only.
6520  * For future use cases we should either initialize them dynamically based on
6521  * plane capabilities, or initialize this array to all formats, so internal drm
6522  * check will succeed, and let DC implement proper check
6523  */
6524 static const uint32_t rgb_formats[] = {
6525         DRM_FORMAT_XRGB8888,
6526         DRM_FORMAT_ARGB8888,
6527         DRM_FORMAT_RGBA8888,
6528         DRM_FORMAT_XRGB2101010,
6529         DRM_FORMAT_XBGR2101010,
6530         DRM_FORMAT_ARGB2101010,
6531         DRM_FORMAT_ABGR2101010,
6532         DRM_FORMAT_XBGR8888,
6533         DRM_FORMAT_ABGR8888,
6534         DRM_FORMAT_RGB565,
6535 };
6536
6537 static const uint32_t overlay_formats[] = {
6538         DRM_FORMAT_XRGB8888,
6539         DRM_FORMAT_ARGB8888,
6540         DRM_FORMAT_RGBA8888,
6541         DRM_FORMAT_XBGR8888,
6542         DRM_FORMAT_ABGR8888,
6543         DRM_FORMAT_RGB565
6544 };
6545
6546 static const u32 cursor_formats[] = {
6547         DRM_FORMAT_ARGB8888
6548 };
6549
6550 static int get_plane_formats(const struct drm_plane *plane,
6551                              const struct dc_plane_cap *plane_cap,
6552                              uint32_t *formats, int max_formats)
6553 {
6554         int i, num_formats = 0;
6555
6556         /*
6557          * TODO: Query support for each group of formats directly from
6558          * DC plane caps. This will require adding more formats to the
6559          * caps list.
6560          */
6561
6562         switch (plane->type) {
6563         case DRM_PLANE_TYPE_PRIMARY:
6564                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6565                         if (num_formats >= max_formats)
6566                                 break;
6567
6568                         formats[num_formats++] = rgb_formats[i];
6569                 }
6570
6571                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6572                         formats[num_formats++] = DRM_FORMAT_NV12;
6573                 if (plane_cap && plane_cap->pixel_format_support.p010)
6574                         formats[num_formats++] = DRM_FORMAT_P010;
6575                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6576                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6577                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6578                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6579                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6580                 }
6581                 break;
6582
6583         case DRM_PLANE_TYPE_OVERLAY:
6584                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6585                         if (num_formats >= max_formats)
6586                                 break;
6587
6588                         formats[num_formats++] = overlay_formats[i];
6589                 }
6590                 break;
6591
6592         case DRM_PLANE_TYPE_CURSOR:
6593                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6594                         if (num_formats >= max_formats)
6595                                 break;
6596
6597                         formats[num_formats++] = cursor_formats[i];
6598                 }
6599                 break;
6600         }
6601
6602         return num_formats;
6603 }
6604
6605 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6606                                 struct drm_plane *plane,
6607                                 unsigned long possible_crtcs,
6608                                 const struct dc_plane_cap *plane_cap)
6609 {
6610         uint32_t formats[32];
6611         int num_formats;
6612         int res = -EPERM;
6613         unsigned int supported_rotations;
6614         uint64_t *modifiers = NULL;
6615
6616         num_formats = get_plane_formats(plane, plane_cap, formats,
6617                                         ARRAY_SIZE(formats));
6618
6619         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6620         if (res)
6621                 return res;
6622
6623         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6624                                        &dm_plane_funcs, formats, num_formats,
6625                                        modifiers, plane->type, NULL);
6626         kfree(modifiers);
6627         if (res)
6628                 return res;
6629
6630         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6631             plane_cap && plane_cap->per_pixel_alpha) {
6632                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6633                                           BIT(DRM_MODE_BLEND_PREMULTI);
6634
6635                 drm_plane_create_alpha_property(plane);
6636                 drm_plane_create_blend_mode_property(plane, blend_caps);
6637         }
6638
6639         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6640             plane_cap &&
6641             (plane_cap->pixel_format_support.nv12 ||
6642              plane_cap->pixel_format_support.p010)) {
6643                 /* This only affects YUV formats. */
6644                 drm_plane_create_color_properties(
6645                         plane,
6646                         BIT(DRM_COLOR_YCBCR_BT601) |
6647                         BIT(DRM_COLOR_YCBCR_BT709) |
6648                         BIT(DRM_COLOR_YCBCR_BT2020),
6649                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6650                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6651                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6652         }
6653
6654         supported_rotations =
6655                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6656                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6657
6658         if (dm->adev->asic_type >= CHIP_BONAIRE &&
6659             plane->type != DRM_PLANE_TYPE_CURSOR)
6660                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6661                                                    supported_rotations);
6662
6663         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6664
6665         /* Create (reset) the plane state */
6666         if (plane->funcs->reset)
6667                 plane->funcs->reset(plane);
6668
6669         return 0;
6670 }
6671
6672 #ifdef CONFIG_DEBUG_FS
6673 static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
6674                                 struct amdgpu_crtc *acrtc)
6675 {
6676         drm_object_attach_property(&acrtc->base.base,
6677                                    dm->crc_win_x_start_property,
6678                                    0);
6679         drm_object_attach_property(&acrtc->base.base,
6680                                    dm->crc_win_y_start_property,
6681                                    0);
6682         drm_object_attach_property(&acrtc->base.base,
6683                                    dm->crc_win_x_end_property,
6684                                    0);
6685         drm_object_attach_property(&acrtc->base.base,
6686                                    dm->crc_win_y_end_property,
6687                                    0);
6688 }
6689 #endif
6690
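/*
 * Create one amdgpu CRTC wired to the given primary plane. Each CRTC
 * also gets its own cursor plane here, sized from the DC cursor caps,
 * plus gamma/CTM color management and, under CONFIG_DEBUG_FS, the CRC
 * window properties attached above.
 */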
6691 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6692                                struct drm_plane *plane,
6693                                uint32_t crtc_index)
6694 {
6695         struct amdgpu_crtc *acrtc = NULL;
6696         struct drm_plane *cursor_plane;
6697
6698         int res = -ENOMEM;
6699
6700         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6701         if (!cursor_plane)
6702                 goto fail;
6703
6704         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6705         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
         if (res)
                 goto fail;
6706
6707         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6708         if (!acrtc)
6709                 goto fail;
6710
6711         res = drm_crtc_init_with_planes(
6712                         dm->ddev,
6713                         &acrtc->base,
6714                         plane,
6715                         cursor_plane,
6716                         &amdgpu_dm_crtc_funcs, NULL);
6717
6718         if (res)
6719                 goto fail;
6720
6721         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6722
6723         /* Create (reset) the CRTC state */
6724         if (acrtc->base.funcs->reset)
6725                 acrtc->base.funcs->reset(&acrtc->base);
6726
6727         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6728         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6729
6730         acrtc->crtc_id = crtc_index;
6731         acrtc->base.enabled = false;
6732         acrtc->otg_inst = -1;
6733
6734         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6735         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6736                                    true, MAX_COLOR_LUT_ENTRIES);
6737         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6738 #ifdef CONFIG_DEBUG_FS
6739         attach_crtc_crc_properties(dm, acrtc);
6740 #endif
6741         return 0;
6742
6743 fail:
6744         kfree(acrtc);
6745         kfree(cursor_plane);
6746         return res;
6747 }
6748
6749
6750 static int to_drm_connector_type(enum signal_type st)
6751 {
6752         switch (st) {
6753         case SIGNAL_TYPE_HDMI_TYPE_A:
6754                 return DRM_MODE_CONNECTOR_HDMIA;
6755         case SIGNAL_TYPE_EDP:
6756                 return DRM_MODE_CONNECTOR_eDP;
6757         case SIGNAL_TYPE_LVDS:
6758                 return DRM_MODE_CONNECTOR_LVDS;
6759         case SIGNAL_TYPE_RGB:
6760                 return DRM_MODE_CONNECTOR_VGA;
6761         case SIGNAL_TYPE_DISPLAY_PORT:
6762         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6763                 return DRM_MODE_CONNECTOR_DisplayPort;
6764         case SIGNAL_TYPE_DVI_DUAL_LINK:
6765         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6766                 return DRM_MODE_CONNECTOR_DVID;
6767         case SIGNAL_TYPE_VIRTUAL:
6768                 return DRM_MODE_CONNECTOR_VIRTUAL;
6769
6770         default:
6771                 return DRM_MODE_CONNECTOR_Unknown;
6772         }
6773 }
6774
6775 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6776 {
6777         struct drm_encoder *encoder;
6778
6779         /* There is only one encoder per connector */
6780         drm_connector_for_each_possible_encoder(connector, encoder)
6781                 return encoder;
6782
6783         return NULL;
6784 }
6785
6786 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6787 {
6788         struct drm_encoder *encoder;
6789         struct amdgpu_encoder *amdgpu_encoder;
6790
6791         encoder = amdgpu_dm_connector_to_encoder(connector);
6792
6793         if (encoder == NULL)
6794                 return;
6795
6796         amdgpu_encoder = to_amdgpu_encoder(encoder);
6797
6798         amdgpu_encoder->native_mode.clock = 0;
6799
6800         if (!list_empty(&connector->probed_modes)) {
6801                 struct drm_display_mode *preferred_mode = NULL;
6802
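                /*
                 * Note: the unconditional break below ends this loop after
                 * the first entry. The caller sorted probed_modes so that
                 * preferred, higher-resolution modes come first, so only the
                 * top-ranked mode is considered here.
                 */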
6803                 list_for_each_entry(preferred_mode,
6804                                     &connector->probed_modes,
6805                                     head) {
6806                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6807                                 amdgpu_encoder->native_mode = *preferred_mode;
6808
6809                         break;
6810                 }
6811
6812         }
6813 }
6814
6815 static struct drm_display_mode *
6816 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6817                              char *name,
6818                              int hdisplay, int vdisplay)
6819 {
6820         struct drm_device *dev = encoder->dev;
6821         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6822         struct drm_display_mode *mode = NULL;
6823         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6824
6825         mode = drm_mode_duplicate(dev, native_mode);
6826
6827         if (mode == NULL)
6828                 return NULL;
6829
6830         mode->hdisplay = hdisplay;
6831         mode->vdisplay = vdisplay;
6832         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6833         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6834
6835         return mode;
6837 }
6838
6839 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6840                                                  struct drm_connector *connector)
6841 {
6842         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6843         struct drm_display_mode *mode = NULL;
6844         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6845         struct amdgpu_dm_connector *amdgpu_dm_connector =
6846                                 to_amdgpu_dm_connector(connector);
6847         int i;
6848         int n;
6849         struct mode_size {
6850                 char name[DRM_DISPLAY_MODE_LEN];
6851                 int w;
6852                 int h;
6853         } common_modes[] = {
6854                 {  "640x480",  640,  480},
6855                 {  "800x600",  800,  600},
6856                 { "1024x768", 1024,  768},
6857                 { "1280x720", 1280,  720},
6858                 { "1280x800", 1280,  800},
6859                 {"1280x1024", 1280, 1024},
6860                 { "1440x900", 1440,  900},
6861                 {"1680x1050", 1680, 1050},
6862                 {"1600x1200", 1600, 1200},
6863                 {"1920x1080", 1920, 1080},
6864                 {"1920x1200", 1920, 1200}
6865         };
6866
6867         n = ARRAY_SIZE(common_modes);
6868
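        /*
         * Add each common mode that fits within the native mode (skipping
         * the native size itself) and is not already in the probed list.
         */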
6869         for (i = 0; i < n; i++) {
6870                 struct drm_display_mode *curmode = NULL;
6871                 bool mode_existed = false;
6872
6873                 if (common_modes[i].w > native_mode->hdisplay ||
6874                     common_modes[i].h > native_mode->vdisplay ||
6875                    (common_modes[i].w == native_mode->hdisplay &&
6876                     common_modes[i].h == native_mode->vdisplay))
6877                         continue;
6878
6879                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6880                         if (common_modes[i].w == curmode->hdisplay &&
6881                             common_modes[i].h == curmode->vdisplay) {
6882                                 mode_existed = true;
6883                                 break;
6884                         }
6885                 }
6886
6887                 if (mode_existed)
6888                         continue;
6889
                mode = amdgpu_dm_create_common_mode(encoder,
                                common_modes[i].name, common_modes[i].w,
                                common_modes[i].h);
                if (!mode)
                        continue;

                drm_mode_probed_add(connector, mode);
                amdgpu_dm_connector->num_modes++;
6895         }
6896 }
6897
6898 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6899                                               struct edid *edid)
6900 {
6901         struct amdgpu_dm_connector *amdgpu_dm_connector =
6902                         to_amdgpu_dm_connector(connector);
6903
6904         if (edid) {
6905                 /* empty probed_modes */
6906                 INIT_LIST_HEAD(&connector->probed_modes);
6907                 amdgpu_dm_connector->num_modes =
6908                                 drm_add_edid_modes(connector, edid);
6909
                /*
                 * Sort the probed modes before calling
                 * amdgpu_dm_get_native_mode(), since an EDID can have
                 * more than one preferred mode. Modes later in the
                 * probed-mode list could be of higher, preferred
                 * resolution: for example, 3840x2160 in the base EDID
                 * preferred timing and 4096x2160 in a DisplayID
                 * extension block.
                 */
6918                 drm_mode_sort(&connector->probed_modes);
6919                 amdgpu_dm_get_native_mode(connector);
6920         } else {
6921                 amdgpu_dm_connector->num_modes = 0;
6922         }
6923 }
6924
6925 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6926 {
6927         struct amdgpu_dm_connector *amdgpu_dm_connector =
6928                         to_amdgpu_dm_connector(connector);
6929         struct drm_encoder *encoder;
6930         struct edid *edid = amdgpu_dm_connector->edid;
6931
6932         encoder = amdgpu_dm_connector_to_encoder(connector);
6933
6934         if (!drm_edid_is_valid(edid)) {
6935                 amdgpu_dm_connector->num_modes =
6936                                 drm_add_modes_noedid(connector, 640, 480);
6937         } else {
6938                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6939                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6940         }
6941         amdgpu_dm_fbc_init(connector);
6942
6943         return amdgpu_dm_connector->num_modes;
6944 }
6945
6946 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6947                                      struct amdgpu_dm_connector *aconnector,
6948                                      int connector_type,
6949                                      struct dc_link *link,
6950                                      int link_index)
6951 {
6952         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6953
6954         /*
6955          * Some of the properties below require access to state, like bpc.
6956          * Allocate some default initial connector state with our reset helper.
6957          */
6958         if (aconnector->base.funcs->reset)
6959                 aconnector->base.funcs->reset(&aconnector->base);
6960
6961         aconnector->connector_id = link_index;
6962         aconnector->dc_link = link;
6963         aconnector->base.interlace_allowed = false;
6964         aconnector->base.doublescan_allowed = false;
6965         aconnector->base.stereo_allowed = false;
6966         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6967         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6968         aconnector->audio_inst = -1;
6969         mutex_init(&aconnector->hpd_lock);
6970
        /*
         * Configure HPD hot-plug support: connector->polled defaults to 0,
         * which means HPD hot plug is not supported.
         */
6975         switch (connector_type) {
6976         case DRM_MODE_CONNECTOR_HDMIA:
6977                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.hdmi_ycbcr420_supported;
6980                 break;
6981         case DRM_MODE_CONNECTOR_DisplayPort:
6982                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.dp_ycbcr420_supported;
6985                 break;
6986         case DRM_MODE_CONNECTOR_DVID:
6987                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6988                 break;
6989         default:
6990                 break;
6991         }
6992
6993         drm_object_attach_property(&aconnector->base.base,
6994                                 dm->ddev->mode_config.scaling_mode_property,
6995                                 DRM_MODE_SCALE_NONE);
6996
6997         drm_object_attach_property(&aconnector->base.base,
6998                                 adev->mode_info.underscan_property,
6999                                 UNDERSCAN_OFF);
7000         drm_object_attach_property(&aconnector->base.base,
7001                                 adev->mode_info.underscan_hborder_property,
7002                                 0);
7003         drm_object_attach_property(&aconnector->base.base,
7004                                 adev->mode_info.underscan_vborder_property,
7005                                 0);
7006
7007         if (!aconnector->mst_port)
7008                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7009
        /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7011         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7012         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7013
7014         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7015             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7016                 drm_object_attach_property(&aconnector->base.base,
7017                                 adev->mode_info.abm_level_property, 0);
7018         }
7019
7020         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7021             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7022             connector_type == DRM_MODE_CONNECTOR_eDP) {
7023                 drm_object_attach_property(
7024                         &aconnector->base.base,
7025                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7026
7027                 if (!aconnector->mst_port)
7028                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7029
7030 #ifdef CONFIG_DRM_AMD_DC_HDCP
7031                 if (adev->dm.hdcp_workqueue)
7032                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7033 #endif
7034         }
7035 }
7036
7037 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7038                               struct i2c_msg *msgs, int num)
7039 {
7040         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7041         struct ddc_service *ddc_service = i2c->ddc_service;
7042         struct i2c_command cmd;
7043         int i;
7044         int result = -EIO;
7045
7046         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7047
7048         if (!cmd.payloads)
7049                 return result;
7050
7051         cmd.number_of_payloads = num;
7052         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7053         cmd.speed = 100;
7054
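        /* Translate each i2c_msg into a DC i2c_payload; I2C_M_RD marks a read. */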
7055         for (i = 0; i < num; i++) {
7056                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7057                 cmd.payloads[i].address = msgs[i].addr;
7058                 cmd.payloads[i].length = msgs[i].len;
7059                 cmd.payloads[i].data = msgs[i].buf;
7060         }
7061
7062         if (dc_submit_i2c(
7063                         ddc_service->ctx->dc,
7064                         ddc_service->ddc_pin->hw_info.ddc_channel,
7065                         &cmd))
7066                 result = num;
7067
7068         kfree(cmd.payloads);
7069         return result;
7070 }
7071
7072 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7073 {
7074         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7075 }
7076
7077 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7078         .master_xfer = amdgpu_dm_i2c_xfer,
7079         .functionality = amdgpu_dm_i2c_func,
7080 };
7081
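/*
 * Allocate and initialize a DM i2c adapter for the given DDC service. The
 * caller owns the returned adapter: it must register it with
 * i2c_add_adapter() and free it on error.
 */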
7082 static struct amdgpu_i2c_adapter *
7083 create_i2c(struct ddc_service *ddc_service,
7084            int link_index,
7085            int *res)
7086 {
7087         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7088         struct amdgpu_i2c_adapter *i2c;
7089
7090         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7091         if (!i2c)
7092                 return NULL;
7093         i2c->base.owner = THIS_MODULE;
7094         i2c->base.class = I2C_CLASS_DDC;
7095         i2c->base.dev.parent = &adev->pdev->dev;
7096         i2c->base.algo = &amdgpu_dm_i2c_algo;
7097         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7098         i2c_set_adapdata(&i2c->base, i2c);
7099         i2c->ddc_service = ddc_service;
7100         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7101
7102         return i2c;
7103 }
7104
7105
7106 /*
7107  * Note: this function assumes that dc_link_detect() was called for the
7108  * dc_link which will be represented by this aconnector.
7109  */
7110 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7111                                     struct amdgpu_dm_connector *aconnector,
7112                                     uint32_t link_index,
7113                                     struct amdgpu_encoder *aencoder)
7114 {
7115         int res = 0;
7116         int connector_type;
7117         struct dc *dc = dm->dc;
7118         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7119         struct amdgpu_i2c_adapter *i2c;
7120
7121         link->priv = aconnector;
7122
7123         DRM_DEBUG_DRIVER("%s()\n", __func__);
7124
7125         i2c = create_i2c(link->ddc, link->link_index, &res);
7126         if (!i2c) {
7127                 DRM_ERROR("Failed to create i2c adapter data\n");
7128                 return -ENOMEM;
7129         }
7130
7131         aconnector->i2c = i2c;
7132         res = i2c_add_adapter(&i2c->base);
7133
7134         if (res) {
7135                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7136                 goto out_free;
7137         }
7138
7139         connector_type = to_drm_connector_type(link->connector_signal);
7140
7141         res = drm_connector_init_with_ddc(
7142                         dm->ddev,
7143                         &aconnector->base,
7144                         &amdgpu_dm_connector_funcs,
7145                         connector_type,
7146                         &i2c->base);
7147
7148         if (res) {
7149                 DRM_ERROR("connector_init failed\n");
7150                 aconnector->connector_id = -1;
7151                 goto out_free;
7152         }
7153
7154         drm_connector_helper_add(
7155                         &aconnector->base,
7156                         &amdgpu_dm_connector_helper_funcs);
7157
7158         amdgpu_dm_connector_init_helper(
7159                 dm,
7160                 aconnector,
7161                 connector_type,
7162                 link,
7163                 link_index);
7164
7165         drm_connector_attach_encoder(
7166                 &aconnector->base, &aencoder->base);
7167
7168         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7169                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7170                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7171
7172 out_free:
7173         if (res) {
7174                 kfree(i2c);
7175                 aconnector->i2c = NULL;
7176         }
7177         return res;
7178 }
7179
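/* Return an encoder possible_crtcs bitmask with one bit set per CRTC. */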
7180 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7181 {
7182         switch (adev->mode_info.num_crtc) {
7183         case 1:
7184                 return 0x1;
7185         case 2:
7186                 return 0x3;
7187         case 3:
7188                 return 0x7;
7189         case 4:
7190                 return 0xf;
7191         case 5:
7192                 return 0x1f;
7193         case 6:
7194         default:
7195                 return 0x3f;
7196         }
7197 }
7198
7199 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7200                                   struct amdgpu_encoder *aencoder,
7201                                   uint32_t link_index)
7202 {
7203         struct amdgpu_device *adev = drm_to_adev(dev);
7204
7205         int res = drm_encoder_init(dev,
7206                                    &aencoder->base,
7207                                    &amdgpu_dm_encoder_funcs,
7208                                    DRM_MODE_ENCODER_TMDS,
7209                                    NULL);
7210
7211         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7212
7213         if (!res)
7214                 aencoder->encoder_id = link_index;
7215         else
7216                 aencoder->encoder_id = -1;
7217
7218         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7219
7220         return res;
7221 }
7222
7223 static void manage_dm_interrupts(struct amdgpu_device *adev,
7224                                  struct amdgpu_crtc *acrtc,
7225                                  bool enable)
7226 {
7227         /*
7228          * We have no guarantee that the frontend index maps to the same
7229          * backend index - some even map to more than one.
7230          *
7231          * TODO: Use a different interrupt or check DC itself for the mapping.
7232          */
7233         int irq_type =
7234                 amdgpu_display_crtc_idx_to_irq_type(
7235                         adev,
7236                         acrtc->crtc_id);
7237
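        /*
         * Enable vblank before taking the pageflip irq reference, and tear
         * down in the reverse order on disable.
         */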
7238         if (enable) {
7239                 drm_crtc_vblank_on(&acrtc->base);
7240                 amdgpu_irq_get(
7241                         adev,
7242                         &adev->pageflip_irq,
7243                         irq_type);
7244         } else {
7246                 amdgpu_irq_put(
7247                         adev,
7248                         &adev->pageflip_irq,
7249                         irq_type);
7250                 drm_crtc_vblank_off(&acrtc->base);
7251         }
7252 }
7253
7254 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7255                                       struct amdgpu_crtc *acrtc)
7256 {
7257         int irq_type =
7258                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7259
        /*
         * Read the current state for the IRQ and force a reapplication of
         * the setting to the hardware.
         */
7264         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7265 }
7266
7267 static bool
7268 is_scaling_state_different(const struct dm_connector_state *dm_state,
7269                            const struct dm_connector_state *old_dm_state)
7270 {
7271         if (dm_state->scaling != old_dm_state->scaling)
7272                 return true;
7273         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7274                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7275                         return true;
7276         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7277                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7278                         return true;
7279         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7280                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7281                 return true;
7282         return false;
7283 }
7284
7285 #ifdef CONFIG_DRM_AMD_DC_HDCP
7286 static bool is_content_protection_different(struct drm_connector_state *state,
7287                                             const struct drm_connector_state *old_state,
7288                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7289 {
7290         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7291         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7292
7293         /* Handle: Type0/1 change */
7294         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7295             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7296                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7297                 return true;
7298         }
7299
        /* CP is being re-enabled; ignore this.
         *
         * Handles:     ENABLED -> DESIRED
         */
7304         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7305             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7306                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7307                 return false;
7308         }
7309
7310         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7311          *
7312          * Handles:     UNDESIRED -> ENABLED
7313          */
7314         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7315             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7316                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7317
        /* Check that something is connected and enabled; otherwise we would
         * start HDCP with nothing connected/enabled (hot-plug, headless S3,
         * DPMS).
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
7323         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7324             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7325                 dm_con_state->update_hdcp = false;
7326                 return true;
7327         }
7328
7329         /*
7330          * Handles:     UNDESIRED -> UNDESIRED
7331          *              DESIRED -> DESIRED
7332          *              ENABLED -> ENABLED
7333          */
7334         if (old_state->content_protection == state->content_protection)
7335                 return false;
7336
7337         /*
7338          * Handles:     UNDESIRED -> DESIRED
7339          *              DESIRED -> UNDESIRED
7340          *              ENABLED -> UNDESIRED
7341          */
7342         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7343                 return true;
7344
7345         /*
7346          * Handles:     DESIRED -> ENABLED
7347          */
7348         return false;
7349 }
7350
7351 #endif
7352 static void remove_stream(struct amdgpu_device *adev,
7353                           struct amdgpu_crtc *acrtc,
7354                           struct dc_stream_state *stream)
7355 {
        /* This is the update-mode case: clear the CRTC's association with the stream. */
7357
7358         acrtc->otg_inst = -1;
7359         acrtc->enabled = false;
7360 }
7361
7362 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7363                                struct dc_cursor_position *position)
7364 {
7365         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7366         int x, y;
7367         int xorigin = 0, yorigin = 0;
7368
7369         position->enable = false;
7370         position->x = 0;
7371         position->y = 0;
7372
7373         if (!crtc || !plane->state->fb)
7374                 return 0;
7375
7376         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7377             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7378                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7379                           __func__,
7380                           plane->state->crtc_w,
7381                           plane->state->crtc_h);
7382                 return -EINVAL;
7383         }
7384
7385         x = plane->state->crtc_x;
7386         y = plane->state->crtc_y;
7387
7388         if (x <= -amdgpu_crtc->max_cursor_width ||
7389             y <= -amdgpu_crtc->max_cursor_height)
7390                 return 0;
7391
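        /*
         * If the cursor hangs off the top/left edge, clamp the position to 0
         * and shift the hotspot instead so the visible part stays aligned.
         */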
7392         if (x < 0) {
7393                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7394                 x = 0;
7395         }
7396         if (y < 0) {
7397                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7398                 y = 0;
7399         }
7400         position->enable = true;
7401         position->translate_by_source = true;
7402         position->x = x;
7403         position->y = y;
7404         position->x_hotspot = xorigin;
7405         position->y_hotspot = yorigin;
7406
7407         return 0;
7408 }
7409
7410 static void handle_cursor_update(struct drm_plane *plane,
7411                                  struct drm_plane_state *old_plane_state)
7412 {
7413         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7414         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7415         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7416         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7417         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7418         uint64_t address = afb ? afb->address : 0;
7419         struct dc_cursor_position position;
7420         struct dc_cursor_attributes attributes;
7421         int ret;
7422
7423         if (!plane->state->fb && !old_plane_state->fb)
7424                 return;
7425
7426         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7427                          __func__,
7428                          amdgpu_crtc->crtc_id,
7429                          plane->state->crtc_w,
7430                          plane->state->crtc_h);
7431
7432         ret = get_cursor_position(plane, crtc, &position);
7433         if (ret)
7434                 return;
7435
7436         if (!position.enable) {
7437                 /* turn off cursor */
7438                 if (crtc_state && crtc_state->stream) {
7439                         mutex_lock(&adev->dm.dc_lock);
7440                         dc_stream_set_cursor_position(crtc_state->stream,
7441                                                       &position);
7442                         mutex_unlock(&adev->dm.dc_lock);
7443                 }
7444                 return;
7445         }
7446
7447         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7448         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7449
7450         memset(&attributes, 0, sizeof(attributes));
7451         attributes.address.high_part = upper_32_bits(address);
7452         attributes.address.low_part  = lower_32_bits(address);
7453         attributes.width             = plane->state->crtc_w;
7454         attributes.height            = plane->state->crtc_h;
7455         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7456         attributes.rotation_angle    = 0;
7457         attributes.attribute_flags.value = 0;
7458
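        /* Convert the byte pitch to a pixel pitch using the format's cpp. */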
7459         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7460
7461         if (crtc_state->stream) {
7462                 mutex_lock(&adev->dm.dc_lock);
7463                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7464                                                          &attributes))
7465                         DRM_ERROR("DC failed to set cursor attributes\n");
7466
7467                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7468                                                    &position))
7469                         DRM_ERROR("DC failed to set cursor position\n");
7470                 mutex_unlock(&adev->dm.dc_lock);
7471         }
7472 }
7473
7474 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7475 {
7477         assert_spin_locked(&acrtc->base.dev->event_lock);
7478         WARN_ON(acrtc->event);
7479
7480         acrtc->event = acrtc->base.state->event;
7481
7482         /* Set the flip status */
7483         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7484
7485         /* Mark this event as consumed */
7486         acrtc->base.state->event = NULL;
7487
7488         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7489                                                  acrtc->crtc_id);
7490 }
7491
7492 static void update_freesync_state_on_stream(
7493         struct amdgpu_display_manager *dm,
7494         struct dm_crtc_state *new_crtc_state,
7495         struct dc_stream_state *new_stream,
7496         struct dc_plane_state *surface,
7497         u32 flip_timestamp_in_us)
7498 {
7499         struct mod_vrr_params vrr_params;
7500         struct dc_info_packet vrr_infopacket = {0};
7501         struct amdgpu_device *adev = dm->adev;
7502         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7503         unsigned long flags;
7504
7505         if (!new_stream)
7506                 return;
7507
7508         /*
7509          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7510          * For now it's sufficient to just guard against these conditions.
7511          */
7512
7513         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7514                 return;
7515
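        /* dm_irq_params is shared with interrupt handlers; take event_lock. */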
7516         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7517         vrr_params = acrtc->dm_irq_params.vrr_params;
7518
7519         if (surface) {
7520                 mod_freesync_handle_preflip(
7521                         dm->freesync_module,
7522                         surface,
7523                         new_stream,
7524                         flip_timestamp_in_us,
7525                         &vrr_params);
7526
7527                 if (adev->family < AMDGPU_FAMILY_AI &&
7528                     amdgpu_dm_vrr_active(new_crtc_state)) {
7529                         mod_freesync_handle_v_update(dm->freesync_module,
7530                                                      new_stream, &vrr_params);
7531
7532                         /* Need to call this before the frame ends. */
7533                         dc_stream_adjust_vmin_vmax(dm->dc,
7534                                                    new_crtc_state->stream,
7535                                                    &vrr_params.adjust);
7536                 }
7537         }
7538
7539         mod_freesync_build_vrr_infopacket(
7540                 dm->freesync_module,
7541                 new_stream,
7542                 &vrr_params,
7543                 PACKET_TYPE_VRR,
7544                 TRANSFER_FUNC_UNKNOWN,
7545                 &vrr_infopacket);
7546
7547         new_crtc_state->freesync_timing_changed |=
7548                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7549                         &vrr_params.adjust,
7550                         sizeof(vrr_params.adjust)) != 0);
7551
7552         new_crtc_state->freesync_vrr_info_changed |=
7553                 (memcmp(&new_crtc_state->vrr_infopacket,
7554                         &vrr_infopacket,
7555                         sizeof(vrr_infopacket)) != 0);
7556
7557         acrtc->dm_irq_params.vrr_params = vrr_params;
7558         new_crtc_state->vrr_infopacket = vrr_infopacket;
7559
7560         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7561         new_stream->vrr_infopacket = vrr_infopacket;
7562
7563         if (new_crtc_state->freesync_vrr_info_changed)
7564                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7565                               new_crtc_state->base.crtc->base.id,
7566                               (int)new_crtc_state->base.vrr_enabled,
7567                               (int)vrr_params.state);
7568
7569         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7570 }
7571
7572 static void update_stream_irq_parameters(
7573         struct amdgpu_display_manager *dm,
7574         struct dm_crtc_state *new_crtc_state)
7575 {
7576         struct dc_stream_state *new_stream = new_crtc_state->stream;
7577         struct mod_vrr_params vrr_params;
7578         struct mod_freesync_config config = new_crtc_state->freesync_config;
7579         struct amdgpu_device *adev = dm->adev;
7580         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7581         unsigned long flags;
7582
7583         if (!new_stream)
7584                 return;
7585
7586         /*
7587          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7588          * For now it's sufficient to just guard against these conditions.
7589          */
7590         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7591                 return;
7592
7593         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7594         vrr_params = acrtc->dm_irq_params.vrr_params;
7595
7596         if (new_crtc_state->vrr_supported &&
7597             config.min_refresh_in_uhz &&
7598             config.max_refresh_in_uhz) {
7599                 config.state = new_crtc_state->base.vrr_enabled ?
7600                         VRR_STATE_ACTIVE_VARIABLE :
7601                         VRR_STATE_INACTIVE;
7602         } else {
7603                 config.state = VRR_STATE_UNSUPPORTED;
7604         }
7605
7606         mod_freesync_build_vrr_params(dm->freesync_module,
7607                                       new_stream,
7608                                       &config, &vrr_params);
7609
7610         new_crtc_state->freesync_timing_changed |=
7611                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7612                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7613
7614         new_crtc_state->freesync_config = config;
7615         /* Copy state for access from DM IRQ handler */
7616         acrtc->dm_irq_params.freesync_config = config;
7617         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7618         acrtc->dm_irq_params.vrr_params = vrr_params;
7619         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7620 }
7621
7622 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7623                                             struct dm_crtc_state *new_state)
7624 {
7625         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7626         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7627
7628         if (!old_vrr_active && new_vrr_active) {
                /* Transition VRR inactive -> active:
                 * While VRR is active, we must not disable vblank irq, as a
                 * reenable after disable would compute bogus vblank/pflip
                 * timestamps if it happened inside the display front porch.
                 *
                 * We also need the vupdate irq for the actual core vblank
                 * handling at end of vblank.
                 */
7637                 dm_set_vupdate_irq(new_state->base.crtc, true);
7638                 drm_crtc_vblank_get(new_state->base.crtc);
7639                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7640                                  __func__, new_state->base.crtc->base.id);
7641         } else if (old_vrr_active && !new_vrr_active) {
7642                 /* Transition VRR active -> inactive:
7643                  * Allow vblank irq disable again for fixed refresh rate.
7644                  */
7645                 dm_set_vupdate_irq(new_state->base.crtc, false);
7646                 drm_crtc_vblank_put(new_state->base.crtc);
7647                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7648                                  __func__, new_state->base.crtc->base.id);
7649         }
7650 }
7651
7652 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7653 {
7654         struct drm_plane *plane;
7655         struct drm_plane_state *old_plane_state, *new_plane_state;
7656         int i;
7657
7658         /*
7659          * TODO: Make this per-stream so we don't issue redundant updates for
7660          * commits with multiple streams.
7661          */
7662         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7663                                        new_plane_state, i)
7664                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7665                         handle_cursor_update(plane, old_plane_state);
7666 }
7667
7668 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7669                                     struct dc_state *dc_state,
7670                                     struct drm_device *dev,
7671                                     struct amdgpu_display_manager *dm,
7672                                     struct drm_crtc *pcrtc,
7673                                     bool wait_for_vblank)
7674 {
7675         uint32_t i;
7676         uint64_t timestamp_ns;
7677         struct drm_plane *plane;
7678         struct drm_plane_state *old_plane_state, *new_plane_state;
7679         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7680         struct drm_crtc_state *new_pcrtc_state =
7681                         drm_atomic_get_new_crtc_state(state, pcrtc);
7682         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7683         struct dm_crtc_state *dm_old_crtc_state =
7684                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7685         int planes_count = 0, vpos, hpos;
7686         long r;
7687         unsigned long flags;
7688         struct amdgpu_bo *abo;
7689         uint32_t target_vblank, last_flip_vblank;
7690         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7691         bool pflip_present = false;
7692         struct {
7693                 struct dc_surface_update surface_updates[MAX_SURFACES];
7694                 struct dc_plane_info plane_infos[MAX_SURFACES];
7695                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7696                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7697                 struct dc_stream_update stream_update;
7698         } *bundle;
7699
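        /* The update bundle is too large for the stack; allocate it from the heap. */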
7700         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7701
7702         if (!bundle) {
7703                 dm_error("Failed to allocate update bundle\n");
7704                 goto cleanup;
7705         }
7706
7707         /*
7708          * Disable the cursor first if we're disabling all the planes.
7709          * It'll remain on the screen after the planes are re-enabled
7710          * if we don't.
7711          */
7712         if (acrtc_state->active_planes == 0)
7713                 amdgpu_dm_commit_cursors(state);
7714
7715         /* update planes when needed */
7716         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7717                 struct drm_crtc *crtc = new_plane_state->crtc;
7718                 struct drm_crtc_state *new_crtc_state;
7719                 struct drm_framebuffer *fb = new_plane_state->fb;
7720                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7721                 bool plane_needs_flip;
7722                 struct dc_plane_state *dc_plane;
7723                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7724
7725                 /* Cursor plane is handled after stream updates */
7726                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7727                         continue;
7728
7729                 if (!fb || !crtc || pcrtc != crtc)
7730                         continue;
7731
7732                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7733                 if (!new_crtc_state->active)
7734                         continue;
7735
7736                 dc_plane = dm_new_plane_state->dc_state;
7737
7738                 bundle->surface_updates[planes_count].surface = dc_plane;
7739                 if (new_pcrtc_state->color_mgmt_changed) {
7740                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7741                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7742                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7743                 }
7744
7745                 fill_dc_scaling_info(new_plane_state,
7746                                      &bundle->scaling_infos[planes_count]);
7747
7748                 bundle->surface_updates[planes_count].scaling_info =
7749                         &bundle->scaling_infos[planes_count];
7750
7751                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7752
7753                 pflip_present = pflip_present || plane_needs_flip;
7754
7755                 if (!plane_needs_flip) {
7756                         planes_count += 1;
7757                         continue;
7758                 }
7759
7760                 abo = gem_to_amdgpu_bo(fb->obj[0]);
7761
7762                 /*
7763                  * Wait for all fences on this FB. Do limited wait to avoid
7764                  * deadlock during GPU reset when this fence will not signal
7765                  * but we hold reservation lock for the BO.
7766                  */
7767                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7768                                                         false,
7769                                                         msecs_to_jiffies(5000));
7770                 if (unlikely(r <= 0))
                        DRM_ERROR("Waiting for fences timed out!\n");
7772
7773                 fill_dc_plane_info_and_addr(
7774                         dm->adev, new_plane_state,
7775                         afb->tiling_flags,
7776                         &bundle->plane_infos[planes_count],
7777                         &bundle->flip_addrs[planes_count].address,
7778                         afb->tmz_surface, false);
7779
7780                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7781                                  new_plane_state->plane->index,
7782                                  bundle->plane_infos[planes_count].dcc.enable);
7783
7784                 bundle->surface_updates[planes_count].plane_info =
7785                         &bundle->plane_infos[planes_count];
7786
7787                 /*
7788                  * Only allow immediate flips for fast updates that don't
                 * change FB pitch, DCC state, rotation or mirroring.
7790                  */
7791                 bundle->flip_addrs[planes_count].flip_immediate =
7792                         crtc->state->async_flip &&
7793                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7794
7795                 timestamp_ns = ktime_get_ns();
7796                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7797                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7799
7800                 if (!bundle->surface_updates[planes_count].surface) {
7801                         DRM_ERROR("No surface for CRTC: id=%d\n",
7802                                         acrtc_attach->crtc_id);
7803                         continue;
7804                 }
7805
7806                 if (plane == pcrtc->primary)
7807                         update_freesync_state_on_stream(
7808                                 dm,
7809                                 acrtc_state,
7810                                 acrtc_state->stream,
7811                                 dc_plane,
7812                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7813
7814                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7815                                  __func__,
7816                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7817                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7818
7819                 planes_count += 1;
7820
7821         }
7822
7823         if (pflip_present) {
7824                 if (!vrr_active) {
7825                         /* Use old throttling in non-vrr fixed refresh rate mode
7826                          * to keep flip scheduling based on target vblank counts
7827                          * working in a backwards compatible way, e.g., for
7828                          * clients using the GLX_OML_sync_control extension or
7829                          * DRI3/Present extension with defined target_msc.
7830                          */
7831                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                } else {
7834                         /* For variable refresh rate mode only:
7835                          * Get vblank of last completed flip to avoid > 1 vrr
7836                          * flips per video frame by use of throttling, but allow
7837                          * flip programming anywhere in the possibly large
7838                          * variable vrr vblank interval for fine-grained flip
7839                          * timing control and more opportunity to avoid stutter
7840                          * on late submission of flips.
7841                          */
7842                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7843                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7844                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7845                 }
7846
7847                 target_vblank = last_flip_vblank + wait_for_vblank;
7848
7849                 /*
7850                  * Wait until we're out of the vertical blank period before the one
7851                  * targeted by the flip
7852                  */
7853                 while ((acrtc_attach->enabled &&
7854                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7855                                                             0, &vpos, &hpos, NULL,
7856                                                             NULL, &pcrtc->hwmode)
7857                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7858                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7859                         (int)(target_vblank -
7860                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7861                         usleep_range(1000, 1100);
7862                 }
7863
7864                 /**
7865                  * Prepare the flip event for the pageflip interrupt to handle.
7866                  *
7867                  * This only works in the case where we've already turned on the
7868                  * appropriate hardware blocks (eg. HUBP) so in the transition case
7869                  * from 0 -> n planes we have to skip a hardware generated event
7870                  * and rely on sending it from software.
7871                  */
7872                 if (acrtc_attach->base.state->event &&
7873                     acrtc_state->active_planes > 0) {
7874                         drm_crtc_vblank_get(pcrtc);
7875
7876                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7877
7878                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7879                         prepare_flip_isr(acrtc_attach);
7880
7881                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7882                 }
7883
7884                 if (acrtc_state->stream) {
7885                         if (acrtc_state->freesync_vrr_info_changed)
7886                                 bundle->stream_update.vrr_infopacket =
7887                                         &acrtc_state->stream->vrr_infopacket;
7888                 }
7889         }
7890
7891         /* Update the planes if changed or disable if we don't have any. */
7892         if ((planes_count || acrtc_state->active_planes == 0) &&
7893                 acrtc_state->stream) {
7894                 bundle->stream_update.stream = acrtc_state->stream;
7895                 if (new_pcrtc_state->mode_changed) {
7896                         bundle->stream_update.src = acrtc_state->stream->src;
7897                         bundle->stream_update.dst = acrtc_state->stream->dst;
7898                 }
7899
7900                 if (new_pcrtc_state->color_mgmt_changed) {
7901                         /*
7902                          * TODO: This isn't fully correct since we've actually
7903                          * already modified the stream in place.
7904                          */
7905                         bundle->stream_update.gamut_remap =
7906                                 &acrtc_state->stream->gamut_remap_matrix;
7907                         bundle->stream_update.output_csc_transform =
7908                                 &acrtc_state->stream->csc_color_matrix;
7909                         bundle->stream_update.out_transfer_func =
7910                                 acrtc_state->stream->out_transfer_func;
7911                 }
7912
7913                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7914                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7915                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7916
7917                 /*
7918                  * If FreeSync state on the stream has changed then we need to
7919                  * re-adjust the min/max bounds now that DC doesn't handle this
7920                  * as part of commit.
7921                  */
7922                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7923                     amdgpu_dm_vrr_active(acrtc_state)) {
7924                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7925                         dc_stream_adjust_vmin_vmax(
7926                                 dm->dc, acrtc_state->stream,
7927                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7928                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7929                 }
7930                 mutex_lock(&dm->dc_lock);
7931                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7932                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7933                         amdgpu_dm_psr_disable(acrtc_state->stream);
7934
7935                 dc_commit_updates_for_stream(dm->dc,
7936                                                      bundle->surface_updates,
7937                                                      planes_count,
7938                                                      acrtc_state->stream,
7939                                                      &bundle->stream_update,
7940                                                      dc_state);
7941
7942                 /**
7943                  * Enable or disable the interrupts on the backend.
7944                  *
7945                  * Most pipes are put into power gating when unused.
7946                  *
7947                  * When power gating is enabled on a pipe we lose the
7948                  * interrupt enablement state when power gating is disabled.
7949                  *
7950                  * So we need to update the IRQ control state in hardware
7951                  * whenever the pipe turns on (since it could be previously
7952                  * power gated) or off (since some pipes can't be power gated
7953                  * on some ASICs).
7954                  */
7955                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7956                         dm_update_pflip_irq_state(drm_to_adev(dev),
7957                                                   acrtc_attach);
7958
7959                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7960                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7961                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7962                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
7963                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7964                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7965                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7966                         amdgpu_dm_psr_enable(acrtc_state->stream);
7967                 }
7968
7969                 mutex_unlock(&dm->dc_lock);
7970         }
7971
7972         /*
7973          * Update cursor state *after* programming all the planes.
7974          * This avoids redundant programming in the case where we're going
7975          * to be disabling a single plane - those pipes are being disabled.
7976          */
7977         if (acrtc_state->active_planes)
7978                 amdgpu_dm_commit_cursors(state);
7979
7980 cleanup:
7981         kfree(bundle);
7982 }
7983
7984 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7985                                    struct drm_atomic_state *state)
7986 {
7987         struct amdgpu_device *adev = drm_to_adev(dev);
7988         struct amdgpu_dm_connector *aconnector;
7989         struct drm_connector *connector;
7990         struct drm_connector_state *old_con_state, *new_con_state;
7991         struct drm_crtc_state *new_crtc_state;
7992         struct dm_crtc_state *new_dm_crtc_state;
7993         const struct dc_stream_status *status;
7994         int i, inst;
7995
7996         /* Notify device removals. */
7997         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7998                 if (old_con_state->crtc != new_con_state->crtc) {
7999                         /* CRTC changes require notification. */
8000                         goto notify;
8001                 }
8002
8003                 if (!new_con_state->crtc)
8004                         continue;
8005
8006                 new_crtc_state = drm_atomic_get_new_crtc_state(
8007                         state, new_con_state->crtc);
8008
8009                 if (!new_crtc_state)
8010                         continue;
8011
8012                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8013                         continue;
8014
8015         notify:
8016                 aconnector = to_amdgpu_dm_connector(connector);
8017
8018                 mutex_lock(&adev->dm.audio_lock);
8019                 inst = aconnector->audio_inst;
8020                 aconnector->audio_inst = -1;
8021                 mutex_unlock(&adev->dm.audio_lock);
8022
8023                 amdgpu_dm_audio_eld_notify(adev, inst);
8024         }
8025
8026         /* Notify audio device additions. */
8027         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8028                 if (!new_con_state->crtc)
8029                         continue;
8030
8031                 new_crtc_state = drm_atomic_get_new_crtc_state(
8032                         state, new_con_state->crtc);
8033
8034                 if (!new_crtc_state)
8035                         continue;
8036
8037                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8038                         continue;
8039
8040                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8041                 if (!new_dm_crtc_state->stream)
8042                         continue;
8043
8044                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8045                 if (!status)
8046                         continue;
8047
8048                 aconnector = to_amdgpu_dm_connector(connector);
8049
8050                 mutex_lock(&adev->dm.audio_lock);
8051                 inst = status->audio_inst;
8052                 aconnector->audio_inst = inst;
8053                 mutex_unlock(&adev->dm.audio_lock);
8054
8055                 amdgpu_dm_audio_eld_notify(adev, inst);
8056         }
8057 }
8058
8059 /*
8060  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8061  * @crtc_state: the DRM CRTC state
8062  * @stream_state: the DC stream state.
8063  *
8064  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8065  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8066  */
8067 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8068                                                 struct dc_stream_state *stream_state)
8069 {
8070         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8071 }
8072
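/**
 * amdgpu_dm_atomic_commit() - Thin wrapper around the atomic helper commit.
 * @dev: The DRM device
 * @state: The atomic state to commit
 * @nonblock: Whether the commit may return before HW programming completes
 */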
8073 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
8074                                    struct drm_atomic_state *state,
8075                                    bool nonblock)
8076 {
8077         /*
8078          * Add check here for SoC's that support hardware cursor plane, to
8079          * unset legacy_cursor_update
8080          */
8081
8082         return drm_atomic_helper_commit(dev, state, nonblock);
8083
8084         /* TODO: Handle EINTR, re-enable IRQ */
8085 }
8086
8087 /**
8088  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8089  * @state: The atomic state to commit
8090  *
8091  * This will tell DC to commit the constructed DC state from atomic_check,
8092  * programming the hardware. Any failure here implies a hardware failure, since
8093  * atomic check should have filtered out anything non-kosher.
8094  */
8095 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8096 {
8097         struct drm_device *dev = state->dev;
8098         struct amdgpu_device *adev = drm_to_adev(dev);
8099         struct amdgpu_display_manager *dm = &adev->dm;
8100         struct dm_atomic_state *dm_state;
8101         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8102         uint32_t i, j;
8103         struct drm_crtc *crtc;
8104         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8105         unsigned long flags;
8106         bool wait_for_vblank = true;
8107         struct drm_connector *connector;
8108         struct drm_connector_state *old_con_state, *new_con_state;
8109         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8110         int crtc_disable_count = 0;
8111         bool mode_set_reset_required = false;
8112
8113         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8114
8115         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8116
8117         dm_state = dm_atomic_get_new_state(state);
8118         if (dm_state && dm_state->context) {
8119                 dc_state = dm_state->context;
8120         } else {
8121                 /* No state changes, retain current state. */
8122                 dc_state_temp = dc_create_state(dm->dc);
8123                 ASSERT(dc_state_temp);
8124                 dc_state = dc_state_temp;
8125                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8126         }
8127
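        /*
         * Disable interrupts and release streams for CRTCs that are being
         * turned off or are going through a modeset.
         */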
8128         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8129                                        new_crtc_state, i) {
8130                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8131
8132                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8133
8134                 if (old_crtc_state->active &&
8135                     (!new_crtc_state->active ||
8136                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8137                         manage_dm_interrupts(adev, acrtc, false);
8138                         dc_stream_release(dm_old_crtc_state->stream);
8139                 }
8140         }
8141
8142         drm_atomic_helper_calc_timestamping_constants(state);
8143
8144         /* update changed items */
8145         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8146                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8147
8148                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8149                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8150
8151                 DRM_DEBUG_DRIVER(
8152                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8153                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8154                         "connectors_changed:%d\n",
8155                         acrtc->crtc_id,
8156                         new_crtc_state->enable,
8157                         new_crtc_state->active,
8158                         new_crtc_state->planes_changed,
8159                         new_crtc_state->mode_changed,
8160                         new_crtc_state->active_changed,
8161                         new_crtc_state->connectors_changed);
8162
8163                 /* Disable cursor if disabling crtc */
8164                 if (old_crtc_state->active && !new_crtc_state->active) {
8165                         struct dc_cursor_position position;
8166
8167                         memset(&position, 0, sizeof(position));
8168                         mutex_lock(&dm->dc_lock);
8169                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8170                         mutex_unlock(&dm->dc_lock);
8171                 }
8172
8173                 /* Copy all transient state flags into dc state */
8174                 if (dm_new_crtc_state->stream) {
8175                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8176                                                             dm_new_crtc_state->stream);
8177                 }
8178
8179                 /* handles headless hotplug case, updating new_state and
8180                  * aconnector as needed
8181                  */
8182
8183                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8184
8185                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8186
8187                         if (!dm_new_crtc_state->stream) {
8188                                 /*
8189                                  * This could happen because of issues with
8190                                  * userspace notification delivery.
8191                                  * In this case userspace tries to set a mode on
8192                                  * a display that is in fact disconnected;
8193                                  * dc_sink is NULL on the aconnector then.
8194                                  * We expect a mode reset to come soon.
8195                                  *
8196                                  * This can also happen when an unplug occurs
8197                                  * while the resume sequence is still in progress.
8198                                  *
8199                                  * In either case, we want to pretend we still
8200                                  * have a sink to keep the pipe running so that
8201                                  * hw state stays consistent with the sw state.
8202                                  */
8203                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8204                                                 __func__, acrtc->base.base.id);
8205                                 continue;
8206                         }
8207
8208                         if (dm_old_crtc_state->stream)
8209                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8210
8211                         pm_runtime_get_noresume(dev->dev);
8212
8213                         acrtc->enabled = true;
8214                         acrtc->hw_mode = new_crtc_state->mode;
8215                         crtc->hwmode = new_crtc_state->mode;
8216                         mode_set_reset_required = true;
8217                 } else if (modereset_required(new_crtc_state)) {
8218                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8219                         /* i.e. reset mode */
8220                         if (dm_old_crtc_state->stream)
8221                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8222                         mode_set_reset_required = true;
8223                 }
8224         } /* for_each_crtc_in_state() */
8225
8226         if (dc_state) {
8227                 /* if there was a mode set or reset, disable eDP PSR */
8228                 if (mode_set_reset_required)
8229                         amdgpu_dm_psr_disable_all(dm);
8230
8231                 dm_enable_per_frame_crtc_master_sync(dc_state);
8232                 mutex_lock(&dm->dc_lock);
8233                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8234                 mutex_unlock(&dm->dc_lock);
8235         }
8236
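        /* Record the OTG instance backing each CRTC's committed stream. */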
8237         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8238                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8239
8240                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8241
8242                 if (dm_new_crtc_state->stream != NULL) {
8243                         const struct dc_stream_status *status =
8244                                         dc_stream_get_status(dm_new_crtc_state->stream);
8245
8246                         if (!status)
8247                                 status = dc_stream_get_status_from_state(dc_state,
8248                                                                          dm_new_crtc_state->stream);
8249                         if (!status)
8250                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8251                         else
8252                                 acrtc->otg_inst = status->primary_otg_inst;
8253                 }
8254         }
8255 #ifdef CONFIG_DRM_AMD_DC_HDCP
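        /*
         * Re-evaluate HDCP state for each connector: reset it when the stream
         * is being torn down, and update it when the desired content
         * protection changed.
         */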
8256         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8257                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8258                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8259                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8260
8261                 new_crtc_state = NULL;
8262
8263                 if (acrtc)
8264                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8265
8266                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8267
8268                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8269                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8270                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8271                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8272                         dm_new_con_state->update_hdcp = true;
8273                         continue;
8274                 }
8275
8276                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8277                         hdcp_update_display(
8278                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8279                                 new_con_state->hdcp_content_type,
8280                                 new_con_state->content_protection ==
8281                                         DRM_MODE_CONTENT_PROTECTION_DESIRED);
8282         }
8283 #endif
8284
8285         /* Handle connector state changes */
8286         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8287                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8288                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8289                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8290                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8291                 struct dc_stream_update stream_update;
8292                 struct dc_info_packet hdr_packet;
8293                 struct dc_stream_status *status = NULL;
8294                 bool abm_changed, hdr_changed, scaling_changed;
8295
8296                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8297                 memset(&stream_update, 0, sizeof(stream_update));
8298
8299                 if (acrtc) {
8300                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8301                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8302                 }
8303
8304                 /* Skip any modesets/resets */
8305                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8306                         continue;
8307
8308                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8309                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8310
8311                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8312                                                              dm_old_con_state);
8313
8314                 abm_changed = dm_new_crtc_state->abm_level !=
8315                               dm_old_crtc_state->abm_level;
8316
8317                 hdr_changed =
8318                         is_hdr_metadata_different(old_con_state, new_con_state);
8319
8320                 if (!scaling_changed && !abm_changed && !hdr_changed)
8321                         continue;
8322
8323                 stream_update.stream = dm_new_crtc_state->stream;
8324                 if (scaling_changed) {
8325                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8326                                         dm_new_con_state, dm_new_crtc_state->stream);
8327
8328                         stream_update.src = dm_new_crtc_state->stream->src;
8329                         stream_update.dst = dm_new_crtc_state->stream->dst;
8330                 }
8331
8332                 if (abm_changed) {
8333                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8334
8335                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8336                 }
8337
8338                 if (hdr_changed) {
8339                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8340                         stream_update.hdr_static_metadata = &hdr_packet;
8341                 }
8342
8343                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8344                 WARN_ON(!status);
8345                 WARN_ON(!status->plane_count);
8346
8347                 /*
8348                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8349                  * Here we create an empty update on each plane.
8350                  * To fix this, DC should permit updating only stream properties.
8351                  */
8352                 for (j = 0; j < status->plane_count; j++)
8353                         dummy_updates[j].surface = status->plane_states[0];
8354
8355
8356                 mutex_lock(&dm->dc_lock);
8357                 dc_commit_updates_for_stream(dm->dc,
8358                                                      dummy_updates,
8359                                                      status->plane_count,
8360                                                      dm_new_crtc_state->stream,
8361                                                      &stream_update,
8362                                                      dc_state);
8363                 mutex_unlock(&dm->dc_lock);
8364         }
8365
8366         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8367         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8368                                       new_crtc_state, i) {
8369                 if (old_crtc_state->active && !new_crtc_state->active)
8370                         crtc_disable_count++;
8371
8372                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8373                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8374
8375                 /* For freesync config update on crtc state and params for irq */
8376                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8377
8378                 /* Handle vrr on->off / off->on transitions */
8379                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8380                                                 dm_new_crtc_state);
8381         }
8382
8383         /*
8384          * Enable interrupts for CRTCs that are newly enabled or went through
8385          * a modeset. This is intentionally deferred until after the front end
8386          * state has been modified, so the OTG is on and the IRQ handlers
8387          * don't access stale or invalid state.
8388          */
8389         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8390                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8391                 bool configure_crc = false;
8392
8393                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8394
8395                 if (new_crtc_state->active &&
8396                     (!old_crtc_state->active ||
8397                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8398                         dc_stream_retain(dm_new_crtc_state->stream);
8399                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8400                         manage_dm_interrupts(adev, acrtc, true);
8401                 }
8402 #ifdef CONFIG_DEBUG_FS
8403                 if (new_crtc_state->active &&
8404                         amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8405                         /*
8406                          * Frontend may have changed, so reapply the CRC capture
8407                          * settings for the stream.
8408                          */
8409                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8410                         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8411
8412                         if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8413                                 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8414                                         configure_crc = true;
8415                         } else {
8416                                 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8417                                         configure_crc = true;
8418                         }
8419
8420                         if (configure_crc)
8421                                 amdgpu_dm_crtc_configure_crc_source(
8422                                         crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8423                 }
8424 #endif
8425         }
8426
8427         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8428                 if (new_crtc_state->async_flip)
8429                         wait_for_vblank = false;
8430
8431         /* update planes when needed per crtc*/
8432         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8433                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8434
8435                 if (dm_new_crtc_state->stream)
8436                         amdgpu_dm_commit_planes(state, dc_state, dev,
8437                                                 dm, crtc, wait_for_vblank);
8438         }
8439
8440         /* Update audio instances for each connector. */
8441         amdgpu_dm_commit_audio(dev, state);
8442
8443         /*
8444          * Send vblank events for any CRTC events not handled in the flip
8445          * path, and mark them consumed for drm_atomic_helper_commit_hw_done().
8446          */
8447         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8448         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8449
8450                 if (new_crtc_state->event)
8451                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8452
8453                 new_crtc_state->event = NULL;
8454         }
8455         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8456
8457         /* Signal HW programming completion */
8458         drm_atomic_helper_commit_hw_done(state);
8459
8460         if (wait_for_vblank)
8461                 drm_atomic_helper_wait_for_flip_done(dev, state);
8462
8463         drm_atomic_helper_cleanup_planes(dev, state);
8464
8465         /* return the stolen vga memory back to VRAM */
8466         if (!adev->mman.keep_stolen_vga_memory)
8467                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8468         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8469
8470         /*
8471          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8472          * so we can put the GPU into runtime suspend if we're not driving any
8473          * displays anymore
8474          */
8475         for (i = 0; i < crtc_disable_count; i++)
8476                 pm_runtime_put_autosuspend(dev->dev);
8477         pm_runtime_mark_last_busy(dev->dev);
8478
8479         if (dc_state_temp)
8480                 dc_release_state(dc_state_temp);
8481 }
8482
8483
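/**
 * dm_force_atomic_commit() - Commit a minimal state to reprogram a display.
 * @connector: The connector whose pipe should be restored
 *
 * Constructs an atomic state containing just the connector, its CRTC and the
 * primary plane, forces mode_changed on the CRTC and commits it, restoring
 * the previous display configuration without a usermode request.
 */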
8484 static int dm_force_atomic_commit(struct drm_connector *connector)
8485 {
8486         int ret = 0;
8487         struct drm_device *ddev = connector->dev;
8488         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8489         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8490         struct drm_plane *plane = disconnected_acrtc->base.primary;
8491         struct drm_connector_state *conn_state;
8492         struct drm_crtc_state *crtc_state;
8493         struct drm_plane_state *plane_state;
8494
8495         if (!state)
8496                 return -ENOMEM;
8497
8498         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8499
8500         /* Construct an atomic state to restore the previous display setting */
8501
8502         /*
8503          * Attach connectors to drm_atomic_state
8504          */
8505         conn_state = drm_atomic_get_connector_state(state, connector);
8506
8507         ret = PTR_ERR_OR_ZERO(conn_state);
8508         if (ret)
8509                 goto err;
8510
8511         /* Attach crtc to drm_atomic_state */
8512         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8513
8514         ret = PTR_ERR_OR_ZERO(crtc_state);
8515         if (ret)
8516                 goto err;
8517
8518         /* force a restore */
8519         crtc_state->mode_changed = true;
8520
8521         /* Attach plane to drm_atomic_state */
8522         plane_state = drm_atomic_get_plane_state(state, plane);
8523
8524         ret = PTR_ERR_OR_ZERO(plane_state);
8525         if (ret)
8526                 goto err;
8527
8528
8529         /* Call commit internally with the state we just constructed */
8530         ret = drm_atomic_commit(state);
8531         if (!ret)
8532                 return 0;
8533
8534 err:
8535         DRM_ERROR("Restoring old state failed with %i\n", ret);
8536         drm_atomic_state_put(state);
8537
8538         return ret;
8539 }
8540
8541 /*
8542  * This function handles all cases when a set mode does not arrive upon hotplug.
8543  * This includes when a display is unplugged then plugged back into the
8544  * same port, and when running without usermode desktop manager support.
8545  */
8546 void dm_restore_drm_connector_state(struct drm_device *dev,
8547                                     struct drm_connector *connector)
8548 {
8549         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8550         struct amdgpu_crtc *disconnected_acrtc;
8551         struct dm_crtc_state *acrtc_state;
8552
8553         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8554                 return;
8555
8556         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8557         if (!disconnected_acrtc)
8558                 return;
8559
8560         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8561         if (!acrtc_state->stream)
8562                 return;
8563
8564         /*
8565          * If the previous sink is not released and different from the current,
8566          * we deduce we are in a state where we cannot rely on a usermode call
8567          * to turn on the display, so we do it here.
8568          */
8569         if (acrtc_state->stream->sink != aconnector->dc_sink)
8570                 dm_force_atomic_commit(&aconnector->base);
8571 }
8572
8573 /*
8574  * Grabs all modesetting locks to serialize against any blocking commits,
8575  * and waits for completion of all non-blocking commits.
8576  */
8577 static int do_aquire_global_lock(struct drm_device *dev,
8578                                  struct drm_atomic_state *state)
8579 {
8580         struct drm_crtc *crtc;
8581         struct drm_crtc_commit *commit;
8582         long ret;
8583
8584         /*
8585          * Adding all modeset locks to acquire_ctx will
8586          * ensure that when the framework releases it, the
8587          * extra locks we are taking here will get released too.
8588          */
8589         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8590         if (ret)
8591                 return ret;
8592
8593         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8594                 spin_lock(&crtc->commit_lock);
8595                 commit = list_first_entry_or_null(&crtc->commit_list,
8596                                 struct drm_crtc_commit, commit_entry);
8597                 if (commit)
8598                         drm_crtc_commit_get(commit);
8599                 spin_unlock(&crtc->commit_lock);
8600
8601                 if (!commit)
8602                         continue;
8603
8604                 /*
8605                  * Make sure all pending HW programming has completed and
8606                  * all page flips are done.
8607                  */
8608                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8609
8610                 if (ret > 0)
8611                         ret = wait_for_completion_interruptible_timeout(
8612                                         &commit->flip_done, 10*HZ);
8613
8614                 if (ret == 0)
8615                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
8616                                   crtc->base.id, crtc->name);
8617
8618                 drm_crtc_commit_put(commit);
8619         }
8620
8621         return ret < 0 ? ret : 0;
8622 }
8623
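/**
 * get_freesync_config_for_crtc() - Compute the VRR configuration for a CRTC.
 * @new_crtc_state: The new DM CRTC state
 * @new_con_state: The new DM connector state
 *
 * Marks the CRTC as VRR-capable when the sink reports FreeSync support and
 * the mode's refresh rate lies within the sink's min/max range, then fills
 * in the corresponding mod_freesync_config on the CRTC state.
 */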
8624 static void get_freesync_config_for_crtc(
8625         struct dm_crtc_state *new_crtc_state,
8626         struct dm_connector_state *new_con_state)
8627 {
8628         struct mod_freesync_config config = {0};
8629         struct amdgpu_dm_connector *aconnector =
8630                         to_amdgpu_dm_connector(new_con_state->base.connector);
8631         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8632         int vrefresh = drm_mode_vrefresh(mode);
8633
8634         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8635                                         vrefresh >= aconnector->min_vfreq &&
8636                                         vrefresh <= aconnector->max_vfreq;
8637
8638         if (new_crtc_state->vrr_supported) {
8639                 new_crtc_state->stream->ignore_msa_timing_param = true;
8640                 config.state = new_crtc_state->base.vrr_enabled ?
8641                                 VRR_STATE_ACTIVE_VARIABLE :
8642                                 VRR_STATE_INACTIVE;
8643                 config.min_refresh_in_uhz =
8644                                 aconnector->min_vfreq * 1000000;
8645                 config.max_refresh_in_uhz =
8646                                 aconnector->max_vfreq * 1000000;
8647                 config.vsif_supported = true;
8648                 config.btr = true;
8649         }
8650
8651         new_crtc_state->freesync_config = config;
8652 }
8653
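/* Clear VRR support and any stale VRR infopacket from the CRTC state. */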
8654 static void reset_freesync_config_for_crtc(
8655         struct dm_crtc_state *new_crtc_state)
8656 {
8657         new_crtc_state->vrr_supported = false;
8658
8659         memset(&new_crtc_state->vrr_infopacket, 0,
8660                sizeof(new_crtc_state->vrr_infopacket));
8661 }
8662
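/**
 * dm_update_crtc_state() - Add or remove the DC stream for a CRTC.
 * @dm: The display manager
 * @state: The atomic state being checked
 * @crtc: The CRTC being updated
 * @old_crtc_state: The old DRM CRTC state
 * @new_crtc_state: The new DRM CRTC state
 * @enable: False on the stream-removal pass, true on the stream-add pass
 * @lock_and_validation_needed: Set when the commit requires the global lock
 *                              and full DC validation
 *
 * Called from atomic_check once with @enable false to remove streams for
 * disabled or modeset CRTCs, and once with @enable true to create and add
 * new streams. Also applies stream updates that don't require a full
 * modeset: scaling, ABM, color management and FreeSync settings.
 */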
8663 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8664                                 struct drm_atomic_state *state,
8665                                 struct drm_crtc *crtc,
8666                                 struct drm_crtc_state *old_crtc_state,
8667                                 struct drm_crtc_state *new_crtc_state,
8668                                 bool enable,
8669                                 bool *lock_and_validation_needed)
8670 {
8671         struct dm_atomic_state *dm_state = NULL;
8672         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8673         struct dc_stream_state *new_stream;
8674         int ret = 0;
8675
8676         /*
8677          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
8678          * dc_validation_set, and update changed items there.
8679          */
8680         struct amdgpu_crtc *acrtc = NULL;
8681         struct amdgpu_dm_connector *aconnector = NULL;
8682         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8683         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8684
8685         new_stream = NULL;
8686
8687         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8688         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8689         acrtc = to_amdgpu_crtc(crtc);
8690         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8691
8692         /* TODO This hack should go away */
8693         if (aconnector && enable) {
8694                 /* Make sure fake sink is created in plug-in scenario */
8695                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8696                                                             &aconnector->base);
8697                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8698                                                             &aconnector->base);
8699
8700                 if (IS_ERR(drm_new_conn_state)) {
8701                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8702                         goto fail;
8703                 }
8704
8705                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8706                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8707
8708                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8709                         goto skip_modeset;
8710
8711                 new_stream = create_validate_stream_for_sink(aconnector,
8712                                                              &new_crtc_state->mode,
8713                                                              dm_new_conn_state,
8714                                                              dm_old_crtc_state->stream);
8715
8716                 /*
8717                  * We can have no stream on ACTION_SET if a display
8718                  * was disconnected during S3; in this case it is not an
8719                  * error, the OS will be updated after detection and
8720                  * will do the right thing on the next atomic commit.
8721                  */
8722
8723                 if (!new_stream) {
8724                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8725                                         __func__, acrtc->base.base.id);
8726                         ret = -ENOMEM;
8727                         goto fail;
8728                 }
8729
8730                 /*
8731                  * TODO: Check VSDB bits to decide whether this should
8732                  * be enabled or not.
8733                  */
8734                 new_stream->triggered_crtc_reset.enabled =
8735                         dm->force_timing_sync;
8736
8737                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8738
8739                 ret = fill_hdr_info_packet(drm_new_conn_state,
8740                                            &new_stream->hdr_static_metadata);
8741                 if (ret)
8742                         goto fail;
8743
8744                 /*
8745                  * If we already removed the old stream from the context
8746                  * (and set the new stream to NULL) then we can't reuse
8747                  * the old stream even if the stream and scaling are unchanged.
8748                  * We'll hit the BUG_ON below and get a black screen.
8749                  *
8750                  * TODO: Refactor this function to allow this check to work
8751                  * in all conditions.
8752                  */
8753                 if (dm_new_crtc_state->stream &&
8754                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8755                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8756                         new_crtc_state->mode_changed = false;
8757                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8758                                          new_crtc_state->mode_changed);
8759                 }
8760         }
8761
8762         /* mode_changed flag may get updated above, need to check again */
8763         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8764                 goto skip_modeset;
8765
8766         DRM_DEBUG_DRIVER(
8767                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8768                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8769                 "connectors_changed:%d\n",
8770                 acrtc->crtc_id,
8771                 new_crtc_state->enable,
8772                 new_crtc_state->active,
8773                 new_crtc_state->planes_changed,
8774                 new_crtc_state->mode_changed,
8775                 new_crtc_state->active_changed,
8776                 new_crtc_state->connectors_changed);
8777
8778         /* Remove stream for any changed/disabled CRTC */
8779         if (!enable) {
8780
8781                 if (!dm_old_crtc_state->stream)
8782                         goto skip_modeset;
8783
8784                 ret = dm_atomic_get_state(state, &dm_state);
8785                 if (ret)
8786                         goto fail;
8787
8788                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8789                                 crtc->base.id);
8790
8791                 /* i.e. reset mode */
8792                 if (dc_remove_stream_from_ctx(
8793                                 dm->dc,
8794                                 dm_state->context,
8795                                 dm_old_crtc_state->stream) != DC_OK) {
8796                         ret = -EINVAL;
8797                         goto fail;
8798                 }
8799
8800                 dc_stream_release(dm_old_crtc_state->stream);
8801                 dm_new_crtc_state->stream = NULL;
8802
8803                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8804
8805                 *lock_and_validation_needed = true;
8806
8807         } else {/* Add stream for any updated/enabled CRTC */
8808                 /*
8809                  * Quick fix to prevent a NULL pointer dereference on new_stream when
8810                  * newly added MST connectors are not found in the existing crtc_state
8811                  * in chained mode. TODO: dig out the root cause of this.
8812                  */
8813                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8814                         goto skip_modeset;
8815
8816                 if (modereset_required(new_crtc_state))
8817                         goto skip_modeset;
8818
8819                 if (modeset_required(new_crtc_state, new_stream,
8820                                      dm_old_crtc_state->stream)) {
8821
8822                         WARN_ON(dm_new_crtc_state->stream);
8823
8824                         ret = dm_atomic_get_state(state, &dm_state);
8825                         if (ret)
8826                                 goto fail;
8827
8828                         dm_new_crtc_state->stream = new_stream;
8829
8830                         dc_stream_retain(new_stream);
8831
8832                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8833                                                 crtc->base.id);
8834
8835                         if (dc_add_stream_to_ctx(
8836                                         dm->dc,
8837                                         dm_state->context,
8838                                         dm_new_crtc_state->stream) != DC_OK) {
8839                                 ret = -EINVAL;
8840                                 goto fail;
8841                         }
8842
8843                         *lock_and_validation_needed = true;
8844                 }
8845         }
8846
8847 skip_modeset:
8848         /* Release extra reference */
8849         if (new_stream)
8850                 dc_stream_release(new_stream);
8851
8852         /*
8853          * We want to do dc stream updates that do not require a
8854          * full modeset below.
8855          */
8856         if (!(enable && aconnector && new_crtc_state->active))
8857                 return 0;
8858         /*
8859          * Given the above conditions, the dc state cannot be NULL because:
8860          * 1. We're in the process of enabling CRTCs (the stream was just
8861          *    added to the dc context, or is already on the context),
8862          * 2. A valid connector is attached, and
8863          * 3. The CRTC is currently active and enabled.
8864          * => The dc stream state currently exists.
8865          */
8866         BUG_ON(dm_new_crtc_state->stream == NULL);
8867
8868         /* Scaling or underscan settings */
8869         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8870                 update_stream_scaling_settings(
8871                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8872
8873         /* ABM settings */
8874         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8875
8876         /*
8877          * Color management settings. We also update color properties
8878          * when a modeset is needed, to ensure it gets reprogrammed.
8879          */
8880         if (dm_new_crtc_state->base.color_mgmt_changed ||
8881             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8882                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8883                 if (ret)
8884                         goto fail;
8885         }
8886
8887         /* Update Freesync settings. */
8888         get_freesync_config_for_crtc(dm_new_crtc_state,
8889                                      dm_new_conn_state);
8890
8891         return ret;
8892
8893 fail:
8894         if (new_stream)
8895                 dc_stream_release(new_stream);
8896         return ret;
8897 }
8898
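/*
 * Decide whether a plane update requires removing and recreating every
 * plane on the stream. Conservatively returns true for modesets, color
 * management changes, and any z-order, scaling, rotation, blending, format
 * or tiling change on a plane sharing the same CRTC.
 */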
8899 static bool should_reset_plane(struct drm_atomic_state *state,
8900                                struct drm_plane *plane,
8901                                struct drm_plane_state *old_plane_state,
8902                                struct drm_plane_state *new_plane_state)
8903 {
8904         struct drm_plane *other;
8905         struct drm_plane_state *old_other_state, *new_other_state;
8906         struct drm_crtc_state *new_crtc_state;
8907         int i;
8908
8909         /*
8910          * TODO: Remove this hack once the checks below are sufficient
8911          * to determine when we need to reset all the planes on
8912          * the stream.
8913          */
8914         if (state->allow_modeset)
8915                 return true;
8916
8917         /* Exit early if we know that we're adding or removing the plane. */
8918         if (old_plane_state->crtc != new_plane_state->crtc)
8919                 return true;
8920
8921         /* old crtc == new_crtc == NULL, plane not in context. */
8922         if (!new_plane_state->crtc)
8923                 return false;
8924
8925         new_crtc_state =
8926                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8927
8928         if (!new_crtc_state)
8929                 return true;
8930
8931         /* CRTC Degamma changes currently require us to recreate planes. */
8932         if (new_crtc_state->color_mgmt_changed)
8933                 return true;
8934
8935         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8936                 return true;
8937
8938         /*
8939          * If there are any new primary or overlay planes being added or
8940          * removed then the z-order can potentially change. To ensure
8941          * correct z-order and pipe acquisition the current DC architecture
8942          * requires us to remove and recreate all existing planes.
8943          *
8944          * TODO: Come up with a more elegant solution for this.
8945          */
8946         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8947                 struct amdgpu_framebuffer *old_afb, *new_afb;
8948                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8949                         continue;
8950
8951                 if (old_other_state->crtc != new_plane_state->crtc &&
8952                     new_other_state->crtc != new_plane_state->crtc)
8953                         continue;
8954
8955                 if (old_other_state->crtc != new_other_state->crtc)
8956                         return true;
8957
8958                 /* Src/dst size and scaling updates. */
8959                 if (old_other_state->src_w != new_other_state->src_w ||
8960                     old_other_state->src_h != new_other_state->src_h ||
8961                     old_other_state->crtc_w != new_other_state->crtc_w ||
8962                     old_other_state->crtc_h != new_other_state->crtc_h)
8963                         return true;
8964
8965                 /* Rotation / mirroring updates. */
8966                 if (old_other_state->rotation != new_other_state->rotation)
8967                         return true;
8968
8969                 /* Blending updates. */
8970                 if (old_other_state->pixel_blend_mode !=
8971                     new_other_state->pixel_blend_mode)
8972                         return true;
8973
8974                 /* Alpha updates. */
8975                 if (old_other_state->alpha != new_other_state->alpha)
8976                         return true;
8977
8978                 /* Colorspace changes. */
8979                 if (old_other_state->color_range != new_other_state->color_range ||
8980                     old_other_state->color_encoding != new_other_state->color_encoding)
8981                         return true;
8982
8983                 /* Framebuffer checks fall at the end. */
8984                 if (!old_other_state->fb || !new_other_state->fb)
8985                         continue;
8986
8987                 /* Pixel format changes can require bandwidth updates. */
8988                 if (old_other_state->fb->format != new_other_state->fb->format)
8989                         return true;
8990
8991                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8992                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8993
8994                 /* Tiling and DCC changes also require bandwidth updates. */
8995                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8996                     old_afb->base.modifier != new_afb->base.modifier)
8997                         return true;
8998         }
8999
9000         return false;
9001 }
9002
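/*
 * Validate a framebuffer for use by the cursor plane: it must fit within the
 * maximum cursor dimensions, be uncropped, have a pitch of 64, 128 or 256
 * pixels, and be linear when no format modifier is supplied.
 */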
9003 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9004                               struct drm_plane_state *new_plane_state,
9005                               struct drm_framebuffer *fb)
9006 {
9007         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9008         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9009         unsigned int pitch;
9010         bool linear;
9011
9012         if (fb->width > new_acrtc->max_cursor_width ||
9013             fb->height > new_acrtc->max_cursor_height) {
9014                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9015                                  new_plane_state->fb->width,
9016                                  new_plane_state->fb->height);
9017                 return -EINVAL;
9018         }
9019         if (new_plane_state->src_w != fb->width << 16 ||
9020             new_plane_state->src_h != fb->height << 16) {
9021                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9022                 return -EINVAL;
9023         }
9024
9025         /* Pitch in pixels */
9026         pitch = fb->pitches[0] / fb->format->cpp[0];
9027
9028         if (fb->width != pitch) {
9029                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9030                                  fb->width, pitch);
9031                 return -EINVAL;
9032         }
9033
9034         switch (pitch) {
9035         case 64:
9036         case 128:
9037         case 256:
9038                 /* FB pitch is supported by cursor plane */
9039                 break;
9040         default:
9041                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9042                 return -EINVAL;
9043         }
9044
9045         /* Core DRM takes care of checking FB modifiers, so we only need to
9046          * check tiling flags when the FB doesn't have a modifier. */
9047         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9048                 if (adev->family < AMDGPU_FAMILY_AI) {
9049                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9050                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9051                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9052                 } else {
9053                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9054                 }
9055                 if (!linear) {
9056                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9057                         return -EINVAL;
9058                 }
9059         }
9060
9061         return 0;
9062 }
9063
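/**
 * dm_update_plane_state() - Add or remove a plane in the DC context.
 * @dc: The DC core
 * @state: The atomic state being checked
 * @plane: The plane being updated
 * @old_plane_state: The old DRM plane state
 * @new_plane_state: The new DRM plane state
 * @enable: False on the plane-removal pass, true on the plane-add pass
 * @lock_and_validation_needed: Set when the commit requires the global lock
 *                              and full DC validation
 *
 * Counterpart of dm_update_crtc_state() for planes: removes changed or
 * disabled planes from the DC context on the first pass and creates and
 * attaches new dc_plane_states on the second. Cursor planes are only
 * validated here, since DC handles the cursor per pipe rather than as a
 * regular plane.
 */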
9064 static int dm_update_plane_state(struct dc *dc,
9065                                  struct drm_atomic_state *state,
9066                                  struct drm_plane *plane,
9067                                  struct drm_plane_state *old_plane_state,
9068                                  struct drm_plane_state *new_plane_state,
9069                                  bool enable,
9070                                  bool *lock_and_validation_needed)
9071 {
9072
9073         struct dm_atomic_state *dm_state = NULL;
9074         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9075         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9076         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9077         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9078         struct amdgpu_crtc *new_acrtc;
9079         bool needs_reset;
9080         int ret = 0;
9081
9082
9083         new_plane_crtc = new_plane_state->crtc;
9084         old_plane_crtc = old_plane_state->crtc;
9085         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9086         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9087
9088         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9089                 if (!enable || !new_plane_crtc ||
9090                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9091                         return 0;
9092
9093                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9094
9095                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9096                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9097                         return -EINVAL;
9098                 }
9099
9100                 if (new_plane_state->fb) {
9101                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9102                                                  new_plane_state->fb);
9103                         if (ret)
9104                                 return ret;
9105                 }
9106
9107                 return 0;
9108         }
9109
9110         needs_reset = should_reset_plane(state, plane, old_plane_state,
9111                                          new_plane_state);
9112
9113         /* Remove any changed/removed planes */
9114         if (!enable) {
9115                 if (!needs_reset)
9116                         return 0;
9117
9118                 if (!old_plane_crtc)
9119                         return 0;
9120
9121                 old_crtc_state = drm_atomic_get_old_crtc_state(
9122                                 state, old_plane_crtc);
9123                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9124
9125                 if (!dm_old_crtc_state->stream)
9126                         return 0;
9127
9128                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9129                                 plane->base.id, old_plane_crtc->base.id);
9130
9131                 ret = dm_atomic_get_state(state, &dm_state);
9132                 if (ret)
9133                         return ret;
9134
9135                 if (!dc_remove_plane_from_context(
9136                                 dc,
9137                                 dm_old_crtc_state->stream,
9138                                 dm_old_plane_state->dc_state,
9139                                 dm_state->context)) {
9140
9141                         return -EINVAL;
9142                 }
9143
9144
9145                 dc_plane_state_release(dm_old_plane_state->dc_state);
9146                 dm_new_plane_state->dc_state = NULL;
9147
9148                 *lock_and_validation_needed = true;
9149
9150         } else { /* Add new planes */
9151                 struct dc_plane_state *dc_new_plane_state;
9152
9153                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9154                         return 0;
9155
9156                 if (!new_plane_crtc)
9157                         return 0;
9158
9159                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9160                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9161
9162                 if (!dm_new_crtc_state->stream)
9163                         return 0;
9164
9165                 if (!needs_reset)
9166                         return 0;
9167
9168                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9169                 if (ret)
9170                         return ret;
9171
9172                 WARN_ON(dm_new_plane_state->dc_state);
9173
9174                 dc_new_plane_state = dc_create_plane_state(dc);
9175                 if (!dc_new_plane_state)
9176                         return -ENOMEM;
9177
9178                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9179                                 plane->base.id, new_plane_crtc->base.id);
9180
9181                 ret = fill_dc_plane_attributes(
9182                         drm_to_adev(new_plane_crtc->dev),
9183                         dc_new_plane_state,
9184                         new_plane_state,
9185                         new_crtc_state);
9186                 if (ret) {
9187                         dc_plane_state_release(dc_new_plane_state);
9188                         return ret;
9189                 }
9190
9191                 ret = dm_atomic_get_state(state, &dm_state);
9192                 if (ret) {
9193                         dc_plane_state_release(dc_new_plane_state);
9194                         return ret;
9195                 }
9196
9197                 /*
9198                  * Any atomic check errors that occur after this will
9199                  * not need a release. The plane state will be attached
9200                  * to the stream, and therefore part of the atomic
9201                  * state. It'll be released when the atomic state is
9202                  * cleaned.
9203                  */
9204                 if (!dc_add_plane_to_context(
9205                                 dc,
9206                                 dm_new_crtc_state->stream,
9207                                 dc_new_plane_state,
9208                                 dm_state->context)) {
9209
9210                         dc_plane_state_release(dc_new_plane_state);
9211                         return -EINVAL;
9212                 }
9213
9214                 dm_new_plane_state->dc_state = dc_new_plane_state;
9215
9216                 /* Tell DC to do a full surface update every time there
9217                  * is a plane change. Inefficient, but works for now.
9218                  */
9219                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9220
9221                 *lock_and_validation_needed = true;
9222         }
9223
9224
9225         return ret;
9226 }
9227
9228 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9229                                 struct drm_crtc *crtc,
9230                                 struct drm_crtc_state *new_crtc_state)
9231 {
9232         struct drm_plane_state *new_cursor_state, *new_primary_state;
9233         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9234
9235         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9236          * cursor per pipe but it's going to inherit the scaling and
9237          * positioning from the underlying pipe. Check that the cursor plane's
9238          * blending properties match the primary plane's. */
9239
9240         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9241         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9242         if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9243                 return 0;
9244         }
9245
9246         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9247                          (new_cursor_state->src_w >> 16);
9248         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9249                          (new_cursor_state->src_h >> 16);
9250
9251         primary_scale_w = new_primary_state->crtc_w * 1000 /
9252                          (new_primary_state->src_w >> 16);
9253         primary_scale_h = new_primary_state->crtc_h * 1000 /
9254                          (new_primary_state->src_h >> 16);
9255
9256         if (cursor_scale_w != primary_scale_w ||
9257             cursor_scale_h != primary_scale_h) {
9258                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9259                 return -EINVAL;
9260         }
9261
9262         return 0;
9263 }
9264
9265 #if defined(CONFIG_DRM_AMD_DC_DCN)
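/*
 * If this CRTC drives an MST connector, pull every CRTC sharing that
 * connector's MST topology into the atomic state, since DSC bandwidth may
 * need to be redistributed across the whole topology.
 */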
9266 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9267 {
9268         struct drm_connector *connector;
9269         struct drm_connector_state *conn_state;
9270         struct amdgpu_dm_connector *aconnector = NULL;
9271         int i;
9272         for_each_new_connector_in_state(state, connector, conn_state, i) {
9273                 if (conn_state->crtc != crtc)
9274                         continue;
9275
9276                 aconnector = to_amdgpu_dm_connector(connector);
9277                 if (!aconnector->port || !aconnector->mst_port)
9278                         aconnector = NULL;
9279                 else
9280                         break;
9281         }
9282
9283         if (!aconnector)
9284                 return 0;
9285
9286         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9287 }
9288 #endif
9289
9290 /**
9291  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9292  * @dev: The DRM device
9293  * @state: The atomic state to validate
9294  *
9295  * Validate that the given atomic state is programmable by DC into hardware.
9296  * This involves constructing a &struct dc_state reflecting the new hardware
9297  * state we wish to commit, then querying DC to see if it is programmable. It's
9298  * important not to modify the existing DC state. Otherwise, atomic_check
9299  * may unexpectedly commit hardware changes.
9300  *
9301  * When validating the DC state, it's important that the right locks are
9302  * acquired. For a full update that removes/adds/updates streams on one
9303  * CRTC while flipping on another, acquiring the global lock guarantees
9304  * that the commit waits for the completion of any outstanding flips,
9305  * using DRM's synchronization events.
9306  *
9307  * Note that DM adds the affected connectors for all CRTCs in state, when that
9308  * might not seem necessary. This is because DC stream creation requires the
9309  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9310  * be possible but non-trivial - a possible TODO item.
9311  *
9312  * Return: 0 on success, a negative error code on validation failure.
9313  */
9314 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9315                                   struct drm_atomic_state *state)
9316 {
9317         struct amdgpu_device *adev = drm_to_adev(dev);
9318         struct dm_atomic_state *dm_state = NULL;
9319         struct dc *dc = adev->dm.dc;
9320         struct drm_connector *connector;
9321         struct drm_connector_state *old_con_state, *new_con_state;
9322         struct drm_crtc *crtc;
9323         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9324         struct drm_plane *plane;
9325         struct drm_plane_state *old_plane_state, *new_plane_state;
9326         enum dc_status status;
9327         int ret, i;
9328         bool lock_and_validation_needed = false;
9329         struct dm_crtc_state *dm_old_crtc_state;
9330
9331         trace_amdgpu_dm_atomic_check_begin(state);
9332
9333         ret = drm_atomic_helper_check_modeset(dev, state);
9334         if (ret)
9335                 goto fail;
9336
9337         /* Check connector changes */
9338         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9339                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9340                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9341
9342                 /* Skip connectors that are disabled or already part of a modeset. */
9343                 if (!old_con_state->crtc && !new_con_state->crtc)
9344                         continue;
9345
9346                 if (!new_con_state->crtc)
9347                         continue;
9348
9349                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9350                 if (IS_ERR(new_crtc_state)) {
9351                         ret = PTR_ERR(new_crtc_state);
9352                         goto fail;
9353                 }
9354
9355                 if (dm_old_con_state->abm_level !=
9356                     dm_new_con_state->abm_level)
9357                         new_crtc_state->connectors_changed = true;
9358         }
9359
9360 #if defined(CONFIG_DRM_AMD_DC_DCN)
9361         if (adev->asic_type >= CHIP_NAVI10) {
9362                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9363                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9364                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9365                                 if (ret)
9366                                         goto fail;
9367                         }
9368                 }
9369         }
9370 #endif
9371         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9372                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9373
9374                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9375                     !new_crtc_state->color_mgmt_changed &&
9376                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9377                     !dm_old_crtc_state->dsc_force_changed)
9378                         continue;
9379
9380                 if (!new_crtc_state->enable)
9381                         continue;
9382
9383                 ret = drm_atomic_add_affected_connectors(state, crtc);
9384                 if (ret)
9385                         goto fail;
9386
9387                 ret = drm_atomic_add_affected_planes(state, crtc);
9388                 if (ret)
9389                         goto fail;
9390
9391                 if (dm_old_crtc_state->dsc_force_changed)
9392                         new_crtc_state->mode_changed = true;
9393         }
9394
9395         /*
9396          * Add all primary and overlay planes on the CRTC to the state
9397          * whenever a plane is enabled to maintain correct z-ordering
9398          * and to enable fast surface updates.
9399          */
9400         drm_for_each_crtc(crtc, dev) {
9401                 bool modified = false;
9402
9403                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9404                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9405                                 continue;
9406
9407                         if (new_plane_state->crtc == crtc ||
9408                             old_plane_state->crtc == crtc) {
9409                                 modified = true;
9410                                 break;
9411                         }
9412                 }
9413
9414                 if (!modified)
9415                         continue;
9416
9417                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9418                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9419                                 continue;
9420
9421                         new_plane_state =
9422                                 drm_atomic_get_plane_state(state, plane);
9423
9424                         if (IS_ERR(new_plane_state)) {
9425                                 ret = PTR_ERR(new_plane_state);
9426                                 goto fail;
9427                         }
9428                 }
9429         }
9430
9431         /* Remove existing planes if they are modified */
9432         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9433                 ret = dm_update_plane_state(dc, state, plane,
9434                                             old_plane_state,
9435                                             new_plane_state,
9436                                             false,
9437                                             &lock_and_validation_needed);
9438                 if (ret)
9439                         goto fail;
9440         }
9441
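        /*
         * The plane removals above and the CRTC disables below are processed
         * before any enables so that hardware resources (pipes, bandwidth)
         * freed by the outgoing configuration are available again when the
         * new streams and planes are added further down.
         */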
9442         /* Disable all CRTCs that require disabling */
9443         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9444                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9445                                            old_crtc_state,
9446                                            new_crtc_state,
9447                                            false,
9448                                            &lock_and_validation_needed);
9449                 if (ret)
9450                         goto fail;
9451         }
9452
9453         /* Enable all CRTCs that require enabling */
9454         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9455                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9456                                            old_crtc_state,
9457                                            new_crtc_state,
9458                                            true,
9459                                            &lock_and_validation_needed);
9460                 if (ret)
9461                         goto fail;
9462         }
9463
9464         /* Add new/modified planes */
9465         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9466                 ret = dm_update_plane_state(dc, state, plane,
9467                                             old_plane_state,
9468                                             new_plane_state,
9469                                             true,
9470                                             &lock_and_validation_needed);
9471                 if (ret)
9472                         goto fail;
9473         }
9474
9475         /* Run this here since we want to validate the streams we created */
9476         ret = drm_atomic_helper_check_planes(dev, state);
9477         if (ret)
9478                 goto fail;
9479
9480         /* Check cursor planes scaling */
9481         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9482                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9483                 if (ret)
9484                         goto fail;
9485         }
9486
9487         if (state->legacy_cursor_update) {
9488                 /*
9489                  * This is a fast cursor update coming from the plane update
9490                  * helper, check if it can be done asynchronously for better
9491                  * performance.
9492                  */
9493                 state->async_update =
9494                         !drm_atomic_helper_async_check(dev, state);
9495
9496                 /*
9497                  * Skip the remaining global validation if this is an async
9498                  * update. Cursor updates can be done without affecting
9499                  * state or bandwidth calcs and this avoids the performance
9500                  * penalty of locking the private state object and
9501                  * allocating a new dc_state.
9502                  */
9503                 if (state->async_update)
9504                         return 0;
9505         }
9506
9507         /* Check scaling and underscan changes */
9508         /* TODO: Removed scaling changes validation due to inability to commit
9509          * new stream into context w/o causing full reset. Need to
9510          * decide how to handle.
9511          */
9512         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9513                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9514                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9515                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9516
9517                 /* Skip any modesets/resets */
9518                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9519                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9520                         continue;
9521
9522                 /* Skip anything that is not a scaling or underscan change */
9523                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9524                         continue;
9525
9526                 lock_and_validation_needed = true;
9527         }
9528
9529         /*
9530          * Streams and planes are reset when there are changes that affect
9531          * bandwidth. Anything that affects bandwidth needs to go through
9532          * DC global validation to ensure that the configuration can be applied
9533          * to hardware.
9534          *
9535          * We have to currently stall out here in atomic_check for outstanding
9536          * commits to finish in this case because our IRQ handlers reference
9537          * DRM state directly - we can end up disabling interrupts too early
9538          * if we don't.
9539          *
9540          * TODO: Remove this stall and drop DM state private objects.
9541          */
9542         if (lock_and_validation_needed) {
9543                 ret = dm_atomic_get_state(state, &dm_state);
9544                 if (ret)
9545                         goto fail;
9546
9547                 ret = do_aquire_global_lock(dev, state);
9548                 if (ret)
9549                         goto fail;
9550
9551 #if defined(CONFIG_DRM_AMD_DC_DCN)
9552                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9553                         goto fail;
9554
9555                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9556                 if (ret)
9557                         goto fail;
9558 #endif
9559
9560                 /*
9561                  * Perform validation of MST topology in the state:
9562                  * We need to perform MST atomic check before calling
9563                  * dc_validate_global_state(), or there is a chance
9564                  * to get stuck in an infinite loop and hang eventually.
9565                  */
9566                 ret = drm_dp_mst_atomic_check(state);
9567                 if (ret)
9568                         goto fail;
9569                 status = dc_validate_global_state(dc, dm_state->context, false);
9570                 if (status != DC_OK) {
9571                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
9572                                        dc_status_to_str(status), status);
9573                         ret = -EINVAL;
9574                         goto fail;
9575                 }
9576         } else {
9577                 /*
9578                  * The commit is a fast update. Fast updates shouldn't change
9579                  * the DC context, affect global validation, and can have their
9580                  * commit work done in parallel with other commits not touching
9581                  * the same resource. If we have a new DC context as part of
9582                  * the DM atomic state from validation we need to free it and
9583                  * retain the existing one instead.
9584                  *
9585                  * Furthermore, since the DM atomic state only contains the DC
9586                  * context and can safely be annulled, we can free the state
9587                  * and clear the associated private object now to free
9588                  * some memory and avoid a possible use-after-free later.
9589                  */
9590
9591                 for (i = 0; i < state->num_private_objs; i++) {
9592                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9593
9594                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9595                                 int j = state->num_private_objs - 1;
9596
9597                                 dm_atomic_destroy_state(obj,
9598                                                 state->private_objs[i].state);
9599
9600                                 /* If i is not at the end of the array then the
9601                                  * last element needs to be moved to where i was
9602                                  * before the array can safely be truncated.
9603                                  */
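                                /* Worked example: with private_objs = [A, DM, B]
                                 * and i == 1, B is copied into slot 1 and
                                 * num_private_objs drops to 2.
                                 */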
9604                                 if (i != j)
9605                                         state->private_objs[i] =
9606                                                 state->private_objs[j];
9607
9608                                 state->private_objs[j].ptr = NULL;
9609                                 state->private_objs[j].state = NULL;
9610                                 state->private_objs[j].old_state = NULL;
9611                                 state->private_objs[j].new_state = NULL;
9612
9613                                 state->num_private_objs = j;
9614                                 break;
9615                         }
9616                 }
9617         }
9618
9619         /* Store the overall update type for use later in atomic check. */
9620         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9621                 struct dm_crtc_state *dm_new_crtc_state =
9622                         to_dm_crtc_state(new_crtc_state);
9623
9624                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9625                                                          UPDATE_TYPE_FULL :
9626                                                          UPDATE_TYPE_FAST;
9627         }
9628
9629         /* ret must indicate success (zero) at this point */
9630         WARN_ON(ret);
9631
9632         trace_amdgpu_dm_atomic_check_finish(state, ret);
9633
9634         return ret;
9635
9636 fail:
9637         if (ret == -EDEADLK)
9638                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9639         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9640                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9641         else
9642                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9643
9644         trace_amdgpu_dm_atomic_check_finish(state, ret);
9645
9646         return ret;
9647 }
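/*
 * For reference, this is the path exercised when userspace probes a
 * configuration with DRM_MODE_ATOMIC_TEST_ONLY. A minimal, illustrative
 * libdrm sketch (not part of this driver; the object and property IDs are
 * placeholders):
 *
 *      drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *      // Stage a property change, e.g. attach a framebuffer to a plane.
 *      drmModeAtomicAddProperty(req, plane_id, fb_id_prop, fb_id);
 *
 *      // TEST_ONLY runs the full atomic_check path without touching the
 *      // hardware; 0 means the state would have been programmable.
 *      int ret = drmModeAtomicCommit(fd, req,
 *                                    DRM_MODE_ATOMIC_TEST_ONLY, NULL);
 *      if (ret == 0)
 *              ret = drmModeAtomicCommit(fd, req, 0, NULL);
 *
 *      drmModeAtomicFree(req);
 */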
9648
9649 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9650                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9651 {
9652         uint8_t dpcd_data;
9653         bool capable = false;
9654
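        /*
         * DP_MSA_TIMING_PAR_IGNORED (bit 6 of the DPCD DOWN_STREAM_PORT_COUNT
         * register read below) indicates that the sink can render the video
         * stream without the MSA timing parameters, a prerequisite for
         * driving it at a variable refresh rate.
         */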
9655         if (amdgpu_dm_connector->dc_link &&
9656                 dm_helpers_dp_read_dpcd(
9657                                 NULL,
9658                                 amdgpu_dm_connector->dc_link,
9659                                 DP_DOWN_STREAM_PORT_COUNT,
9660                                 &dpcd_data,
9661                                 sizeof(dpcd_data))) {
9662                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9663         }
9664
9665         return capable;
9666 }
9667 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9668                                         struct edid *edid)
9669 {
9670         int i;
9671         bool edid_check_required;
9672         struct detailed_timing *timing;
9673         struct detailed_non_pixel *data;
9674         struct detailed_data_monitor_range *range;
9675         struct amdgpu_dm_connector *amdgpu_dm_connector =
9676                         to_amdgpu_dm_connector(connector);
9677         struct dm_connector_state *dm_con_state = NULL;
9678
9679         struct drm_device *dev = connector->dev;
9680         struct amdgpu_device *adev = drm_to_adev(dev);
9681         bool freesync_capable = false;
9682
9683         if (!connector->state) {
9684                 DRM_ERROR("%s - Connector has no state\n", __func__);
9685                 goto update;
9686         }
9687
9688         if (!edid) {
9689                 dm_con_state = to_dm_connector_state(connector->state);
9690
9691                 amdgpu_dm_connector->min_vfreq = 0;
9692                 amdgpu_dm_connector->max_vfreq = 0;
9693                 amdgpu_dm_connector->pixel_clock_mhz = 0;
9694
9695                 goto update;
9696         }
9697
9698         dm_con_state = to_dm_connector_state(connector->state);
9699
9700         edid_check_required = false;
9701         if (!amdgpu_dm_connector->dc_sink) {
9702                 DRM_ERROR("dc_sink is NULL, could not add the freesync module.\n");
9703                 goto update;
9704         }
9705         if (!adev->dm.freesync_module)
9706                 goto update;
9707         /*
9708          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP.
9709          */
9710         if (edid) {
9711                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9712                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9713                         edid_check_required = is_dp_capable_without_timing_msa(
9714                                                 adev->dm.dc,
9715                                                 amdgpu_dm_connector);
9716                 }
9717         }
9718         if (edid_check_required && (edid->version > 1 ||
9719             (edid->version == 1 && edid->revision > 1))) {
9720                 for (i = 0; i < 4; i++) {
9721
9722                         timing  = &edid->detailed_timings[i];
9723                         data    = &timing->data.other_data;
9724                         range   = &data->data.range;
9725                         /*
9726                          * Check if the monitor has a continuous frequency mode
9727                          */
9728                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
9729                                 continue;
9730                         /*
9731                          * Check for flag range limits only. If flag == 1 then
9732                          * no additional timing information provided.
9733                          * Default GTF, GTF Secondary curve and CVT are not
9734                          * supported
9735                          */
9736                         if (range->flags != 1)
9737                                 continue;
9738
9739                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9740                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9741                         amdgpu_dm_connector->pixel_clock_mhz =
9742                                 range->pixel_clock_mhz * 10;
9743                         break;
9744                 }
9745
9746                 if (amdgpu_dm_connector->max_vfreq -
9747                     amdgpu_dm_connector->min_vfreq > 10)
9748                         freesync_capable = true;
9751         }
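        /*
         * Worked example: a monitor range descriptor (flags == 1)
         * advertising 40-144 Hz yields min_vfreq = 40 and max_vfreq = 144;
         * the range exceeds 10 Hz, so the connector is reported as
         * freesync_capable.
         */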
9752
9753 update:
9754         if (dm_con_state)
9755                 dm_con_state->freesync_capable = freesync_capable;
9756
9757         if (connector->vrr_capable_property)
9758                 drm_connector_set_vrr_capable_property(connector,
9759                                                        freesync_capable);
9760 }
9761
9762 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9763 {
9764         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9765
9766         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9767                 return;
9768         if (link->type == dc_connection_none)
9769                 return;
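        /*
         * DPCD register DP_PSR_SUPPORT (0x070) reports the sink's PSR
         * capability: 0 means PSR is not supported, otherwise the value is
         * the supported PSR version.
         */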
9770         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9771                                         dpcd_data, sizeof(dpcd_data))) {
9772                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9773
9774                 if (dpcd_data[0] == 0) {
9775                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9776                         link->psr_settings.psr_feature_enabled = false;
9777                 } else {
9778                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9779                         link->psr_settings.psr_feature_enabled = true;
9780                 }
9781
9782                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9783         }
9784 }
9785
9786 /*
9787  * amdgpu_dm_link_setup_psr() - configure the PSR link
9788  * @stream: stream state
9789  *
9790  * Return: true on success
9791  */
9792 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9793 {
9794         struct dc_link *link = NULL;
9795         struct psr_config psr_config = {0};
9796         struct psr_context psr_context = {0};
9797         bool ret = false;
9798
9799         if (!stream)
9800                 return false;
9801
9802         link = stream->link;
9803
9804         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9805
9806         if (psr_config.psr_version > 0) {
9807                 psr_config.psr_exit_link_training_required = 0x1;
9808                 psr_config.psr_frame_capture_indication_req = 0;
9809                 psr_config.psr_rfb_setup_time = 0x37;
9810                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9811                 psr_config.allow_smu_optimizations = 0x0;
9812
9813                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9814
9815         }
9816         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9817
9818         return ret;
9819 }
9820
9821 /*
9822  * amdgpu_dm_psr_enable() - enable psr f/w
9823  * @stream: stream state
9824  *
9825  * Return: true on success
9826  */
9827 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9828 {
9829         struct dc_link *link = stream->link;
9830         unsigned int vsync_rate_hz = 0;
9831         struct dc_static_screen_params params = {0};
9832         /* Calculate number of static frames before generating interrupt to
9833          * enter PSR. Start from a fail-safe default of 2 static frames,
9834          * refined below once the vsync rate is known.
9835          */
9836         unsigned int num_frames_static = 2;
9837
9838         DRM_DEBUG_DRIVER("Enabling psr...\n");
9839
9840         vsync_rate_hz = div64_u64(div64_u64((
9841                         stream->timing.pix_clk_100hz * 100),
9842                         stream->timing.v_total),
9843                         stream->timing.h_total);
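        /*
         * pix_clk_100hz is in 100 Hz units, hence the multiply by 100.
         * Worked example for 1920x1080@60 CEA timing: a 148.5 MHz pixel
         * clock with v_total = 1125 and h_total = 2200 gives
         * 148500000 / 1125 / 2200 = 60 Hz.
         */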
9844
9845         /*
9846          * Calculate the number of frames such that at least 30 ms of time
9847          * has passed, rounding up.
9848          */
9849         if (vsync_rate_hz != 0) {
9850                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9851                 num_frames_static = (30000 / frame_time_microsec) + 1;
9852         }
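        /*
         * Worked example: at 60 Hz the frame time is 1000000 / 60 = 16666 us,
         * so num_frames_static = 30000 / 16666 + 1 = 2 frames; at 144 Hz
         * (6944 us per frame) it becomes 30000 / 6944 + 1 = 5 frames.
         */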
9853
9854         params.triggers.cursor_update = true;
9855         params.triggers.overlay_update = true;
9856         params.triggers.surface_update = true;
9857         params.num_frames = num_frames_static;
9858
9859         dc_stream_set_static_screen_params(link->ctx->dc,
9860                                            &stream, 1,
9861                                            &params);
9862
9863         return dc_link_set_psr_allow_active(link, true, false, false);
9864 }
9865
9866 /*
9867  * amdgpu_dm_psr_disable() - disable psr f/w
9868  * @stream:  stream state
9869  *
9870  * Return: true on success
9871  */
9872 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9873 {
9875         DRM_DEBUG_DRIVER("Disabling psr...\n");
9876
9877         return dc_link_set_psr_allow_active(stream->link, false, true, false);
9878 }
9879
9880 /*
9881  * amdgpu_dm_psr_disable_all() - disable psr f/w
9882  * if psr is enabled on any stream
9883  *
9884  * Return: true on success
9885  */
9886 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9887 {
9888         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9889         return dc_set_psr_allow_active(dm->dc, false);
9890 }
9891
9892 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9893 {
9894         struct amdgpu_device *adev = drm_to_adev(dev);
9895         struct dc *dc = adev->dm.dc;
9896         int i;
9897
9898         mutex_lock(&adev->dm.dc_lock);
9899         if (dc->current_state) {
9900                 for (i = 0; i < dc->current_state->stream_count; ++i)
9901                         dc->current_state->streams[i]
9902                                 ->triggered_crtc_reset.enabled =
9903                                 adev->dm.force_timing_sync;
9904
9905                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9906                 dc_trigger_sync(dc, dc->current_state);
9907         }
9908         mutex_unlock(&adev->dm.dc_lock);
9909 }
9910
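/*
 * Low-level register accessors used on DC's behalf. Both route through CGS
 * and emit the amdgpu_dc_wreg/amdgpu_dc_rreg tracepoints, so register
 * traffic can be inspected with ftrace.
 */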
9911 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9912                        uint32_t value, const char *func_name)
9913 {
9914 #ifdef DM_CHECK_ADDR_0
9915         if (address == 0) {
9916                 DC_ERR("invalid register write. address = 0");
9917                 return;
9918         }
9919 #endif
9920         cgs_write_register(ctx->cgs_device, address, value);
9921         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9922 }
9923
9924 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9925                           const char *func_name)
9926 {
9927         uint32_t value;
9928 #ifdef DM_CHECK_ADDR_0
9929         if (address == 0) {
9930                 DC_ERR("invalid register read; address = 0\n");
9931                 return 0;
9932         }
9933 #endif
9934
9935         if (ctx->dmub_srv &&
9936             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9937             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9938                 ASSERT(false);
9939                 return 0;
9940         }
9941
9942         value = cgs_read_register(ctx->cgs_device, address);
9943
9944         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9945
9946         return value;
9947 }