drm/amdgpu/display: handle aux backlight in backlight_get_brightness
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

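                /*
                 * Pack the results the way the legacy registers did: the
                 * low 16 bits carry the vertical position / vblank start,
                 * the high 16 bits the horizontal position / vblank end.
                 */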
                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

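/* Map an OTG instance reported by an interrupt source back to its CRTC. */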
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

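/*
 * FreeSync/VRR counts as active in both the "variable" and "fixed" active
 * states. The _irq variant below reads the copy cached in dm_irq_params,
 * which is the one the interrupt handlers use; the other variant reads
 * the CRTC's atomic state.
 */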
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        if (!e)
                WARN_ON(1);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

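/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode VUPDATE fires after the end of the front-porch, so core
 * vblank handling (and BTR processing on pre-DCE12 parts) is deferred
 * to this handler when VRR is active.
 */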
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

                DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

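/*
 * Initialize the audio pin bookkeeping and register the audio component
 * so the HDA driver can bind to it and fetch ELDs via get_eld().
 */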
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

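/*
 * Copy the DMUB firmware, BSS/data and VBIOS images into their reserved
 * framebuffer windows and bring the DMCUB hardware up. Returns 0 without
 * error when the ASIC has no DMUB support.
 */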
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
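/*
 * Collect the GMC framebuffer aperture, AGP window and GART page-table
 * layout into the dc_phy_addr_space_config that DC programs into the
 * display memory hub.
 */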
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that makes it unable to use VRAM
                 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. Work around it by
                 * raising the system aperture high address by 1 to avoid
                 * the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
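/*
 * Deferred vblank work: track how many CRTCs currently have vblank IRQs
 * enabled and only let DC enter its idle (MALL stutter) optimizations
 * when that count drops to zero.
 */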
static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif
        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

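/* Pick and load the per-ASIC DMCU firmware; most ASICs don't need one. */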
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

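/* Register accessors handed to the DMUB service at creation time. */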
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
        struct amdgpu_device *adev = ctx;

        return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
                                     uint32_t value)
{
        struct amdgpu_device *adev = ctx;

        return dm_write_reg(adev->dm.dc->ctx, address, value);
}

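/*
 * Fetch and validate the DMUB firmware, create the DMUB service, size its
 * memory regions and back them with a VRAM allocation (TODO: move to GART).
 */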
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
        struct dmub_srv_create_params create_params;
        struct dmub_srv_region_params region_params;
        struct dmub_srv_region_info region_info;
        struct dmub_srv_fb_params fb_params;
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
        const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;

        switch (adev->asic_type) {
        case CHIP_RENOIR:
                dmub_asic = DMUB_ASIC_DCN21;
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
                break;
        case CHIP_SIENNA_CICHLID:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                break;
        case CHIP_NAVY_FLOUNDER:
                dmub_asic = DMUB_ASIC_DCN30;
                fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
                break;
        case CHIP_VANGOGH:
                dmub_asic = DMUB_ASIC_DCN301;
                fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
                break;
        case CHIP_DIMGREY_CAVEFISH:
                dmub_asic = DMUB_ASIC_DCN302;
                fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
                break;
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }

        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return 0;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }

        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;

        if (!dmub_srv) {
                DRM_ERROR("Failed to allocate DMUB service!\n");
                return -ENOMEM;
        }

        memset(&create_params, 0, sizeof(create_params));
        create_params.user_ctx = adev;
        create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
        create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
        create_params.asic = dmub_asic;

        /* Create the DMUB service. */
        status = dmub_srv_create(dmub_srv, &create_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error creating DMUB service: %d\n", status);
                return -EINVAL;
        }

        /* Calculate the size of all the regions for the DMUB service. */
        memset(&region_params, 0, sizeof(region_params));

        region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                                        PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
        region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
        region_params.vbios_size = adev->bios_size;
        region_params.fw_bss_data = region_params.bss_data_size ?
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes) : NULL;
        region_params.fw_inst_const =
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;

        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);

        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB region info: %d\n", status);
                return -EINVAL;
        }

        /*
         * Allocate a framebuffer based on the total size of all the regions.
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
                return r;

        /* Rebase the regions on the framebuffer address. */
        memset(&fb_params, 0, sizeof(fb_params));
        fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
        fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
        fb_params.region_info = &region_info;

        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
        fb_info = adev->dm.dmub_fb_info;

        if (!fb_info) {
                DRM_ERROR(
                        "Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }

        status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
                return -EINVAL;
        }

        return 0;
}

static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = dm_dmub_sw_init(adev);
        if (r)
                return r;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->dm.dmub_fb_info);
1510         adev->dm.dmub_fb_info = NULL;
1511
1512         if (adev->dm.dmub_srv) {
1513                 dmub_srv_destroy(adev->dm.dmub_srv);
1514                 adev->dm.dmub_srv = NULL;
1515         }
1516
1517         release_firmware(adev->dm.dmub_fw);
1518         adev->dm.dmub_fw = NULL;
1519
1520         release_firmware(adev->dm.fw_dmcu);
1521         adev->dm.fw_dmcu = NULL;
1522
1523         return 0;
1524 }
1525
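/*
 * (Re)start MST topology management on every connector whose link is an MST
 * branch; on failure, fall back to treating the link as a single connection.
 */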
1526 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1527 {
1528         struct amdgpu_dm_connector *aconnector;
1529         struct drm_connector *connector;
1530         struct drm_connector_list_iter iter;
1531         int ret = 0;
1532
1533         drm_connector_list_iter_begin(dev, &iter);
1534         drm_for_each_connector_iter(connector, &iter) {
1535                 aconnector = to_amdgpu_dm_connector(connector);
1536                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1537                     aconnector->mst_mgr.aux) {
1538                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1539                                          aconnector,
1540                                          aconnector->base.base.id);
1541
1542                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1543                         if (ret < 0) {
1544                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1545                                 aconnector->dc_link->type =
1546                                         dc_connection_single;
1547                                 break;
1548                         }
1549                 }
1550         }
1551         drm_connector_list_iter_end(&iter);
1552
1553         return ret;
1554 }
1555
1556 static int dm_late_init(void *handle)
1557 {
1558         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1559
1560         struct dmcu_iram_parameters params;
1561         unsigned int linear_lut[16];
1562         int i;
1563         struct dmcu *dmcu = NULL;
1564         bool ret = true;
1565
1566         dmcu = adev->dm.dc->res_pool->dmcu;
1567
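        /* Build a 16-entry linear backlight LUT spanning 0x0000..0xFFFF. */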
1568         for (i = 0; i < 16; i++)
1569                 linear_lut[i] = 0xFFFF * i / 15;
1570
1571         params.set = 0;
1572         params.backlight_ramping_start = 0xCCCC;
1573         params.backlight_ramping_reduction = 0xCCCCCCCC;
1574         params.backlight_lut_array_size = 16;
1575         params.backlight_lut_array = linear_lut;
1576
1577         /* Min backlight level after ABM reduction; don't allow below 1%:
1578          * 0xFFFF * 0.01 = 0x28F
1579          */
1580         params.min_abm_backlight = 0x28F;
1581
1582         /* In the case where ABM is implemented on dmcub,
1583          * the dmcu object will be NULL.
1584          * ABM 2.4 and up are implemented on dmcub.
1585          */
1586         if (dmcu)
1587                 ret = dmcu_load_iram(dmcu, params);
1588         else if (adev->dm.dc->ctx->dmub_srv)
1589                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1590
1591         if (!ret)
1592                 return -EINVAL;
1593
1594         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1595 }
1596
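/*
 * Suspend or resume the DP MST topology manager of every MST root connector.
 * If a manager fails to resume, tear down its MST state and schedule a
 * hotplug event so userspace can reprobe the topology.
 */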
1597 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1598 {
1599         struct amdgpu_dm_connector *aconnector;
1600         struct drm_connector *connector;
1601         struct drm_connector_list_iter iter;
1602         struct drm_dp_mst_topology_mgr *mgr;
1603         int ret;
1604         bool need_hotplug = false;
1605
1606         drm_connector_list_iter_begin(dev, &iter);
1607         drm_for_each_connector_iter(connector, &iter) {
1608                 aconnector = to_amdgpu_dm_connector(connector);
1609                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1610                     aconnector->mst_port)
1611                         continue;
1612
1613                 mgr = &aconnector->mst_mgr;
1614
1615                 if (suspend) {
1616                         drm_dp_mst_topology_mgr_suspend(mgr);
1617                 } else {
1618                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1619                         if (ret < 0) {
1620                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1621                                 need_hotplug = true;
1622                         }
1623                 }
1624         }
1625         drm_connector_list_iter_end(&iter);
1626
1627         if (need_hotplug)
1628                 drm_kms_helper_hotplug_event(dev);
1629 }
1630
1631 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1632 {
1633         struct smu_context *smu = &adev->smu;
1634         int ret = 0;
1635
1636         if (!is_support_sw_smu(adev))
1637                 return 0;
1638
1639         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1640          * depends on the Windows driver dc implementation.
1641          * For Navi1x, the clock settings of the dcn watermarks are fixed; they
1642          * should be passed to smu during boot up and on resume from S3.
1643          * Boot up: dc calculates the dcn watermark clock settings within
1644          * dc_create / dcn20_resource_construct,
1645          * then calls the pplib functions below to pass the settings to smu:
1646          * smu_set_watermarks_for_clock_ranges
1647          * smu_set_watermarks_table
1648          * navi10_set_watermarks_table
1649          * smu_write_watermarks_table
1650          *
1651          * For Renoir, the clock settings of the dcn watermarks are also fixed
1652          * values. dc implements a different flow for the Windows driver:
1653          * dc_hardware_init / dc_set_power_state
1654          * dcn10_init_hw
1655          * notify_wm_ranges
1656          * set_wm_ranges
1657          * -- Linux
1658          * smu_set_watermarks_for_clock_ranges
1659          * renoir_set_watermarks_table
1660          * smu_write_watermarks_table
1661          *
1662          * For Linux,
1663          * dc_hardware_init -> amdgpu_dm_init
1664          * dc_set_power_state -> dm_resume
1665          *
1666          * Therefore, this function applies to navi10/12/14 but not to Renoir.
1667          */
1669         switch (adev->asic_type) {
1670         case CHIP_NAVI10:
1671         case CHIP_NAVI14:
1672         case CHIP_NAVI12:
1673                 break;
1674         default:
1675                 return 0;
1676         }
1677
1678         ret = smu_write_watermarks_table(smu);
1679         if (ret) {
1680                 DRM_ERROR("Failed to update WMTABLE!\n");
1681                 return ret;
1682         }
1683
1684         return 0;
1685 }
1686
1687 /**
1688  * dm_hw_init() - Initialize DC device
1689  * @handle: The base driver device containing the amdgpu_dm device.
1690  *
1691  * Initialize the &struct amdgpu_display_manager device. This involves calling
1692  * the initializers of each DM component, then populating the struct with them.
1693  *
1694  * Although the function implies hardware initialization, both hardware and
1695  * software are initialized here. Splitting them out to their relevant init
1696  * hooks is a future TODO item.
1697  *
1698  * Some notable things that are initialized here:
1699  *
1700  * - Display Core, both software and hardware
1701  * - DC modules that we need (freesync and color management)
1702  * - DRM software states
1703  * - Interrupt sources and handlers
1704  * - Vblank support
1705  * - Debug FS entries, if enabled
1706  */
1707 static int dm_hw_init(void *handle)
1708 {
1709         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710         /* Create DAL display manager */
1711         amdgpu_dm_init(adev);
1712         amdgpu_dm_hpd_init(adev);
1713
1714         return 0;
1715 }
1716
1717 /**
1718  * dm_hw_fini() - Teardown DC device
1719  * @handle: The base driver device containing the amdgpu_dm device.
1720  *
1721  * Teardown components within &struct amdgpu_display_manager that require
1722  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1723  * were loaded. Also flush IRQ workqueues and disable them.
1724  */
1725 static int dm_hw_fini(void *handle)
1726 {
1727         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1728
1729         amdgpu_dm_hpd_fini(adev);
1730
1731         amdgpu_dm_irq_fini(adev);
1732         amdgpu_dm_fini(adev);
1733         return 0;
1734 }
1735
1736
1737 static int dm_enable_vblank(struct drm_crtc *crtc);
1738 static void dm_disable_vblank(struct drm_crtc *crtc);
1739
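/*
 * Enable or disable the pflip and vblank interrupts of every CRTC that backs
 * a stream with at least one plane in the given DC state. Used to quiesce and
 * restore display interrupts around GPU reset.
 */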
1740 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1741                                  struct dc_state *state, bool enable)
1742 {
1743         enum dc_irq_source irq_source;
1744         struct amdgpu_crtc *acrtc;
1745         int rc = -EBUSY;
1746         int i = 0;
1747
1748         for (i = 0; i < state->stream_count; i++) {
1749                 acrtc = get_crtc_by_otg_inst(
1750                                 adev, state->stream_status[i].primary_otg_inst);
1751
1752                 if (acrtc && state->stream_status[i].plane_count != 0) {
1753                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1754                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1755                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1756                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1757                         if (rc)
1758                                 DRM_WARN("Failed to %s pflip interrupts\n",
1759                                          enable ? "enable" : "disable");
1760
1761                         if (enable) {
1762                                 rc = dm_enable_vblank(&acrtc->base);
1763                                 if (rc)
1764                                         DRM_WARN("Failed to enable vblank interrupts\n");
1765                         } else {
1766                                 dm_disable_vblank(&acrtc->base);
1767                         }
1768
1769                 }
1770         }
1771
1772 }
1773
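/*
 * Build a copy of the current DC state with all planes and streams removed,
 * then commit it, effectively blanking every display. Used on suspend while a
 * GPU reset is in progress.
 */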
1774 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1775 {
1776         struct dc_state *context = NULL;
1777         enum dc_status res = DC_ERROR_UNEXPECTED;
1778         int i;
1779         struct dc_stream_state *del_streams[MAX_PIPES];
1780         int del_streams_count = 0;
1781
1782         memset(del_streams, 0, sizeof(del_streams));
1783
1784         context = dc_create_state(dc);
1785         if (context == NULL)
1786                 goto context_alloc_fail;
1787
1788         dc_resource_state_copy_construct_current(dc, context);
1789
1790         /* First, remove all streams from the context */
1791         for (i = 0; i < context->stream_count; i++) {
1792                 struct dc_stream_state *stream = context->streams[i];
1793
1794                 del_streams[del_streams_count++] = stream;
1795         }
1796
1797         /* Remove all planes for removed streams and then remove the streams */
1798         for (i = 0; i < del_streams_count; i++) {
1799                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1800                         res = DC_FAIL_DETACH_SURFACES;
1801                         goto fail;
1802                 }
1803
1804                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1805                 if (res != DC_OK)
1806                         goto fail;
1807         }
1808
1809
1810         res = dc_validate_global_state(dc, context, false);
1811
1812         if (res != DC_OK) {
1813                 DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1814                 goto fail;
1815         }
1816
1817         res = dc_commit_state(dc, context);
1818
1819 fail:
1820         dc_release_state(context);
1821
1822 context_alloc_fail:
1823         return res;
1824 }
1825
1826 static int dm_suspend(void *handle)
1827 {
1828         struct amdgpu_device *adev = handle;
1829         struct amdgpu_display_manager *dm = &adev->dm;
1830         int ret = 0;
1831
1832         if (amdgpu_in_reset(adev)) {
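                /*
                 * Note that dc_lock is taken here but released in dm_resume(),
                 * after the cached DC state has been recommitted post-reset.
                 */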
1833                 mutex_lock(&dm->dc_lock);
1834
1835 #if defined(CONFIG_DRM_AMD_DC_DCN)
1836                 dc_allow_idle_optimizations(adev->dm.dc, false);
1837 #endif
1838
1839                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1840
1841                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1842
1843                 amdgpu_dm_commit_zero_streams(dm->dc);
1844
1845                 amdgpu_dm_irq_suspend(adev);
1846
1847                 return ret;
1848         }
1849
1850         WARN_ON(adev->dm.cached_state);
1851         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1852
1853         s3_handle_mst(adev_to_drm(adev), true);
1854
1855         amdgpu_dm_irq_suspend(adev);
1856
1857
1858         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1859
1860         return 0;
1861 }
1862
1863 static struct amdgpu_dm_connector *
1864 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1865                                              struct drm_crtc *crtc)
1866 {
1867         uint32_t i;
1868         struct drm_connector_state *new_con_state;
1869         struct drm_connector *connector;
1870         struct drm_crtc *crtc_from_state;
1871
1872         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1873                 crtc_from_state = new_con_state->crtc;
1874
1875                 if (crtc_from_state == crtc)
1876                         return to_amdgpu_dm_connector(connector);
1877         }
1878
1879         return NULL;
1880 }
1881
1882 static void emulated_link_detect(struct dc_link *link)
1883 {
1884         struct dc_sink_init_data sink_init_data = { 0 };
1885         struct display_sink_capability sink_caps = { 0 };
1886         enum dc_edid_status edid_status;
1887         struct dc_context *dc_ctx = link->ctx;
1888         struct dc_sink *sink = NULL;
1889         struct dc_sink *prev_sink = NULL;
1890
1891         link->type = dc_connection_none;
1892         prev_sink = link->local_sink;
1893
1894         if (prev_sink)
1895                 dc_sink_release(prev_sink);
1896
1897         switch (link->connector_signal) {
1898         case SIGNAL_TYPE_HDMI_TYPE_A: {
1899                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1900                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1901                 break;
1902         }
1903
1904         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1905                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1906                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1907                 break;
1908         }
1909
1910         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1911                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1912                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1913                 break;
1914         }
1915
1916         case SIGNAL_TYPE_LVDS: {
1917                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1918                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1919                 break;
1920         }
1921
1922         case SIGNAL_TYPE_EDP: {
1923                 sink_caps.transaction_type =
1924                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1925                 sink_caps.signal = SIGNAL_TYPE_EDP;
1926                 break;
1927         }
1928
1929         case SIGNAL_TYPE_DISPLAY_PORT: {
1930                 sink_caps.transaction_type =
1931                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1932                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1933                 break;
1934         }
1935
1936         default:
1937                 DC_ERROR("Invalid connector type! signal:%d\n",
1938                         link->connector_signal);
1939                 return;
1940         }
1941
1942         sink_init_data.link = link;
1943         sink_init_data.sink_signal = sink_caps.signal;
1944
1945         sink = dc_sink_create(&sink_init_data);
1946         if (!sink) {
1947                 DC_ERROR("Failed to create sink!\n");
1948                 return;
1949         }
1950
1951         /* dc_sink_create returns a new reference */
1952         link->local_sink = sink;
1953
1954         edid_status = dm_helpers_read_local_edid(
1955                         link->ctx,
1956                         link,
1957                         sink);
1958
1959         if (edid_status != EDID_OK)
1960                 DC_ERROR("Failed to read EDID");
1961
1962 }
1963
1964 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1965                                      struct amdgpu_display_manager *dm)
1966 {
1967         struct {
1968                 struct dc_surface_update surface_updates[MAX_SURFACES];
1969                 struct dc_plane_info plane_infos[MAX_SURFACES];
1970                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1971                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1972                 struct dc_stream_update stream_update;
1973         } *bundle;
1974         int k, m;
1975
1976         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1977
1978         if (!bundle) {
1979                 dm_error("Failed to allocate update bundle\n");
1980                 goto cleanup;
1981         }
1982
1983         for (k = 0; k < dc_state->stream_count; k++) {
1984                 bundle->stream_update.stream = dc_state->streams[k];
1985
1986                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1987                         bundle->surface_updates[m].surface =
1988                                 dc_state->stream_status->plane_states[m];
1989                         bundle->surface_updates[m].surface->force_full_update =
1990                                 true;
1991                 }
1992                 dc_commit_updates_for_stream(
1993                         dm->dc, bundle->surface_updates,
1994                         dc_state->stream_status->plane_count,
1995                         dc_state->streams[k], &bundle->stream_update, dc_state);
1996         }
1997
1998 cleanup:
1999         kfree(bundle);
2000
2001         return;
2002 }
2003
2004 static void dm_set_dpms_off(struct dc_link *link)
2005 {
2006         struct dc_stream_state *stream_state;
2007         struct amdgpu_dm_connector *aconnector = link->priv;
2008         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2009         struct dc_stream_update stream_update;
2010         bool dpms_off = true;
2011
2012         memset(&stream_update, 0, sizeof(stream_update));
2013         stream_update.dpms_off = &dpms_off;
2014
2015         mutex_lock(&adev->dm.dc_lock);
2016         stream_state = dc_stream_find_from_link(link);
2017
2018         if (stream_state == NULL) {
2019                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2020                 mutex_unlock(&adev->dm.dc_lock);
2021                 return;
2022         }
2023
2024         stream_update.stream = stream_state;
2025         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2026                                      stream_state, &stream_update,
2027                                      stream_state->ctx->dc->current_state);
2028         mutex_unlock(&adev->dm.dc_lock);
2029 }
2030
2031 static int dm_resume(void *handle)
2032 {
2033         struct amdgpu_device *adev = handle;
2034         struct drm_device *ddev = adev_to_drm(adev);
2035         struct amdgpu_display_manager *dm = &adev->dm;
2036         struct amdgpu_dm_connector *aconnector;
2037         struct drm_connector *connector;
2038         struct drm_connector_list_iter iter;
2039         struct drm_crtc *crtc;
2040         struct drm_crtc_state *new_crtc_state;
2041         struct dm_crtc_state *dm_new_crtc_state;
2042         struct drm_plane *plane;
2043         struct drm_plane_state *new_plane_state;
2044         struct dm_plane_state *dm_new_plane_state;
2045         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2046         enum dc_connection_type new_connection_type = dc_connection_none;
2047         struct dc_state *dc_state;
2048         int i, r, j;
2049
2050         if (amdgpu_in_reset(adev)) {
2051                 dc_state = dm->cached_dc_state;
2052
2053                 r = dm_dmub_hw_init(adev);
2054                 if (r)
2055                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2056
2057                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2058                 dc_resume(dm->dc);
2059
2060                 amdgpu_dm_irq_resume_early(adev);
2061
2062                 for (i = 0; i < dc_state->stream_count; i++) {
2063                         dc_state->streams[i]->mode_changed = true;
2064                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2065                                 dc_state->stream_status->plane_states[j]->update_flags.raw
2066                                         = 0xffffffff;
2067                         }
2068                 }
2069
2070                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2071
2072                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2073
2074                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2075
2076                 dc_release_state(dm->cached_dc_state);
2077                 dm->cached_dc_state = NULL;
2078
2079                 amdgpu_dm_irq_resume_late(adev);
2080
2081                 mutex_unlock(&dm->dc_lock);
2082
2083                 return 0;
2084         }
2085         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2086         dc_release_state(dm_state->context);
2087         dm_state->context = dc_create_state(dm->dc);
2088         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2089         dc_resource_state_construct(dm->dc, dm_state->context);
2090
2091         /* Before powering on DC we need to re-initialize DMUB. */
2092         r = dm_dmub_hw_init(adev);
2093         if (r)
2094                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2095
2096         /* power on hardware */
2097         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2098
2099         /* program HPD filter */
2100         dc_resume(dm->dc);
2101
2102         /*
2103          * Enable the HPD Rx IRQ early; this should be done before set mode,
2104          * since short-pulse interrupts are used for MST.
2105          */
2106         amdgpu_dm_irq_resume_early(adev);
2107
2108         /* On resume we need to rewrite the MSTM control bits to enable MST */
2109         s3_handle_mst(ddev, false);
2110
2111         /* Do detection */
2112         drm_connector_list_iter_begin(ddev, &iter);
2113         drm_for_each_connector_iter(connector, &iter) {
2114                 aconnector = to_amdgpu_dm_connector(connector);
2115
2116                 /*
2117                  * This is the case when traversing through already created MST
2118                  * connectors; they should be skipped.
2119                  */
2120                 if (aconnector->mst_port)
2121                         continue;
2122
2123                 mutex_lock(&aconnector->hpd_lock);
2124                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2125                         DRM_ERROR("KMS: Failed to detect connector\n");
2126
2127                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2128                         emulated_link_detect(aconnector->dc_link);
2129                 else
2130                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2131
2132                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2133                         aconnector->fake_enable = false;
2134
2135                 if (aconnector->dc_sink)
2136                         dc_sink_release(aconnector->dc_sink);
2137                 aconnector->dc_sink = NULL;
2138                 amdgpu_dm_update_connector_after_detect(aconnector);
2139                 mutex_unlock(&aconnector->hpd_lock);
2140         }
2141         drm_connector_list_iter_end(&iter);
2142
2143         /* Force mode set in atomic commit */
2144         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2145                 new_crtc_state->active_changed = true;
2146
2147         /*
2148          * atomic_check is expected to create the dc states. We need to release
2149          * them here, since they were duplicated as part of the suspend
2150          * procedure.
2151          */
2152         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2153                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2154                 if (dm_new_crtc_state->stream) {
2155                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2156                         dc_stream_release(dm_new_crtc_state->stream);
2157                         dm_new_crtc_state->stream = NULL;
2158                 }
2159         }
2160
2161         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2162                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2163                 if (dm_new_plane_state->dc_state) {
2164                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2165                         dc_plane_state_release(dm_new_plane_state->dc_state);
2166                         dm_new_plane_state->dc_state = NULL;
2167                 }
2168         }
2169
2170         drm_atomic_helper_resume(ddev, dm->cached_state);
2171
2172         dm->cached_state = NULL;
2173
2174         amdgpu_dm_irq_resume_late(adev);
2175
2176         amdgpu_dm_smu_write_watermarks_table(adev);
2177
2178         return 0;
2179 }
2180
2181 /**
2182  * DOC: DM Lifecycle
2183  *
2184  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2185  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2186  * the base driver's device list to be initialized and torn down accordingly.
2187  *
2188  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2189  */
2190
2191 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2192         .name = "dm",
2193         .early_init = dm_early_init,
2194         .late_init = dm_late_init,
2195         .sw_init = dm_sw_init,
2196         .sw_fini = dm_sw_fini,
2197         .hw_init = dm_hw_init,
2198         .hw_fini = dm_hw_fini,
2199         .suspend = dm_suspend,
2200         .resume = dm_resume,
2201         .is_idle = dm_is_idle,
2202         .wait_for_idle = dm_wait_for_idle,
2203         .check_soft_reset = dm_check_soft_reset,
2204         .soft_reset = dm_soft_reset,
2205         .set_clockgating_state = dm_set_clockgating_state,
2206         .set_powergating_state = dm_set_powergating_state,
2207 };
2208
2209 const struct amdgpu_ip_block_version dm_ip_block =
2210 {
2211         .type = AMD_IP_BLOCK_TYPE_DCE,
2212         .major = 1,
2213         .minor = 0,
2214         .rev = 0,
2215         .funcs = &amdgpu_dm_funcs,
2216 };
2217
2218
2219 /**
2220  * DOC: atomic
2221  *
2222  * *WIP*
2223  */
2224
2225 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2226         .fb_create = amdgpu_display_user_framebuffer_create,
2227         .get_format_info = amd_get_format_info,
2228         .output_poll_changed = drm_fb_helper_output_poll_changed,
2229         .atomic_check = amdgpu_dm_atomic_check,
2230         .atomic_commit = drm_atomic_helper_commit,
2231 };
2232
2233 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2234         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2235 };
2236
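/*
 * Derive the eDP backlight capabilities of a connector, i.e. whether AUX
 * backlight control is supported and, if so, the min/max input signal,
 * from the sink's DPCD extended caps and HDR metadata.
 */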
2237 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2238 {
2239         u32 max_cll, min_cll, max, min, q, r;
2240         struct amdgpu_dm_backlight_caps *caps;
2241         struct amdgpu_display_manager *dm;
2242         struct drm_connector *conn_base;
2243         struct amdgpu_device *adev;
2244         struct dc_link *link = NULL;
2245         static const u8 pre_computed_values[] = {
2246                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2247                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2248
2249         if (!aconnector || !aconnector->dc_link)
2250                 return;
2251
2252         link = aconnector->dc_link;
2253         if (link->connector_signal != SIGNAL_TYPE_EDP)
2254                 return;
2255
2256         conn_base = &aconnector->base;
2257         adev = drm_to_adev(conn_base->dev);
2258         dm = &adev->dm;
2259         caps = &dm->backlight_caps;
2260         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2261         caps->aux_support = false;
2262         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2263         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2264
2265         if (caps->ext_caps->bits.oled == 1 ||
2266             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2267             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2268                 caps->aux_support = true;
2269
2270         /* From the specification (CTA-861-G), the maximum luminance is
2271          * calculated as:
2272          *      Luminance = 50 * 2**(CV/32)
2273          * where CV is a one-byte value.
2274          * Evaluating this expression directly would need floating-point
2275          * precision; to avoid that complexity, we take advantage of the fact
2276          * that CV is divided by a constant. By Euclid's division algorithm,
2277          * CV can be written as CV = 32*q + r. Substituting this into the
2278          * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2279          * to pre-compute 50*2**(r/32) for each possible r. The values were
2280          * pre-computed with the following Ruby line:
2281          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2282          * and the results can be verified against pre_computed_values.
2283          */
2285         q = max_cll >> 5;
2286         r = max_cll % 32;
2287         max = (1 << q) * pre_computed_values[r];
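        /*
         * Worked example with a hypothetical value: for max_cll = 97,
         * q = 97 >> 5 = 3 and r = 97 % 32 = 1, so
         * max = (1 << 3) * pre_computed_values[1] = 8 * 51 = 408,
         * close to the exact 50 * 2**(97/32) ~= 408.8.
         */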
2288
2289         // min luminance: maxLum * (CV/255)^2 / 100
2290         q = DIV_ROUND_CLOSEST(min_cll, 255);
2291         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2292
2293         caps->aux_max_input_signal = max;
2294         caps->aux_min_input_signal = min;
2295 }
2296
2297 void amdgpu_dm_update_connector_after_detect(
2298                 struct amdgpu_dm_connector *aconnector)
2299 {
2300         struct drm_connector *connector = &aconnector->base;
2301         struct drm_device *dev = connector->dev;
2302         struct dc_sink *sink;
2303
2304         /* MST handled by drm_mst framework */
2305         if (aconnector->mst_mgr.mst_state)
2306                 return;
2307
2308         sink = aconnector->dc_link->local_sink;
2309         if (sink)
2310                 dc_sink_retain(sink);
2311
2312         /*
2313          * An EDID-managed connector gets its first update only in the mode_valid
2314          * hook; the connector sink is then set to either a fake or a physical
2315          * sink, depending on the link status. Skip if already done during boot.
2316          */
2317         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2318                         && aconnector->dc_em_sink) {
2319
2320                 /*
2321                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2322                  * fake a stream, because connector->sink is set to NULL on resume.
2323                  */
2324                 mutex_lock(&dev->mode_config.mutex);
2325
2326                 if (sink) {
2327                         if (aconnector->dc_sink) {
2328                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2329                                 /*
2330                                  * The retain/release below bump the sink refcount:
2331                                  * the link no longer points to it after disconnect,
2332                                  * so the next crtc-to-connector reshuffle by the
2333                                  * UMD would otherwise release the dc_sink unwantedly.
2334                                  */
2335                                 dc_sink_release(aconnector->dc_sink);
2336                         }
2337                         aconnector->dc_sink = sink;
2338                         dc_sink_retain(aconnector->dc_sink);
2339                         amdgpu_dm_update_freesync_caps(connector,
2340                                         aconnector->edid);
2341                 } else {
2342                         amdgpu_dm_update_freesync_caps(connector, NULL);
2343                         if (!aconnector->dc_sink) {
2344                                 aconnector->dc_sink = aconnector->dc_em_sink;
2345                                 dc_sink_retain(aconnector->dc_sink);
2346                         }
2347                 }
2348
2349                 mutex_unlock(&dev->mode_config.mutex);
2350
2351                 if (sink)
2352                         dc_sink_release(sink);
2353                 return;
2354         }
2355
2356         /*
2357          * TODO: temporary guard pending a proper fix:
2358          * if this sink is an MST sink, we should not do anything.
2359          */
2360         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2361                 dc_sink_release(sink);
2362                 return;
2363         }
2364
2365         if (aconnector->dc_sink == sink) {
2366                 /*
2367                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2368                  * Do nothing!!
2369                  */
2370                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2371                                 aconnector->connector_id);
2372                 if (sink)
2373                         dc_sink_release(sink);
2374                 return;
2375         }
2376
2377         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2378                 aconnector->connector_id, aconnector->dc_sink, sink);
2379
2380         mutex_lock(&dev->mode_config.mutex);
2381
2382         /*
2383          * 1. Update status of the drm connector
2384          * 2. Send an event and let userspace tell us what to do
2385          */
2386         if (sink) {
2387                 /*
2388                  * TODO: check if we still need the S3 mode update workaround.
2389                  * If yes, put it here.
2390                  */
2391                 if (aconnector->dc_sink) {
2392                         amdgpu_dm_update_freesync_caps(connector, NULL);
2393                         dc_sink_release(aconnector->dc_sink);
2394                 }
2395
2396                 aconnector->dc_sink = sink;
2397                 dc_sink_retain(aconnector->dc_sink);
2398                 if (sink->dc_edid.length == 0) {
2399                         aconnector->edid = NULL;
2400                         if (aconnector->dc_link->aux_mode) {
2401                                 drm_dp_cec_unset_edid(
2402                                         &aconnector->dm_dp_aux.aux);
2403                         }
2404                 } else {
2405                         aconnector->edid =
2406                                 (struct edid *)sink->dc_edid.raw_edid;
2407
2408                         drm_connector_update_edid_property(connector,
2409                                                            aconnector->edid);
2410                         if (aconnector->dc_link->aux_mode)
2411                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2412                                                     aconnector->edid);
2413                 }
2414
2415                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2416                 update_connector_ext_caps(aconnector);
2417         } else {
2418                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2419                 amdgpu_dm_update_freesync_caps(connector, NULL);
2420                 drm_connector_update_edid_property(connector, NULL);
2421                 aconnector->num_modes = 0;
2422                 dc_sink_release(aconnector->dc_sink);
2423                 aconnector->dc_sink = NULL;
2424                 aconnector->edid = NULL;
2425 #ifdef CONFIG_DRM_AMD_DC_HDCP
2426                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2427                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2428                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2429 #endif
2430         }
2431
2432         mutex_unlock(&dev->mode_config.mutex);
2433
2434         update_subconnector_property(aconnector);
2435
2436         if (sink)
2437                 dc_sink_release(sink);
2438 }
2439
2440 static void handle_hpd_irq(void *param)
2441 {
2442         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2443         struct drm_connector *connector = &aconnector->base;
2444         struct drm_device *dev = connector->dev;
2445         enum dc_connection_type new_connection_type = dc_connection_none;
2446 #ifdef CONFIG_DRM_AMD_DC_HDCP
2447         struct amdgpu_device *adev = drm_to_adev(dev);
2448         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2449 #endif
2450
2451         /*
2452          * In case of failure or MST, no need to update the connector status or
2453          * notify the OS, since (in the MST case) MST does this in its own context.
2454          */
2455         mutex_lock(&aconnector->hpd_lock);
2456
2457 #ifdef CONFIG_DRM_AMD_DC_HDCP
2458         if (adev->dm.hdcp_workqueue) {
2459                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2460                 dm_con_state->update_hdcp = true;
2461         }
2462 #endif
2463         if (aconnector->fake_enable)
2464                 aconnector->fake_enable = false;
2465
2466         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2467                 DRM_ERROR("KMS: Failed to detect connector\n");
2468
2469         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2470                 emulated_link_detect(aconnector->dc_link);
2471
2472
2473                 drm_modeset_lock_all(dev);
2474                 dm_restore_drm_connector_state(dev, connector);
2475                 drm_modeset_unlock_all(dev);
2476
2477                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2478                         drm_kms_helper_hotplug_event(dev);
2479
2480         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2481                 if (new_connection_type == dc_connection_none &&
2482                     aconnector->dc_link->type == dc_connection_none)
2483                         dm_set_dpms_off(aconnector->dc_link);
2484
2485                 amdgpu_dm_update_connector_after_detect(aconnector);
2486
2487                 drm_modeset_lock_all(dev);
2488                 dm_restore_drm_connector_state(dev, connector);
2489                 drm_modeset_unlock_all(dev);
2490
2491                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2492                         drm_kms_helper_hotplug_event(dev);
2493         }
2494         mutex_unlock(&aconnector->hpd_lock);
2495
2496 }
2497
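/*
 * Poll the sink's ESI (event status indicator) DPCD bytes, let the MST
 * manager handle any pending short-pulse IRQs, and acknowledge them by
 * writing the handled bits back, bounded by max_process_count iterations.
 */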
2498 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2499 {
2500         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2501         uint8_t dret;
2502         bool new_irq_handled = false;
2503         int dpcd_addr;
2504         int dpcd_bytes_to_read;
2505
2506         const int max_process_count = 30;
2507         int process_count = 0;
2508
2509         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2510
2511         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2512                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2513                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2514                 dpcd_addr = DP_SINK_COUNT;
2515         } else {
2516                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2517                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2518                 dpcd_addr = DP_SINK_COUNT_ESI;
2519         }
2520
2521         dret = drm_dp_dpcd_read(
2522                 &aconnector->dm_dp_aux.aux,
2523                 dpcd_addr,
2524                 esi,
2525                 dpcd_bytes_to_read);
2526
2527         while (dret == dpcd_bytes_to_read &&
2528                 process_count < max_process_count) {
2529                 uint8_t retry;
2530                 dret = 0;
2531
2532                 process_count++;
2533
2534                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2535                 /* handle HPD short pulse irq */
2536                 if (aconnector->mst_mgr.mst_state)
2537                         drm_dp_mst_hpd_irq(
2538                                 &aconnector->mst_mgr,
2539                                 esi,
2540                                 &new_irq_handled);
2541
2542                 if (new_irq_handled) {
2543                         /* ACK at DPCD to notify downstream */
2544                         const int ack_dpcd_bytes_to_write =
2545                                 dpcd_bytes_to_read - 1;
2546
2547                         for (retry = 0; retry < 3; retry++) {
2548                                 uint8_t wret;
2549
2550                                 wret = drm_dp_dpcd_write(
2551                                         &aconnector->dm_dp_aux.aux,
2552                                         dpcd_addr + 1,
2553                                         &esi[1],
2554                                         ack_dpcd_bytes_to_write);
2555                                 if (wret == ack_dpcd_bytes_to_write)
2556                                         break;
2557                         }
2558
2559                         /* check if there is new irq to be handled */
2560                         dret = drm_dp_dpcd_read(
2561                                 &aconnector->dm_dp_aux.aux,
2562                                 dpcd_addr,
2563                                 esi,
2564                                 dpcd_bytes_to_read);
2565
2566                         new_irq_handled = false;
2567                 } else {
2568                         break;
2569                 }
2570         }
2571
2572         if (process_count == max_process_count)
2573                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2574 }
2575
2576 static void handle_hpd_rx_irq(void *param)
2577 {
2578         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2579         struct drm_connector *connector = &aconnector->base;
2580         struct drm_device *dev = connector->dev;
2581         struct dc_link *dc_link = aconnector->dc_link;
2582         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2583         bool result = false;
2584         enum dc_connection_type new_connection_type = dc_connection_none;
2585         struct amdgpu_device *adev = drm_to_adev(dev);
2586         union hpd_irq_data hpd_irq_data;
2587
2588         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2589
2590         /*
2591          * TODO: This mutex temporarily protects the HPD interrupt from a GPIO
2592          * conflict; once an i2c helper is implemented, this mutex should be
2593          * retired.
2594          */
2595         if (dc_link->type != dc_connection_mst_branch)
2596                 mutex_lock(&aconnector->hpd_lock);
2597
2598         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2599
2600         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2601                 (dc_link->type == dc_connection_mst_branch)) {
2602                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2603                         result = true;
2604                         dm_handle_hpd_rx_irq(aconnector);
2605                         goto out;
2606                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2607                         result = false;
2608                         dm_handle_hpd_rx_irq(aconnector);
2609                         goto out;
2610                 }
2611         }
2612
2613         mutex_lock(&adev->dm.dc_lock);
2614 #ifdef CONFIG_DRM_AMD_DC_HDCP
2615         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2616 #else
2617         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2618 #endif
2619         mutex_unlock(&adev->dm.dc_lock);
2620
2621 out:
2622         if (result && !is_mst_root_connector) {
2623                 /* Downstream Port status changed. */
2624                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2625                         DRM_ERROR("KMS: Failed to detect connector\n");
2626
2627                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2628                         emulated_link_detect(dc_link);
2629
2630                         if (aconnector->fake_enable)
2631                                 aconnector->fake_enable = false;
2632
2633                         amdgpu_dm_update_connector_after_detect(aconnector);
2634
2635
2636                         drm_modeset_lock_all(dev);
2637                         dm_restore_drm_connector_state(dev, connector);
2638                         drm_modeset_unlock_all(dev);
2639
2640                         drm_kms_helper_hotplug_event(dev);
2641                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2642
2643                         if (aconnector->fake_enable)
2644                                 aconnector->fake_enable = false;
2645
2646                         amdgpu_dm_update_connector_after_detect(aconnector);
2647
2648
2649                         drm_modeset_lock_all(dev);
2650                         dm_restore_drm_connector_state(dev, connector);
2651                         drm_modeset_unlock_all(dev);
2652
2653                         drm_kms_helper_hotplug_event(dev);
2654                 }
2655         }
2656 #ifdef CONFIG_DRM_AMD_DC_HDCP
2657         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2658                 if (adev->dm.hdcp_workqueue)
2659                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2660         }
2661 #endif
2662
2663         if (dc_link->type != dc_connection_mst_branch) {
2664                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2665                 mutex_unlock(&aconnector->hpd_lock);
2666         }
2667 }
2668
2669 static void register_hpd_handlers(struct amdgpu_device *adev)
2670 {
2671         struct drm_device *dev = adev_to_drm(adev);
2672         struct drm_connector *connector;
2673         struct amdgpu_dm_connector *aconnector;
2674         const struct dc_link *dc_link;
2675         struct dc_interrupt_params int_params = {0};
2676
2677         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2678         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2679
2680         list_for_each_entry(connector,
2681                         &dev->mode_config.connector_list, head) {
2682
2683                 aconnector = to_amdgpu_dm_connector(connector);
2684                 dc_link = aconnector->dc_link;
2685
2686                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2687                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2688                         int_params.irq_source = dc_link->irq_source_hpd;
2689
2690                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2691                                         handle_hpd_irq,
2692                                         (void *) aconnector);
2693                 }
2694
2695                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2696
2697                         /* Also register for DP short pulse (hpd_rx). */
2698                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2699                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2700
2701                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2702                                         handle_hpd_rx_irq,
2703                                         (void *) aconnector);
2704                 }
2705         }
2706 }
2707
2708 #if defined(CONFIG_DRM_AMD_DC_SI)
2709 /* Register IRQ sources and initialize IRQ callbacks */
2710 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2711 {
2712         struct dc *dc = adev->dm.dc;
2713         struct common_irq_params *c_irq_params;
2714         struct dc_interrupt_params int_params = {0};
2715         int r;
2716         int i;
2717         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2718
2719         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2720         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2721
2722         /*
2723          * Actions of amdgpu_irq_add_id():
2724          * 1. Register a set() function with base driver.
2725          *    Base driver will call set() function to enable/disable an
2726          *    interrupt in DC hardware.
2727          * 2. Register amdgpu_dm_irq_handler().
2728          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2729          *    coming from DC hardware.
2730          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2731          *    for acknowledging and handling. */
2732
2733         /* Use VBLANK interrupt */
2734         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2735                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2736                 if (r) {
2737                         DRM_ERROR("Failed to add crtc irq id!\n");
2738                         return r;
2739                 }
2740
2741                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2742                 int_params.irq_source =
2743                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2744
2745                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2746
2747                 c_irq_params->adev = adev;
2748                 c_irq_params->irq_src = int_params.irq_source;
2749
2750                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2751                                 dm_crtc_high_irq, c_irq_params);
2752         }
2753
2754         /* Use GRPH_PFLIP interrupt */
2755         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2756                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2757                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2758                 if (r) {
2759                         DRM_ERROR("Failed to add page flip irq id!\n");
2760                         return r;
2761                 }
2762
2763                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2764                 int_params.irq_source =
2765                         dc_interrupt_to_irq_source(dc, i, 0);
2766
2767                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2768
2769                 c_irq_params->adev = adev;
2770                 c_irq_params->irq_src = int_params.irq_source;
2771
2772                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2773                                 dm_pflip_high_irq, c_irq_params);
2774
2775         }
2776
2777         /* HPD */
2778         r = amdgpu_irq_add_id(adev, client_id,
2779                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2780         if (r) {
2781                 DRM_ERROR("Failed to add hpd irq id!\n");
2782                 return r;
2783         }
2784
2785         register_hpd_handlers(adev);
2786
2787         return 0;
2788 }
2789 #endif
2790
2791 /* Register IRQ sources and initialize IRQ callbacks */
2792 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2793 {
2794         struct dc *dc = adev->dm.dc;
2795         struct common_irq_params *c_irq_params;
2796         struct dc_interrupt_params int_params = {0};
2797         int r;
2798         int i;
2799         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2800
2801         if (adev->asic_type >= CHIP_VEGA10)
2802                 client_id = SOC15_IH_CLIENTID_DCE;
2803
2804         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2805         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2806
2807         /*
2808          * Actions of amdgpu_irq_add_id():
2809          * 1. Register a set() function with base driver.
2810          *    Base driver will call set() function to enable/disable an
2811          *    interrupt in DC hardware.
2812          * 2. Register amdgpu_dm_irq_handler().
2813          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2814          *    coming from DC hardware.
2815          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2816          *    for acknowledging and handling. */
2817
2818         /* Use VBLANK interrupt */
2819         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2820                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2821                 if (r) {
2822                         DRM_ERROR("Failed to add crtc irq id!\n");
2823                         return r;
2824                 }
2825
2826                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2827                 int_params.irq_source =
2828                         dc_interrupt_to_irq_source(dc, i, 0);
2829
2830                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2831
2832                 c_irq_params->adev = adev;
2833                 c_irq_params->irq_src = int_params.irq_source;
2834
2835                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2836                                 dm_crtc_high_irq, c_irq_params);
2837         }
2838
2839         /* Use VUPDATE interrupt */
2840         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2841                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2842                 if (r) {
2843                         DRM_ERROR("Failed to add vupdate irq id!\n");
2844                         return r;
2845                 }
2846
2847                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2848                 int_params.irq_source =
2849                         dc_interrupt_to_irq_source(dc, i, 0);
2850
2851                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2852
2853                 c_irq_params->adev = adev;
2854                 c_irq_params->irq_src = int_params.irq_source;
2855
2856                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2857                                 dm_vupdate_high_irq, c_irq_params);
2858         }
2859
2860         /* Use GRPH_PFLIP interrupt */
2861         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2862                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2863                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2864                 if (r) {
2865                         DRM_ERROR("Failed to add page flip irq id!\n");
2866                         return r;
2867                 }
2868
2869                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2870                 int_params.irq_source =
2871                         dc_interrupt_to_irq_source(dc, i, 0);
2872
2873                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2874
2875                 c_irq_params->adev = adev;
2876                 c_irq_params->irq_src = int_params.irq_source;
2877
2878                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2879                                 dm_pflip_high_irq, c_irq_params);
2880
2881         }
2882
2883         /* HPD */
2884         r = amdgpu_irq_add_id(adev, client_id,
2885                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2886         if (r) {
2887                 DRM_ERROR("Failed to add hpd irq id!\n");
2888                 return r;
2889         }
2890
2891         register_hpd_handlers(adev);
2892
2893         return 0;
2894 }
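/*
 * Note (illustrative): each of the register paths above and below indexes
 * its common_irq_params table by offsetting the translated DC irq_source
 * against the first source of that class. A hypothetical, unused helper
 * showing the mapping for VBLANK:
 *
 *	static struct common_irq_params *
 *	vblank_params_for_source(struct amdgpu_display_manager *dm,
 *				 enum dc_irq_source src)
 *	{
 *		return &dm->vblank_params[src - DC_IRQ_SOURCE_VBLANK1];
 *	}
 */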
2895
2896 #if defined(CONFIG_DRM_AMD_DC_DCN)
2897 /* Register IRQ sources and initialize IRQ callbacks */
2898 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2899 {
2900         struct dc *dc = adev->dm.dc;
2901         struct common_irq_params *c_irq_params;
2902         struct dc_interrupt_params int_params = {0};
2903         int r;
2904         int i;
2905
2906         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2907         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2908
2909         /*
2910          * Actions of amdgpu_irq_add_id():
2911          * 1. Register a set() function with base driver.
2912          *    Base driver will call set() function to enable/disable an
2913          *    interrupt in DC hardware.
2914          * 2. Register amdgpu_dm_irq_handler().
2915          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2916          *    coming from DC hardware.
2917          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2918          *    for acknowledging and handling.
2919          */
2920
2921         /* Use VSTARTUP interrupt */
2922         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2923                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2924                         i++) {
2925                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2926
2927                 if (r) {
2928                         DRM_ERROR("Failed to add crtc irq id!\n");
2929                         return r;
2930                 }
2931
2932                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2933                 int_params.irq_source =
2934                         dc_interrupt_to_irq_source(dc, i, 0);
2935
2936                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2937
2938                 c_irq_params->adev = adev;
2939                 c_irq_params->irq_src = int_params.irq_source;
2940
2941                 amdgpu_dm_irq_register_interrupt(
2942                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2943         }
2944
2945         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2946          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2947          * to trigger at the end of each vblank, regardless of the state of
2948          * the lock, matching DCE behaviour.
2949          */
2950         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2951              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2952              i++) {
2953                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2954
2955                 if (r) {
2956                         DRM_ERROR("Failed to add vupdate irq id!\n");
2957                         return r;
2958                 }
2959
2960                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2961                 int_params.irq_source =
2962                         dc_interrupt_to_irq_source(dc, i, 0);
2963
2964                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2965
2966                 c_irq_params->adev = adev;
2967                 c_irq_params->irq_src = int_params.irq_source;
2968
2969                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2970                                 dm_vupdate_high_irq, c_irq_params);
2971         }
2972
2973         /* Use GRPH_PFLIP interrupt */
2974         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2975                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2976                         i++) {
2977                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2978                 if (r) {
2979                         DRM_ERROR("Failed to add page flip irq id!\n");
2980                         return r;
2981                 }
2982
2983                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2984                 int_params.irq_source =
2985                         dc_interrupt_to_irq_source(dc, i, 0);
2986
2987                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2988
2989                 c_irq_params->adev = adev;
2990                 c_irq_params->irq_src = int_params.irq_source;
2991
2992                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2993                                 dm_pflip_high_irq, c_irq_params);
2994
2995         }
2996
2997         /* HPD */
2998         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2999                         &adev->hpd_irq);
3000         if (r) {
3001                 DRM_ERROR("Failed to add hpd irq id!\n");
3002                 return r;
3003         }
3004
3005         register_hpd_handlers(adev);
3006
3007         return 0;
3008 }
3009 #endif
3010
3011 /*
3012  * Acquires the lock for the atomic state object and returns
3013  * the new atomic state.
3014  *
3015  * This should only be called during atomic check.
3016  */
3017 static int dm_atomic_get_state(struct drm_atomic_state *state,
3018                                struct dm_atomic_state **dm_state)
3019 {
3020         struct drm_device *dev = state->dev;
3021         struct amdgpu_device *adev = drm_to_adev(dev);
3022         struct amdgpu_display_manager *dm = &adev->dm;
3023         struct drm_private_state *priv_state;
3024
3025         if (*dm_state)
3026                 return 0;
3027
3028         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3029         if (IS_ERR(priv_state))
3030                 return PTR_ERR(priv_state);
3031
3032         *dm_state = to_dm_atomic_state(priv_state);
3033
3034         return 0;
3035 }
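/*
 * Minimal usage sketch (illustrative) from an atomic-check path:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * On success the private object lock is held and dm_state->context can be
 * inspected or modified until the atomic state is disposed of.
 */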
3036
3037 static struct dm_atomic_state *
3038 dm_atomic_get_new_state(struct drm_atomic_state *state)
3039 {
3040         struct drm_device *dev = state->dev;
3041         struct amdgpu_device *adev = drm_to_adev(dev);
3042         struct amdgpu_display_manager *dm = &adev->dm;
3043         struct drm_private_obj *obj;
3044         struct drm_private_state *new_obj_state;
3045         int i;
3046
3047         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3048                 if (obj->funcs == dm->atomic_obj.funcs)
3049                         return to_dm_atomic_state(new_obj_state);
3050         }
3051
3052         return NULL;
3053 }
3054
3055 static struct drm_private_state *
3056 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3057 {
3058         struct dm_atomic_state *old_state, *new_state;
3059
3060         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3061         if (!new_state)
3062                 return NULL;
3063
3064         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3065
3066         old_state = to_dm_atomic_state(obj->state);
3067
3068         if (old_state && old_state->context)
3069                 new_state->context = dc_copy_state(old_state->context);
3070
3071         if (!new_state->context) {
3072                 kfree(new_state);
3073                 return NULL;
3074         }
3075
3076         return &new_state->base;
3077 }
3078
3079 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3080                                     struct drm_private_state *state)
3081 {
3082         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3083
3084         if (dm_state && dm_state->context)
3085                 dc_release_state(dm_state->context);
3086
3087         kfree(dm_state);
3088 }
3089
3090 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3091         .atomic_duplicate_state = dm_atomic_duplicate_state,
3092         .atomic_destroy_state = dm_atomic_destroy_state,
3093 };
3094
3095 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3096 {
3097         struct dm_atomic_state *state;
3098         int r;
3099
3100         adev->mode_info.mode_config_initialized = true;
3101
3102         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3103         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3104
3105         adev_to_drm(adev)->mode_config.max_width = 16384;
3106         adev_to_drm(adev)->mode_config.max_height = 16384;
3107
3108         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3109         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3110         /* indicates support for immediate flip */
3111         adev_to_drm(adev)->mode_config.async_page_flip = true;
3112
3113         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3114
3115         state = kzalloc(sizeof(*state), GFP_KERNEL);
3116         if (!state)
3117                 return -ENOMEM;
3118
3119         state->context = dc_create_state(adev->dm.dc);
3120         if (!state->context) {
3121                 kfree(state);
3122                 return -ENOMEM;
3123         }
3124
3125         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3126
3127         drm_atomic_private_obj_init(adev_to_drm(adev),
3128                                     &adev->dm.atomic_obj,
3129                                     &state->base,
3130                                     &dm_atomic_state_funcs);
3131
3132         r = amdgpu_display_modeset_create_props(adev);
3133         if (r) {
3134                 dc_release_state(state->context);
3135                 kfree(state);
3136                 return r;
3137         }
3138
3139         r = amdgpu_dm_audio_init(adev);
3140         if (r) {
3141                 dc_release_state(state->context);
3142                 kfree(state);
3143                 return r;
3144         }
3145
3146         return 0;
3147 }
3148
3149 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3150 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3151 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3152
3153 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
3154         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3155
3156 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3157 {
3158 #if defined(CONFIG_ACPI)
3159         struct amdgpu_dm_backlight_caps caps;
3160
3161         memset(&caps, 0, sizeof(caps));
3162
3163         if (dm->backlight_caps.caps_valid)
3164                 return;
3165
3166         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3167         if (caps.caps_valid) {
3168                 dm->backlight_caps.caps_valid = true;
3169                 if (caps.aux_support)
3170                         return;
3171                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3172                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3173         } else {
3174                 dm->backlight_caps.min_input_signal =
3175                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3176                 dm->backlight_caps.max_input_signal =
3177                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3178         }
3179 #else
3180         if (dm->backlight_caps.aux_support)
3181                 return;
3182
3183         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3184         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3185 #endif
3186 }
3187
3188 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3189                                 unsigned *min, unsigned *max)
3190 {
3191         if (!caps)
3192                 return 0;
3193
3194         if (caps->aux_support) {
3195                 // Firmware limits are in nits, DC API wants millinits.
3196                 *max = 1000 * caps->aux_max_input_signal;
3197                 *min = 1000 * caps->aux_min_input_signal;
3198         } else {
3199                 // Firmware limits are 8-bit, PWM control is 16-bit.
3200                 *max = 0x101 * caps->max_input_signal;
3201                 *min = 0x101 * caps->min_input_signal;
3202         }
3203         return 1;
3204 }
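/*
 * Unit sanity check (illustrative): 0x101 * 0xFF == 0xFFFF, so the 8-bit
 * firmware limits expand to the full 16-bit PWM range. With the default
 * caps (min_input_signal = 12, max_input_signal = 255) this yields
 * *min = 3084 and *max = 65535.
 */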
3205
3206 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3207                                         uint32_t brightness)
3208 {
3209         unsigned min, max;
3210
3211         if (!get_brightness_range(caps, &min, &max))
3212                 return brightness;
3213
3214         // Rescale 0..255 to min..max
3215         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3216                                        AMDGPU_MAX_BL_LEVEL);
3217 }
3218
3219 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3220                                       uint32_t brightness)
3221 {
3222         unsigned min, max;
3223
3224         if (!get_brightness_range(caps, &min, &max))
3225                 return brightness;
3226
3227         if (brightness < min)
3228                 return 0;
3229         // Rescale min..max to 0..255
3230         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3231                                  max - min);
3232 }
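/*
 * Worked example (illustrative), assuming the default PWM range from
 * above (min = 3084, max = 65535): a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432, and feeding 34432
 * back through convert_brightness_to_user() rounds to 128 again.
 */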
3233
3234 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3235 {
3236         struct amdgpu_display_manager *dm = bl_get_data(bd);
3237         struct amdgpu_dm_backlight_caps caps;
3238         struct dc_link *link = NULL;
3239         u32 brightness;
3240         bool rc;
3241
3242         amdgpu_dm_update_backlight_caps(dm);
3243         caps = dm->backlight_caps;
3244
3245         link = (struct dc_link *)dm->backlight_link;
3246
3247         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3248         // Change brightness based on AUX property
3249         if (caps.aux_support)
3250                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3251                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3252         else
3253                 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3254
3255         return rc ? 0 : 1;
3256 }
3257
3258 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3259 {
3260         struct amdgpu_display_manager *dm = bl_get_data(bd);
3261         struct amdgpu_dm_backlight_caps caps;
3262
3263         amdgpu_dm_update_backlight_caps(dm);
3264         caps = dm->backlight_caps;
3265
3266         if (caps.aux_support) {
3267                 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3268                 u32 avg, peak;
3269                 bool rc;
3270
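                /* Read the current average/peak backlight, in nits, over AUX. */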
3271                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3272                 if (!rc)
3273                         return bd->props.brightness;
3274                 return convert_brightness_to_user(&caps, avg);
3275         } else {
3276                 int ret = dc_link_get_backlight_level(dm->backlight_link);
3277
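                /*
                 * DC reports DC_ERROR_UNEXPECTED when no backlight
                 * controller (e.g. ABM) is available; fall back to the
                 * cached brightness in that case.
                 */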
3278                 if (ret == DC_ERROR_UNEXPECTED)
3279                         return bd->props.brightness;
3280                 return convert_brightness_to_user(&caps, ret);
3281         }
3282 }
3283
3284 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3285         .options = BL_CORE_SUSPENDRESUME,
3286         .get_brightness = amdgpu_dm_backlight_get_brightness,
3287         .update_status  = amdgpu_dm_backlight_update_status,
3288 };
3289
3290 static void
3291 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3292 {
3293         char bl_name[16];
3294         struct backlight_properties props = { 0 };
3295
3296         amdgpu_dm_update_backlight_caps(dm);
3297
3298         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3299         props.brightness = AMDGPU_MAX_BL_LEVEL;
3300         props.type = BACKLIGHT_RAW;
3301
3302         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3303                  adev_to_drm(dm->adev)->primary->index);
3304
3305         dm->backlight_dev = backlight_device_register(bl_name,
3306                                                       adev_to_drm(dm->adev)->dev,
3307                                                       dm,
3308                                                       &amdgpu_dm_backlight_ops,
3309                                                       &props);
3310
3311         if (IS_ERR(dm->backlight_dev))
3312                 DRM_ERROR("DM: Backlight registration failed!\n");
3313         else
3314                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3315 }
3316
3317 #endif
3318
3319 static int initialize_plane(struct amdgpu_display_manager *dm,
3320                             struct amdgpu_mode_info *mode_info, int plane_id,
3321                             enum drm_plane_type plane_type,
3322                             const struct dc_plane_cap *plane_cap)
3323 {
3324         struct drm_plane *plane;
3325         unsigned long possible_crtcs;
3326         int ret = 0;
3327
3328         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3329         if (!plane) {
3330                 DRM_ERROR("KMS: Failed to allocate plane\n");
3331                 return -ENOMEM;
3332         }
3333         plane->type = plane_type;
3334
3335         /*
3336          * HACK: IGT tests expect that the primary plane for a CRTC
3337          * can only have one possible CRTC. Only expose support for
3338          * any CRTC on planes that will not be used as a primary plane
3339          * for a CRTC - i.e. overlay or underlay planes.
3340          */
3341         possible_crtcs = 1 << plane_id;
3342         if (plane_id >= dm->dc->caps.max_streams)
3343                 possible_crtcs = 0xff;
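        /*
         * E.g. plane 0 exposes only CRTC 0 (mask 0x1), while an overlay
         * plane (plane_id >= max_streams) may be placed on any CRTC (0xff).
         */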
3344
3345         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3346
3347         if (ret) {
3348                 DRM_ERROR("KMS: Failed to initialize plane\n");
3349                 kfree(plane);
3350                 return ret;
3351         }
3352
3353         if (mode_info)
3354                 mode_info->planes[plane_id] = plane;
3355
3356         return ret;
3357 }
3358
3359
3360 static void register_backlight_device(struct amdgpu_display_manager *dm,
3361                                       struct dc_link *link)
3362 {
3363 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
3364         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3365
3366         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3367             link->type != dc_connection_none) {
3368                 /*
3369                  * Even if registration fails, we should continue with
3370                  * DM initialization because not having a backlight control
3371                  * is better than a black screen.
3372                  */
3373                 amdgpu_dm_register_backlight_device(dm);
3374
3375                 if (dm->backlight_dev)
3376                         dm->backlight_link = link;
3377         }
3378 #endif
3379 }
3380
3381
3382 /*
3383  * In this architecture, the association
3384  * connector -> encoder -> crtc
3385  * is not really required. The CRTC and connector will hold the
3386  * display_index as an abstraction to use with the DAL component.
3387  *
3388  * Returns 0 on success
3389  */
3390 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3391 {
3392         struct amdgpu_display_manager *dm = &adev->dm;
3393         int32_t i;
3394         struct amdgpu_dm_connector *aconnector = NULL;
3395         struct amdgpu_encoder *aencoder = NULL;
3396         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3397         uint32_t link_cnt;
3398         int32_t primary_planes;
3399         enum dc_connection_type new_connection_type = dc_connection_none;
3400         const struct dc_plane_cap *plane;
3401
3402         dm->display_indexes_num = dm->dc->caps.max_streams;
3403         /* Update the actual number of CRTCs in use */
3404         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3405
3406         link_cnt = dm->dc->caps.max_links;
3407         if (amdgpu_dm_mode_config_init(dm->adev)) {
3408                 DRM_ERROR("DM: Failed to initialize mode config\n");
3409                 return -EINVAL;
3410         }
3411
3412         /* There is one primary plane per CRTC */
3413         primary_planes = dm->dc->caps.max_streams;
3414         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3415
3416         /*
3417          * Initialize primary planes, implicit planes for legacy IOCTLs.
3418          * Order is reversed to match iteration order in atomic check.
3419          */
3420         for (i = (primary_planes - 1); i >= 0; i--) {
3421                 plane = &dm->dc->caps.planes[i];
3422
3423                 if (initialize_plane(dm, mode_info, i,
3424                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3425                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3426                         goto fail;
3427                 }
3428         }
3429
3430         /*
3431          * Initialize overlay planes, index starting after primary planes.
3432          * These planes have a higher DRM index than the primary planes since
3433          * they should be considered as having a higher z-order.
3434          * Order is reversed to match iteration order in atomic check.
3435          *
3436          * Only support DCN for now, and only expose one so we don't encourage
3437          * userspace to use up all the pipes.
3438          */
3439         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3440                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3441
3442                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3443                         continue;
3444
3445                 if (!plane->blends_with_above || !plane->blends_with_below)
3446                         continue;
3447
3448                 if (!plane->pixel_format_support.argb8888)
3449                         continue;
3450
3451                 if (initialize_plane(dm, NULL, primary_planes + i,
3452                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3453                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3454                         goto fail;
3455                 }
3456
3457                 /* Only create one overlay plane. */
3458                 break;
3459         }
3460
3461         for (i = 0; i < dm->dc->caps.max_streams; i++)
3462                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3463                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3464                         goto fail;
3465                 }
3466
3467         /* loops over all connectors on the board */
3468         for (i = 0; i < link_cnt; i++) {
3469                 struct dc_link *link = NULL;
3470
3471                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3472                         DRM_ERROR(
3473                                 "KMS: Cannot support more than %d display indexes\n",
3474                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3475                         continue;
3476                 }
3477
3478                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3479                 if (!aconnector)
3480                         goto fail;
3481
3482                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3483                 if (!aencoder)
3484                         goto fail;
3485
3486                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3487                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3488                         goto fail;
3489                 }
3490
3491                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3492                         DRM_ERROR("KMS: Failed to initialize connector\n");
3493                         goto fail;
3494                 }
3495
3496                 link = dc_get_link_at_index(dm->dc, i);
3497
3498                 if (!dc_link_detect_sink(link, &new_connection_type))
3499                         DRM_ERROR("KMS: Failed to detect connector\n");
3500
3501                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3502                         emulated_link_detect(link);
3503                         amdgpu_dm_update_connector_after_detect(aconnector);
3504
3505                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3506                         amdgpu_dm_update_connector_after_detect(aconnector);
3507                         register_backlight_device(dm, link);
3508                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3509                                 amdgpu_dm_set_psr_caps(link);
3510                 }
3511
3512
3513         }
3514
3515         /* Software is initialized. Now we can register interrupt handlers. */
3516         switch (adev->asic_type) {
3517 #if defined(CONFIG_DRM_AMD_DC_SI)
3518         case CHIP_TAHITI:
3519         case CHIP_PITCAIRN:
3520         case CHIP_VERDE:
3521         case CHIP_OLAND:
3522                 if (dce60_register_irq_handlers(dm->adev)) {
3523                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3524                         goto fail;
3525                 }
3526                 break;
3527 #endif
3528         case CHIP_BONAIRE:
3529         case CHIP_HAWAII:
3530         case CHIP_KAVERI:
3531         case CHIP_KABINI:
3532         case CHIP_MULLINS:
3533         case CHIP_TONGA:
3534         case CHIP_FIJI:
3535         case CHIP_CARRIZO:
3536         case CHIP_STONEY:
3537         case CHIP_POLARIS11:
3538         case CHIP_POLARIS10:
3539         case CHIP_POLARIS12:
3540         case CHIP_VEGAM:
3541         case CHIP_VEGA10:
3542         case CHIP_VEGA12:
3543         case CHIP_VEGA20:
3544                 if (dce110_register_irq_handlers(dm->adev)) {
3545                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3546                         goto fail;
3547                 }
3548                 break;
3549 #if defined(CONFIG_DRM_AMD_DC_DCN)
3550         case CHIP_RAVEN:
3551         case CHIP_NAVI12:
3552         case CHIP_NAVI10:
3553         case CHIP_NAVI14:
3554         case CHIP_RENOIR:
3555         case CHIP_SIENNA_CICHLID:
3556         case CHIP_NAVY_FLOUNDER:
3557         case CHIP_DIMGREY_CAVEFISH:
3558         case CHIP_VANGOGH:
3559                 if (dcn10_register_irq_handlers(dm->adev)) {
3560                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3561                         goto fail;
3562                 }
3563                 break;
3564 #endif
3565         default:
3566                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3567                 goto fail;
3568         }
3569
3570         return 0;
3571 fail:
3572         kfree(aencoder);
3573         kfree(aconnector);
3574
3575         return -EINVAL;
3576 }
3577
3578 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3579 {
3580         drm_mode_config_cleanup(dm->ddev);
3581         drm_atomic_private_obj_fini(&dm->atomic_obj);
3583 }
3584
3585 /******************************************************************************
3586  * amdgpu_display_funcs functions
3587  *****************************************************************************/
3588
3589 /*
3590  * dm_bandwidth_update - program display watermarks
3591  *
3592  * @adev: amdgpu_device pointer
3593  *
3594  * Calculate and program the display watermarks and line buffer allocation.
3595  */
3596 static void dm_bandwidth_update(struct amdgpu_device *adev)
3597 {
3598         /* TODO: implement later */
3599 }
3600
3601 static const struct amdgpu_display_funcs dm_display_funcs = {
3602         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3603         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3604         .backlight_set_level = NULL, /* never called for DC */
3605         .backlight_get_level = NULL, /* never called for DC */
3606         .hpd_sense = NULL, /* called unconditionally */
3607         .hpd_set_polarity = NULL, /* called unconditionally */
3608         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3609         .page_flip_get_scanoutpos =
3610                 dm_crtc_get_scanoutpos, /* called unconditionally */
3611         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3612         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3613 };
3614
3615 #if defined(CONFIG_DEBUG_KERNEL_DC)
3616
3617 static ssize_t s3_debug_store(struct device *device,
3618                               struct device_attribute *attr,
3619                               const char *buf,
3620                               size_t count)
3621 {
3622         int ret;
3623         int s3_state;
3624         struct drm_device *drm_dev = dev_get_drvdata(device);
3625         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3626
3627         ret = kstrtoint(buf, 0, &s3_state);
3628
3629         if (ret == 0) {
3630                 if (s3_state) {
3631                         dm_resume(adev);
3632                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3633                 } else
3634                         dm_suspend(adev);
3635         }
3636
3637         return ret == 0 ? count : 0;
3638 }
3639
3640 DEVICE_ATTR_WO(s3_debug);
3641
3642 #endif
3643
3644 static int dm_early_init(void *handle)
3645 {
3646         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3647
3648         switch (adev->asic_type) {
3649 #if defined(CONFIG_DRM_AMD_DC_SI)
3650         case CHIP_TAHITI:
3651         case CHIP_PITCAIRN:
3652         case CHIP_VERDE:
3653                 adev->mode_info.num_crtc = 6;
3654                 adev->mode_info.num_hpd = 6;
3655                 adev->mode_info.num_dig = 6;
3656                 break;
3657         case CHIP_OLAND:
3658                 adev->mode_info.num_crtc = 2;
3659                 adev->mode_info.num_hpd = 2;
3660                 adev->mode_info.num_dig = 2;
3661                 break;
3662 #endif
3663         case CHIP_BONAIRE:
3664         case CHIP_HAWAII:
3665                 adev->mode_info.num_crtc = 6;
3666                 adev->mode_info.num_hpd = 6;
3667                 adev->mode_info.num_dig = 6;
3668                 break;
3669         case CHIP_KAVERI:
3670                 adev->mode_info.num_crtc = 4;
3671                 adev->mode_info.num_hpd = 6;
3672                 adev->mode_info.num_dig = 7;
3673                 break;
3674         case CHIP_KABINI:
3675         case CHIP_MULLINS:
3676                 adev->mode_info.num_crtc = 2;
3677                 adev->mode_info.num_hpd = 6;
3678                 adev->mode_info.num_dig = 6;
3679                 break;
3680         case CHIP_FIJI:
3681         case CHIP_TONGA:
3682                 adev->mode_info.num_crtc = 6;
3683                 adev->mode_info.num_hpd = 6;
3684                 adev->mode_info.num_dig = 7;
3685                 break;
3686         case CHIP_CARRIZO:
3687                 adev->mode_info.num_crtc = 3;
3688                 adev->mode_info.num_hpd = 6;
3689                 adev->mode_info.num_dig = 9;
3690                 break;
3691         case CHIP_STONEY:
3692                 adev->mode_info.num_crtc = 2;
3693                 adev->mode_info.num_hpd = 6;
3694                 adev->mode_info.num_dig = 9;
3695                 break;
3696         case CHIP_POLARIS11:
3697         case CHIP_POLARIS12:
3698                 adev->mode_info.num_crtc = 5;
3699                 adev->mode_info.num_hpd = 5;
3700                 adev->mode_info.num_dig = 5;
3701                 break;
3702         case CHIP_POLARIS10:
3703         case CHIP_VEGAM:
3704                 adev->mode_info.num_crtc = 6;
3705                 adev->mode_info.num_hpd = 6;
3706                 adev->mode_info.num_dig = 6;
3707                 break;
3708         case CHIP_VEGA10:
3709         case CHIP_VEGA12:
3710         case CHIP_VEGA20:
3711                 adev->mode_info.num_crtc = 6;
3712                 adev->mode_info.num_hpd = 6;
3713                 adev->mode_info.num_dig = 6;
3714                 break;
3715 #if defined(CONFIG_DRM_AMD_DC_DCN)
3716         case CHIP_RAVEN:
3717         case CHIP_RENOIR:
3718         case CHIP_VANGOGH:
3719                 adev->mode_info.num_crtc = 4;
3720                 adev->mode_info.num_hpd = 4;
3721                 adev->mode_info.num_dig = 4;
3722                 break;
3723         case CHIP_NAVI10:
3724         case CHIP_NAVI12:
3725         case CHIP_SIENNA_CICHLID:
3726         case CHIP_NAVY_FLOUNDER:
3727                 adev->mode_info.num_crtc = 6;
3728                 adev->mode_info.num_hpd = 6;
3729                 adev->mode_info.num_dig = 6;
3730                 break;
3731         case CHIP_NAVI14:
3732         case CHIP_DIMGREY_CAVEFISH:
3733                 adev->mode_info.num_crtc = 5;
3734                 adev->mode_info.num_hpd = 5;
3735                 adev->mode_info.num_dig = 5;
3736                 break;
3737 #endif
3738         default:
3739                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3740                 return -EINVAL;
3741         }
3742
3743         amdgpu_dm_set_irq_funcs(adev);
3744
3745         if (adev->mode_info.funcs == NULL)
3746                 adev->mode_info.funcs = &dm_display_funcs;
3747
3748         /*
3749          * Note: Do NOT change adev->audio_endpt_rreg and
3750          * adev->audio_endpt_wreg because they are initialised in
3751          * amdgpu_device_init()
3752          */
3753 #if defined(CONFIG_DEBUG_KERNEL_DC)
3754         device_create_file(
3755                 adev_to_drm(adev)->dev,
3756                 &dev_attr_s3_debug);
3757 #endif
3758
3759         return 0;
3760 }
3761
3762 static bool modeset_required(struct drm_crtc_state *crtc_state,
3763                              struct dc_stream_state *new_stream,
3764                              struct dc_stream_state *old_stream)
3765 {
3766         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3767 }
3768
3769 static bool modereset_required(struct drm_crtc_state *crtc_state)
3770 {
3771         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3772 }
3773
3774 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3775 {
3776         drm_encoder_cleanup(encoder);
3777         kfree(encoder);
3778 }
3779
3780 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3781         .destroy = amdgpu_dm_encoder_destroy,
3782 };
3783
3784
3785 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3786                                          struct drm_framebuffer *fb,
3787                                          int *min_downscale, int *max_upscale)
3788 {
3789         struct amdgpu_device *adev = drm_to_adev(dev);
3790         struct dc *dc = adev->dm.dc;
3791         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3792         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3793
3794         switch (fb->format->format) {
3795         case DRM_FORMAT_P010:
3796         case DRM_FORMAT_NV12:
3797         case DRM_FORMAT_NV21:
3798                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3799                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3800                 break;
3801
3802         case DRM_FORMAT_XRGB16161616F:
3803         case DRM_FORMAT_ARGB16161616F:
3804         case DRM_FORMAT_XBGR16161616F:
3805         case DRM_FORMAT_ABGR16161616F:
3806                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3807                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3808                 break;
3809
3810         default:
3811                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3812                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3813                 break;
3814         }
3815
3816         /*
3817          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3818          * scaling factor of 1.0 == 1000 units.
3819          */
3820         if (*max_upscale == 1)
3821                 *max_upscale = 1000;
3822
3823         if (*min_downscale == 1)
3824                 *min_downscale = 1000;
3825 }
3826
3827
3828 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3829                                 struct dc_scaling_info *scaling_info)
3830 {
3831         int scale_w, scale_h, min_downscale, max_upscale;
3832
3833         memset(scaling_info, 0, sizeof(*scaling_info));
3834
3835         /* Source is fixed 16.16 but we ignore mantissa for now... */
3836         scaling_info->src_rect.x = state->src_x >> 16;
3837         scaling_info->src_rect.y = state->src_y >> 16;
3838
3839         scaling_info->src_rect.width = state->src_w >> 16;
3840         if (scaling_info->src_rect.width == 0)
3841                 return -EINVAL;
3842
3843         scaling_info->src_rect.height = state->src_h >> 16;
3844         if (scaling_info->src_rect.height == 0)
3845                 return -EINVAL;
3846
3847         scaling_info->dst_rect.x = state->crtc_x;
3848         scaling_info->dst_rect.y = state->crtc_y;
3849
3850         if (state->crtc_w == 0)
3851                 return -EINVAL;
3852
3853         scaling_info->dst_rect.width = state->crtc_w;
3854
3855         if (state->crtc_h == 0)
3856                 return -EINVAL;
3857
3858         scaling_info->dst_rect.height = state->crtc_h;
3859
3860         /* DRM doesn't specify clipping on destination output. */
3861         scaling_info->clip_rect = scaling_info->dst_rect;
3862
3863         /* Validate scaling per-format with DC plane caps */
3864         if (state->plane && state->plane->dev && state->fb) {
3865                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3866                                              &min_downscale, &max_upscale);
3867         } else {
3868                 min_downscale = 250;
3869                 max_upscale = 16000;
3870         }
3871
3872         scale_w = scaling_info->dst_rect.width * 1000 /
3873                   scaling_info->src_rect.width;
3874
3875         if (scale_w < min_downscale || scale_w > max_upscale)
3876                 return -EINVAL;
3877
3878         scale_h = scaling_info->dst_rect.height * 1000 /
3879                   scaling_info->src_rect.height;
3880
3881         if (scale_h < min_downscale || scale_h > max_upscale)
3882                 return -EINVAL;
3883
3884         /*
3885          * The "scaling_quality" can be ignored for now, quality = 0 has DC
3886          * assume reasonable defaults based on the format.
3887          */
3888
3889         return 0;
3890 }
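/*
 * Worked example (illustrative): a 1920x1080 source scanned out to a
 * 960x540 destination gives scale_w = scale_h = 500, i.e. a 0.5x
 * downscale; this passes whenever the format's min_downscale is <= 500
 * (the conservative fallback above allows down to 250, i.e. 0.25x).
 */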
3891
3892 static void
3893 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3894                                  uint64_t tiling_flags)
3895 {
3896         /* Fill GFX8 params */
3897         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3898                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3899
3900                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3901                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3902                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3903                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3904                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3905
3906                 /* XXX fix me for VI */
3907                 tiling_info->gfx8.num_banks = num_banks;
3908                 tiling_info->gfx8.array_mode =
3909                                 DC_ARRAY_2D_TILED_THIN1;
3910                 tiling_info->gfx8.tile_split = tile_split;
3911                 tiling_info->gfx8.bank_width = bankw;
3912                 tiling_info->gfx8.bank_height = bankh;
3913                 tiling_info->gfx8.tile_aspect = mtaspect;
3914                 tiling_info->gfx8.tile_mode =
3915                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3916         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3917                         == DC_ARRAY_1D_TILED_THIN1) {
3918                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3919         }
3920
3921         tiling_info->gfx8.pipe_config =
3922                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3923 }
3924
3925 static void
3926 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3927                                   union dc_tiling_info *tiling_info)
3928 {
3929         tiling_info->gfx9.num_pipes =
3930                 adev->gfx.config.gb_addr_config_fields.num_pipes;
3931         tiling_info->gfx9.num_banks =
3932                 adev->gfx.config.gb_addr_config_fields.num_banks;
3933         tiling_info->gfx9.pipe_interleave =
3934                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3935         tiling_info->gfx9.num_shader_engines =
3936                 adev->gfx.config.gb_addr_config_fields.num_se;
3937         tiling_info->gfx9.max_compressed_frags =
3938                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3939         tiling_info->gfx9.num_rb_per_se =
3940                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3941         tiling_info->gfx9.shaderEnable = 1;
3942         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3943             adev->asic_type == CHIP_NAVY_FLOUNDER ||
3944             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3945             adev->asic_type == CHIP_VANGOGH)
3946                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3947 }
3948
3949 static int
3950 validate_dcc(struct amdgpu_device *adev,
3951              const enum surface_pixel_format format,
3952              const enum dc_rotation_angle rotation,
3953              const union dc_tiling_info *tiling_info,
3954              const struct dc_plane_dcc_param *dcc,
3955              const struct dc_plane_address *address,
3956              const struct plane_size *plane_size)
3957 {
3958         struct dc *dc = adev->dm.dc;
3959         struct dc_dcc_surface_param input;
3960         struct dc_surface_dcc_cap output;
3961
3962         memset(&input, 0, sizeof(input));
3963         memset(&output, 0, sizeof(output));
3964
3965         if (!dcc->enable)
3966                 return 0;
3967
3968         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3969             !dc->cap_funcs.get_dcc_compression_cap)
3970                 return -EINVAL;
3971
3972         input.format = format;
3973         input.surface_size.width = plane_size->surface_size.width;
3974         input.surface_size.height = plane_size->surface_size.height;
3975         input.swizzle_mode = tiling_info->gfx9.swizzle;
3976
3977         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3978                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3979         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3980                 input.scan = SCAN_DIRECTION_VERTICAL;
3981
3982         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3983                 return -EINVAL;
3984
3985         if (!output.capable)
3986                 return -EINVAL;
3987
3988         if (dcc->independent_64b_blks == 0 &&
3989             output.grph.rgb.independent_64b_blks != 0)
3990                 return -EINVAL;
3991
3992         return 0;
3993 }
3994
3995 static bool
3996 modifier_has_dcc(uint64_t modifier)
3997 {
3998         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3999 }
4000
4001 static unsigned
4002 modifier_gfx9_swizzle_mode(uint64_t modifier)
4003 {
4004         if (modifier == DRM_FORMAT_MOD_LINEAR)
4005                 return 0;
4006
4007         return AMD_FMT_MOD_GET(TILE, modifier);
4008 }
4009
4010 static const struct drm_format_info *
4011 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4012 {
4013         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4014 }
4015
4016 static void
4017 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4018                                     union dc_tiling_info *tiling_info,
4019                                     uint64_t modifier)
4020 {
4021         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4022         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4023         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4024         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4025
4026         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4027
4028         if (!IS_AMD_FMT_MOD(modifier))
4029                 return;
4030
4031         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4032         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4033
4034         if (adev->family >= AMDGPU_FAMILY_NV) {
4035                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4036         } else {
4037                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4038
4039                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4040         }
4041 }
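/*
 * Decode example (illustrative): a modifier with PIPE_XOR_BITS = 3 gives
 * pipes_log2 = min(4, 3) = 3, so num_pipes = 1 << 3 = 8 and
 * num_shader_engines = 1 << (3 - 3) = 1. On AMDGPU_FAMILY_NV and newer
 * the PACKERS field sets num_pkrs; on older parts BANK_XOR_BITS sets
 * num_banks instead.
 */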
4042
4043 enum dm_micro_swizzle {
4044         MICRO_SWIZZLE_Z = 0,
4045         MICRO_SWIZZLE_S = 1,
4046         MICRO_SWIZZLE_D = 2,
4047         MICRO_SWIZZLE_R = 3
4048 };
4049
4050 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4051                                           uint32_t format,
4052                                           uint64_t modifier)
4053 {
4054         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4055         const struct drm_format_info *info = drm_format_info(format);
4056
4057         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4058
4059         if (!info)
4060                 return false;
4061
4062         /*
4063          * We always have to allow this modifier, because core DRM still
4064          * checks LINEAR support if userspace does not provide modifiers.
4065          */
4066         if (modifier == DRM_FORMAT_MOD_LINEAR)
4067                 return true;
4068
4069         /*
4070          * The arbitrary tiling support for multiplane formats has not been hooked
4071          * up.
4072          */
4073         if (info->num_planes > 1)
4074                 return false;
4075
4076         /*
4077          * For D swizzle the canonical modifier depends on the bpp, so check
4078          * it here.
4079          */
4080         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4081             adev->family >= AMDGPU_FAMILY_NV) {
4082                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4083                         return false;
4084         }
4085
4086         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4087             info->cpp[0] < 8)
4088                 return false;
4089
4090         if (modifier_has_dcc(modifier)) {
4091                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4092                 if (info->cpp[0] != 4)
4093                         return false;
4094         }
4095
4096         return true;
4097 }
4098
4099 static void
4100 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4101 {
4102         if (!*mods)
4103                 return;
4104
4105         if (*cap - *size < 1) {
4106                 uint64_t new_cap = *cap * 2;
4107                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4108
4109                 if (!new_mods) {
4110                         kfree(*mods);
4111                         *mods = NULL;
4112                         return;
4113                 }
4114
4115                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4116                 kfree(*mods);
4117                 *mods = new_mods;
4118                 *cap = new_cap;
4119         }
4120
4121         (*mods)[*size] = mod;
4122         *size += 1;
4123 }
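/*
 * Growth note: the backing array doubles whenever it fills, so repeated
 * add_modifier() calls append in amortized O(1). On allocation failure
 * the list is freed and *mods is set to NULL, which both later calls
 * (the early return above) and the caller treat as "no modifier list".
 */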
4124
4125 static void
4126 add_gfx9_modifiers(const struct amdgpu_device *adev,
4127                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4128 {
4129         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4130         int pipe_xor_bits = min(8, pipes +
4131                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4132         int bank_xor_bits = min(8 - pipe_xor_bits,
4133                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4134         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4135                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4136
4137
4138         if (adev->family == AMDGPU_FAMILY_RV) {
4139                 /* Raven2 and later */
4140                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4141
4142                 /*
4143                  * No _D DCC swizzles yet because we only allow 32bpp, which
4144                  * doesn't support _D on DCN
4145                  */
4146
4147                 if (has_constant_encode) {
4148                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4149                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4150                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4151                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4152                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4153                                     AMD_FMT_MOD_SET(DCC, 1) |
4154                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4155                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4156                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4157                 }
4158
4159                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4160                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4161                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4162                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4163                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4164                             AMD_FMT_MOD_SET(DCC, 1) |
4165                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4166                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4167                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4168
4169                 if (has_constant_encode) {
4170                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4171                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4172                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4173                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4174                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4175                                     AMD_FMT_MOD_SET(DCC, 1) |
4176                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4177                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4178                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4179
4180                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4181                                     AMD_FMT_MOD_SET(RB, rb) |
4182                                     AMD_FMT_MOD_SET(PIPE, pipes));
4183                 }
4184
4185                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4186                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4187                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4188                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4189                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4190                             AMD_FMT_MOD_SET(DCC, 1) |
4191                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4192                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4193                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4194                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4195                             AMD_FMT_MOD_SET(RB, rb) |
4196                             AMD_FMT_MOD_SET(PIPE, pipes));
4197         }
4198
4199         /*
4200          * Only supported for 64bpp on Raven, will be filtered on format in
4201          * dm_plane_format_mod_supported.
4202          */
4203         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4204                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4205                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4206                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4207                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4208
4209         if (adev->family == AMDGPU_FAMILY_RV) {
4210                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4211                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4212                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4213                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4214                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4215         }
4216
4217         /*
4218          * Only supported for 64bpp on Raven, will be filtered on format in
4219          * dm_plane_format_mod_supported.
4220          */
4221         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4222                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4223                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4224
4225         if (adev->family == AMDGPU_FAMILY_RV) {
4226                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4228                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4229         }
4230 }
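
/*
 * Each entry built above is a single u64: AMD_FMT_MOD sets the AMD vendor
 * bits, and every AMD_FMT_MOD_SET() ORs one field into its reserved bit
 * range. A hedged sketch of decoding such a value with the matching getters
 * from <drm/drm_fourcc.h> (illustrative only, not driver code):
 *
 *	uint64_t mod = AMD_FMT_MOD |
 *		       AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *		       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 *		       AMD_FMT_MOD_SET(DCC, 1);
 *
 *	if (AMD_FMT_MOD_GET(DCC, mod))
 *		pr_info("tile %llu, tile version %llu, DCC on\n",
 *			AMD_FMT_MOD_GET(TILE, mod),
 *			AMD_FMT_MOD_GET(TILE_VERSION, mod));
 */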
4231
4232 static void
4233 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4234                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4235 {
4236         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4237
4238         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4239                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4240                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4241                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4242                     AMD_FMT_MOD_SET(DCC, 1) |
4243                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4244                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4245                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4246
4247         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4248                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4249                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4250                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4251                     AMD_FMT_MOD_SET(DCC, 1) |
4252                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4253                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4254                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4255                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4256
4257         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4258                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4259                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4260                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4261
4262         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4263                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4264                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4265                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4266
4268         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4269         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4270                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4271                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4272
4273         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4274                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4275                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4276 }
4277
4278 static void
4279 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4280                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4281 {
4282         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4283         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4284
4285         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4286                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4287                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4288                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4289                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4290                     AMD_FMT_MOD_SET(DCC, 1) |
4291                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4292                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4293                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4294                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4295
4296         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4297                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4298                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4299                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4300                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4301                     AMD_FMT_MOD_SET(DCC, 1) |
4302                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4303                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4304                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4305                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4306                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4307
4308         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4309                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4310                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4311                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4312                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4313
4314         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4315                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4316                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4317                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4318                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4319
4320         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4321         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4322                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4323                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4324
4325         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4326                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4327                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4328 }
4329
4330 static int
4331 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4332 {
4333         uint64_t size = 0, capacity = 128;
4334         *mods = NULL;
4335
4336         /* We have not hooked up any pre-GFX9 modifiers. */
4337         if (adev->family < AMDGPU_FAMILY_AI)
4338                 return 0;
4339
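        /*
         * add_modifier() is expected to bail out quietly if this allocation
         * failed (i.e. when *mods is NULL); the failure is reported through
         * the -ENOMEM returns below rather than here.
         */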
4340         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4341
4342         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4343                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4344                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4345                 return *mods ? 0 : -ENOMEM;
4346         }
4347
4348         switch (adev->family) {
4349         case AMDGPU_FAMILY_AI:
4350         case AMDGPU_FAMILY_RV:
4351                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4352                 break;
4353         case AMDGPU_FAMILY_NV:
4354         case AMDGPU_FAMILY_VGH:
4355                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4356                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4357                 else
4358                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4359                 break;
4360         }
4361
4362         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4363
4364         /* INVALID marks the end of the list. */
4365         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4366
4367         if (!*mods)
4368                 return -ENOMEM;
4369
4370         return 0;
4371 }
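
/*
 * The array built here is terminated by DRM_FORMAT_MOD_INVALID and is what
 * the caller hands to drm_universal_plane_init() as its format_modifiers
 * argument. A hedged sketch of walking such a list (count_mods() is a
 * hypothetical helper, not part of this driver):
 *
 *	static unsigned int count_mods(const uint64_t *mods)
 *	{
 *		unsigned int n = 0;
 *
 *		while (mods[n] != DRM_FORMAT_MOD_INVALID)
 *			n++;
 *		return n;
 *	}
 *
 * The INVALID terminator itself is not counted.
 */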
4372
4373 static int
4374 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4375                                           const struct amdgpu_framebuffer *afb,
4376                                           const enum surface_pixel_format format,
4377                                           const enum dc_rotation_angle rotation,
4378                                           const struct plane_size *plane_size,
4379                                           union dc_tiling_info *tiling_info,
4380                                           struct dc_plane_dcc_param *dcc,
4381                                           struct dc_plane_address *address,
4382                                           const bool force_disable_dcc)
4383 {
4384         const uint64_t modifier = afb->base.modifier;
4385         int ret;
4386
4387         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4388         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4389
4390         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4391                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4392
4393                 dcc->enable = 1;
4394                 dcc->meta_pitch = afb->base.pitches[1];
4395                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4396
4397                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4398                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4399         }
4400
4401         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4402         if (ret)
4403                 return ret;
4404
4405         return 0;
4406 }
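
/*
 * With a DCC modifier the framebuffer carries its compression metadata as a
 * second plane: fb->offsets[1] and fb->pitches[1] locate it relative to the
 * same base address as the color data. Worked example (made-up numbers):
 *
 *	afb->address        == 0x100000
 *	fb->base.offsets[1] == 0x7e9000
 *	dcc_address         == 0x100000 + 0x7e9000 == 0x8e9000
 *	meta_addr.low_part  == lower_32_bits(0x8e9000) == 0x8e9000
 *	meta_addr.high_part == upper_32_bits(0x8e9000) == 0
 */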
4407
4408 static int
4409 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4410                              const struct amdgpu_framebuffer *afb,
4411                              const enum surface_pixel_format format,
4412                              const enum dc_rotation_angle rotation,
4413                              const uint64_t tiling_flags,
4414                              union dc_tiling_info *tiling_info,
4415                              struct plane_size *plane_size,
4416                              struct dc_plane_dcc_param *dcc,
4417                              struct dc_plane_address *address,
4418                              bool tmz_surface,
4419                              bool force_disable_dcc)
4420 {
4421         const struct drm_framebuffer *fb = &afb->base;
4422         int ret;
4423
4424         memset(tiling_info, 0, sizeof(*tiling_info));
4425         memset(plane_size, 0, sizeof(*plane_size));
4426         memset(dcc, 0, sizeof(*dcc));
4427         memset(address, 0, sizeof(*address));
4428
4429         address->tmz_surface = tmz_surface;
4430
4431         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4432                 uint64_t addr = afb->address + fb->offsets[0];
4433
4434                 plane_size->surface_size.x = 0;
4435                 plane_size->surface_size.y = 0;
4436                 plane_size->surface_size.width = fb->width;
4437                 plane_size->surface_size.height = fb->height;
4438                 plane_size->surface_pitch =
4439                         fb->pitches[0] / fb->format->cpp[0];
4440
4441                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4442                 address->grph.addr.low_part = lower_32_bits(addr);
4443                 address->grph.addr.high_part = upper_32_bits(addr);
4444         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4445                 uint64_t luma_addr = afb->address + fb->offsets[0];
4446                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4447
4448                 plane_size->surface_size.x = 0;
4449                 plane_size->surface_size.y = 0;
4450                 plane_size->surface_size.width = fb->width;
4451                 plane_size->surface_size.height = fb->height;
4452                 plane_size->surface_pitch =
4453                         fb->pitches[0] / fb->format->cpp[0];
4454
4455                 plane_size->chroma_size.x = 0;
4456                 plane_size->chroma_size.y = 0;
4457                 /* TODO: set these based on surface format */
4458                 plane_size->chroma_size.width = fb->width / 2;
4459                 plane_size->chroma_size.height = fb->height / 2;
4460
4461                 plane_size->chroma_pitch =
4462                         fb->pitches[1] / fb->format->cpp[1];
4463
4464                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4465                 address->video_progressive.luma_addr.low_part =
4466                         lower_32_bits(luma_addr);
4467                 address->video_progressive.luma_addr.high_part =
4468                         upper_32_bits(luma_addr);
4469                 address->video_progressive.chroma_addr.low_part =
4470                         lower_32_bits(chroma_addr);
4471                 address->video_progressive.chroma_addr.high_part =
4472                         upper_32_bits(chroma_addr);
4473         }
4474
4475         if (adev->family >= AMDGPU_FAMILY_AI) {
4476                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4477                                                                 rotation, plane_size,
4478                                                                 tiling_info, dcc,
4479                                                                 address,
4480                                                                 force_disable_dcc);
4481                 if (ret)
4482                         return ret;
4483         } else {
4484                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4485         }
4486
4487         return 0;
4488 }
4489
4490 static void
4491 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4492                                bool *per_pixel_alpha, bool *global_alpha,
4493                                int *global_alpha_value)
4494 {
4495         *per_pixel_alpha = false;
4496         *global_alpha = false;
4497         *global_alpha_value = 0xff;
4498
4499         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4500                 return;
4501
4502         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4503                 static const uint32_t alpha_formats[] = {
4504                         DRM_FORMAT_ARGB8888,
4505                         DRM_FORMAT_RGBA8888,
4506                         DRM_FORMAT_ABGR8888,
4507                 };
4508                 uint32_t format = plane_state->fb->format->format;
4509                 unsigned int i;
4510
4511                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4512                         if (format == alpha_formats[i]) {
4513                                 *per_pixel_alpha = true;
4514                                 break;
4515                         }
4516                 }
4517         }
4518
4519         if (plane_state->alpha < 0xffff) {
4520                 *global_alpha = true;
4521                 *global_alpha_value = plane_state->alpha >> 8;
4522         }
4523 }
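
/*
 * drm_plane_state.alpha is 16 bit (0x0000-0xffff) while DC takes an 8 bit
 * global alpha, hence the ">> 8" above. Worked example:
 *
 *	plane_state->alpha == 0x8000 (roughly 50% from userspace)
 *	0x8000 < 0xffff, so *global_alpha = true
 *	*global_alpha_value = 0x8000 >> 8 = 0x80
 */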
4524
4525 static int
4526 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4527                             const enum surface_pixel_format format,
4528                             enum dc_color_space *color_space)
4529 {
4530         bool full_range;
4531
4532         *color_space = COLOR_SPACE_SRGB;
4533
4534         /* DRM color properties only affect non-RGB formats. */
4535         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4536                 return 0;
4537
4538         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4539
4540         switch (plane_state->color_encoding) {
4541         case DRM_COLOR_YCBCR_BT601:
4542                 if (full_range)
4543                         *color_space = COLOR_SPACE_YCBCR601;
4544                 else
4545                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4546                 break;
4547
4548         case DRM_COLOR_YCBCR_BT709:
4549                 if (full_range)
4550                         *color_space = COLOR_SPACE_YCBCR709;
4551                 else
4552                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4553                 break;
4554
4555         case DRM_COLOR_YCBCR_BT2020:
4556                 if (full_range)
4557                         *color_space = COLOR_SPACE_2020_YCBCR;
4558                 else
4559                         return -EINVAL;
4560                 break;
4561
4562         default:
4563                 return -EINVAL;
4564         }
4565
4566         return 0;
4567 }
4568
4569 static int
4570 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4571                             const struct drm_plane_state *plane_state,
4572                             const uint64_t tiling_flags,
4573                             struct dc_plane_info *plane_info,
4574                             struct dc_plane_address *address,
4575                             bool tmz_surface,
4576                             bool force_disable_dcc)
4577 {
4578         const struct drm_framebuffer *fb = plane_state->fb;
4579         const struct amdgpu_framebuffer *afb =
4580                 to_amdgpu_framebuffer(plane_state->fb);
4581         struct drm_format_name_buf format_name;
4582         int ret;
4583
4584         memset(plane_info, 0, sizeof(*plane_info));
4585
4586         switch (fb->format->format) {
4587         case DRM_FORMAT_C8:
4588                 plane_info->format =
4589                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4590                 break;
4591         case DRM_FORMAT_RGB565:
4592                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4593                 break;
4594         case DRM_FORMAT_XRGB8888:
4595         case DRM_FORMAT_ARGB8888:
4596                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4597                 break;
4598         case DRM_FORMAT_XRGB2101010:
4599         case DRM_FORMAT_ARGB2101010:
4600                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4601                 break;
4602         case DRM_FORMAT_XBGR2101010:
4603         case DRM_FORMAT_ABGR2101010:
4604                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4605                 break;
4606         case DRM_FORMAT_XBGR8888:
4607         case DRM_FORMAT_ABGR8888:
4608                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4609                 break;
4610         case DRM_FORMAT_NV21:
4611                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4612                 break;
4613         case DRM_FORMAT_NV12:
4614                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4615                 break;
4616         case DRM_FORMAT_P010:
4617                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4618                 break;
4619         case DRM_FORMAT_XRGB16161616F:
4620         case DRM_FORMAT_ARGB16161616F:
4621                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4622                 break;
4623         case DRM_FORMAT_XBGR16161616F:
4624         case DRM_FORMAT_ABGR16161616F:
4625                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4626                 break;
4627         default:
4628                 DRM_ERROR(
4629                         "Unsupported screen format %s\n",
4630                         drm_get_format_name(fb->format->format, &format_name));
4631                 return -EINVAL;
4632         }
4633
4634         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4635         case DRM_MODE_ROTATE_0:
4636                 plane_info->rotation = ROTATION_ANGLE_0;
4637                 break;
4638         case DRM_MODE_ROTATE_90:
4639                 plane_info->rotation = ROTATION_ANGLE_90;
4640                 break;
4641         case DRM_MODE_ROTATE_180:
4642                 plane_info->rotation = ROTATION_ANGLE_180;
4643                 break;
4644         case DRM_MODE_ROTATE_270:
4645                 plane_info->rotation = ROTATION_ANGLE_270;
4646                 break;
4647         default:
4648                 plane_info->rotation = ROTATION_ANGLE_0;
4649                 break;
4650         }
4651
4652         plane_info->visible = true;
4653         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4654
4655         plane_info->layer_index = 0;
4656
4657         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4658                                           &plane_info->color_space);
4659         if (ret)
4660                 return ret;
4661
4662         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4663                                            plane_info->rotation, tiling_flags,
4664                                            &plane_info->tiling_info,
4665                                            &plane_info->plane_size,
4666                                            &plane_info->dcc, address, tmz_surface,
4667                                            force_disable_dcc);
4668         if (ret)
4669                 return ret;
4670
4671         fill_blending_from_plane_state(
4672                 plane_state, &plane_info->per_pixel_alpha,
4673                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4674
4675         return 0;
4676 }
4677
4678 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4679                                     struct dc_plane_state *dc_plane_state,
4680                                     struct drm_plane_state *plane_state,
4681                                     struct drm_crtc_state *crtc_state)
4682 {
4683         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4684         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4685         struct dc_scaling_info scaling_info;
4686         struct dc_plane_info plane_info;
4687         int ret;
4688         bool force_disable_dcc = false;
4689
4690         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4691         if (ret)
4692                 return ret;
4693
4694         dc_plane_state->src_rect = scaling_info.src_rect;
4695         dc_plane_state->dst_rect = scaling_info.dst_rect;
4696         dc_plane_state->clip_rect = scaling_info.clip_rect;
4697         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4698
4699         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4700         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4701                                           afb->tiling_flags,
4702                                           &plane_info,
4703                                           &dc_plane_state->address,
4704                                           afb->tmz_surface,
4705                                           force_disable_dcc);
4706         if (ret)
4707                 return ret;
4708
4709         dc_plane_state->format = plane_info.format;
4710         dc_plane_state->color_space = plane_info.color_space;
4712         dc_plane_state->plane_size = plane_info.plane_size;
4713         dc_plane_state->rotation = plane_info.rotation;
4714         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4715         dc_plane_state->stereo_format = plane_info.stereo_format;
4716         dc_plane_state->tiling_info = plane_info.tiling_info;
4717         dc_plane_state->visible = plane_info.visible;
4718         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4719         dc_plane_state->global_alpha = plane_info.global_alpha;
4720         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4721         dc_plane_state->dcc = plane_info.dcc;
4722         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4723
4724         /*
4725          * Always set input transfer function, since plane state is refreshed
4726          * every time.
4727          */
4728         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4729         if (ret)
4730                 return ret;
4731
4732         return 0;
4733 }
4734
4735 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4736                                            const struct dm_connector_state *dm_state,
4737                                            struct dc_stream_state *stream)
4738 {
4739         enum amdgpu_rmx_type rmx_type;
4740
4741         struct rect src = { 0 }; /* viewport in composition space*/
4742         struct rect dst = { 0 }; /* stream addressable area */
4743
4744         /* No mode: nothing to be done. */
4745         if (!mode)
4746                 return;
4747
4748         /* Full screen scaling by default */
4749         src.width = mode->hdisplay;
4750         src.height = mode->vdisplay;
4751         dst.width = stream->timing.h_addressable;
4752         dst.height = stream->timing.v_addressable;
4753
4754         if (dm_state) {
4755                 rmx_type = dm_state->scaling;
4756                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4757                         if (src.width * dst.height <
4758                                         src.height * dst.width) {
4759                                 /* height needs less upscaling/more downscaling */
4760                                 dst.width = src.width *
4761                                                 dst.height / src.height;
4762                         } else {
4763                                 /* width needs less upscaling/more downscaling */
4764                                 dst.height = src.height *
4765                                                 dst.width / src.width;
4766                         }
4767                 } else if (rmx_type == RMX_CENTER) {
4768                         dst = src;
4769                 }
4770
4771                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4772                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4773
4774                 if (dm_state->underscan_enable) {
4775                         dst.x += dm_state->underscan_hborder / 2;
4776                         dst.y += dm_state->underscan_vborder / 2;
4777                         dst.width -= dm_state->underscan_hborder;
4778                         dst.height -= dm_state->underscan_vborder;
4779                 }
4780         }
4781
4782         stream->src = src;
4783         stream->dst = dst;
4784
4785         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4786                         dst.x, dst.y, dst.width, dst.height);
4788 }
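
/*
 * Worked example: a 1280x720 mode with RMX_ASPECT on a 1920x1200 panel.
 * Since 1280 * 1200 >= 720 * 1920, the width is the limiting dimension:
 * dst.height = 720 * 1920 / 1280 = 1080, and the result is centered at
 * dst.x = (1920 - 1920) / 2 = 0, dst.y = (1200 - 1080) / 2 = 60.
 */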
4789
4790 static enum dc_color_depth
4791 convert_color_depth_from_display_info(const struct drm_connector *connector,
4792                                       bool is_y420, int requested_bpc)
4793 {
4794         uint8_t bpc;
4795
4796         if (is_y420) {
4797                 bpc = 8;
4798
4799                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4800                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4801                         bpc = 16;
4802                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4803                         bpc = 12;
4804                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4805                         bpc = 10;
4806         } else {
4807                 bpc = (uint8_t)connector->display_info.bpc;
4808                 /* Assume 8 bpc by default if no bpc is specified. */
4809                 bpc = bpc ? bpc : 8;
4810         }
4811
4812         if (requested_bpc > 0) {
4813                 /*
4814                  * Cap display bpc based on the user requested value.
4815                  *
4816                  * The value for state->max_bpc may not be correctly updated
4817                  * depending on when the connector gets added to the state
4818                  * or if this was called outside of atomic check, so it
4819                  * can't be used directly.
4820                  */
4821                 bpc = min_t(u8, bpc, requested_bpc);
4822
4823                 /* Round down to the nearest even number. */
4824                 bpc = bpc - (bpc & 1);
4825         }
4826
4827         switch (bpc) {
4828         case 0:
4829                 /*
4830                  * Temporary workaround: DRM doesn't parse color depth for
4831                  * EDID revisions before 1.4.
4832                  * TODO: Fix edid parsing
4833                  */
4834                 return COLOR_DEPTH_888;
4835         case 6:
4836                 return COLOR_DEPTH_666;
4837         case 8:
4838                 return COLOR_DEPTH_888;
4839         case 10:
4840                 return COLOR_DEPTH_101010;
4841         case 12:
4842                 return COLOR_DEPTH_121212;
4843         case 14:
4844                 return COLOR_DEPTH_141414;
4845         case 16:
4846                 return COLOR_DEPTH_161616;
4847         default:
4848                 return COLOR_DEPTH_UNDEFINED;
4849         }
4850 }
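
/*
 * Worked example: a connector reporting display_info.bpc == 12 with
 * requested_bpc == 11 yields min_t(u8, 12, 11) == 11, which the
 * round-down-to-even step turns into 10, so COLOR_DEPTH_101010 is returned.
 */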
4851
4852 static enum dc_aspect_ratio
4853 get_aspect_ratio(const struct drm_display_mode *mode_in)
4854 {
4855         /* 1-1 mapping, since both enums follow the HDMI spec. */
4856         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4857 }
4858
4859 static enum dc_color_space
4860 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4861 {
4862         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4863
4864         switch (dc_crtc_timing->pixel_encoding) {
4865         case PIXEL_ENCODING_YCBCR422:
4866         case PIXEL_ENCODING_YCBCR444:
4867         case PIXEL_ENCODING_YCBCR420:
4868         {
4869                 /*
4870                  * 27030 kHz (27.03 MHz) is the separation point between HDTV
4871                  * and SDTV according to the HDMI spec; we use YCbCr709 above
4872                  * it and YCbCr601 below it.
4873                  */
4874                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4875                         if (dc_crtc_timing->flags.Y_ONLY)
4876                                 color_space =
4877                                         COLOR_SPACE_YCBCR709_LIMITED;
4878                         else
4879                                 color_space = COLOR_SPACE_YCBCR709;
4880                 } else {
4881                         if (dc_crtc_timing->flags.Y_ONLY)
4882                                 color_space =
4883                                         COLOR_SPACE_YCBCR601_LIMITED;
4884                         else
4885                                 color_space = COLOR_SPACE_YCBCR601;
4886                 }
4887
4888         }
4889         break;
4890         case PIXEL_ENCODING_RGB:
4891                 color_space = COLOR_SPACE_SRGB;
4892                 break;
4893
4894         default:
4895                 WARN_ON(1);
4896                 break;
4897         }
4898
4899         return color_space;
4900 }
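
/*
 * Worked example: 1080p60 has a 148.5 MHz pixel clock, i.e.
 * pix_clk_100hz == 1485000 > 270300, so a YCbCr encoding on that timing
 * selects COLOR_SPACE_YCBCR709, while a 480p timing (27 MHz,
 * pix_clk_100hz == 270000) falls below the cutoff and selects
 * COLOR_SPACE_YCBCR601.
 */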
4901
4902 static bool adjust_colour_depth_from_display_info(
4903         struct dc_crtc_timing *timing_out,
4904         const struct drm_display_info *info)
4905 {
4906         enum dc_color_depth depth = timing_out->display_color_depth;
4907         int normalized_clk;

4908         do {
4909                 normalized_clk = timing_out->pix_clk_100hz / 10;
4910                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4911                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4912                         normalized_clk /= 2;
4913                 /* Adjusting pix clock following on HDMI spec based on colour depth */
4914                 switch (depth) {
4915                 case COLOR_DEPTH_888:
4916                         break;
4917                 case COLOR_DEPTH_101010:
4918                         normalized_clk = (normalized_clk * 30) / 24;
4919                         break;
4920                 case COLOR_DEPTH_121212:
4921                         normalized_clk = (normalized_clk * 36) / 24;
4922                         break;
4923                 case COLOR_DEPTH_161616:
4924                         normalized_clk = (normalized_clk * 48) / 24;
4925                         break;
4926                 default:
4927                         /* The above depths are the only ones valid for HDMI. */
4928                         return false;
4929                 }
4930                 if (normalized_clk <= info->max_tmds_clock) {
4931                         timing_out->display_color_depth = depth;
4932                         return true;
4933                 }
4934         } while (--depth > COLOR_DEPTH_666);
4935         return false;
4936 }
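
/*
 * Worked example: 3840x2160@60 in YCbCr 4:2:0 gives normalized_clk =
 * 5940000 / 10 / 2 = 297000 kHz. At 12 bpc that becomes
 * 297000 * 36 / 24 = 445500 kHz; against a 340000 kHz max_tmds_clock the
 * loop steps down through 10 bpc (371250 kHz, still too fast) and settles
 * on 8 bpc (297000 kHz), returning true with COLOR_DEPTH_888.
 */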
4937
4938 static void fill_stream_properties_from_drm_display_mode(
4939         struct dc_stream_state *stream,
4940         const struct drm_display_mode *mode_in,
4941         const struct drm_connector *connector,
4942         const struct drm_connector_state *connector_state,
4943         const struct dc_stream_state *old_stream,
4944         int requested_bpc)
4945 {
4946         struct dc_crtc_timing *timing_out = &stream->timing;
4947         const struct drm_display_info *info = &connector->display_info;
4948         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4949         struct hdmi_vendor_infoframe hv_frame;
4950         struct hdmi_avi_infoframe avi_frame;
4951
4952         memset(&hv_frame, 0, sizeof(hv_frame));
4953         memset(&avi_frame, 0, sizeof(avi_frame));
4954
4955         timing_out->h_border_left = 0;
4956         timing_out->h_border_right = 0;
4957         timing_out->v_border_top = 0;
4958         timing_out->v_border_bottom = 0;
4959         /* TODO: un-hardcode */
4960         if (drm_mode_is_420_only(info, mode_in)
4961                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4962                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4963         else if (drm_mode_is_420_also(info, mode_in)
4964                         && aconnector->force_yuv420_output)
4965                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4966         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4967                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4968                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4969         else
4970                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4971
4972         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4973         timing_out->display_color_depth = convert_color_depth_from_display_info(
4974                 connector,
4975                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4976                 requested_bpc);
4977         timing_out->scan_type = SCANNING_TYPE_NODATA;
4978         timing_out->hdmi_vic = 0;
4979
4980         if (old_stream) {
4981                 timing_out->vic = old_stream->timing.vic;
4982                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4983                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4984         } else {
4985                 timing_out->vic = drm_match_cea_mode(mode_in);
4986                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4987                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4988                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4989                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4990         }
4991
4992         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4993                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4994                 timing_out->vic = avi_frame.video_code;
4995                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4996                 timing_out->hdmi_vic = hv_frame.vic;
4997         }
4998
4999         timing_out->h_addressable = mode_in->crtc_hdisplay;
5000         timing_out->h_total = mode_in->crtc_htotal;
5001         timing_out->h_sync_width =
5002                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5003         timing_out->h_front_porch =
5004                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5005         timing_out->v_total = mode_in->crtc_vtotal;
5006         timing_out->v_addressable = mode_in->crtc_vdisplay;
5007         timing_out->v_front_porch =
5008                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5009         timing_out->v_sync_width =
5010                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5011         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5012         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5013
5014         stream->output_color_space = get_output_color_space(timing_out);
5015
5016         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5017         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5018         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5019                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5020                     drm_mode_is_420_also(info, mode_in) &&
5021                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5022                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5023                         adjust_colour_depth_from_display_info(timing_out, info);
5024                 }
5025         }
5026 }
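
/*
 * Worked example of the timing conversion above, using the CEA
 * 1920x1080@60 mode (crtc_clock 148500, crtc_hdisplay 1920,
 * crtc_hsync_start 2008, crtc_hsync_end 2052, crtc_htotal 2200):
 *
 *	h_front_porch = 2008 - 1920 = 88
 *	h_sync_width  = 2052 - 2008 = 44
 *	pix_clk_100hz = 148500 * 10 = 1485000
 */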
5027
5028 static void fill_audio_info(struct audio_info *audio_info,
5029                             const struct drm_connector *drm_connector,
5030                             const struct dc_sink *dc_sink)
5031 {
5032         int i = 0;
5033         int cea_revision = 0;
5034         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5035
5036         audio_info->manufacture_id = edid_caps->manufacturer_id;
5037         audio_info->product_id = edid_caps->product_id;
5038
5039         cea_revision = drm_connector->display_info.cea_rev;
5040
5041         strscpy(audio_info->display_name,
5042                 edid_caps->display_name,
5043                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5044
5045         if (cea_revision >= 3) {
5046                 audio_info->mode_count = edid_caps->audio_mode_count;
5047
5048                 for (i = 0; i < audio_info->mode_count; ++i) {
5049                         audio_info->modes[i].format_code =
5050                                         (enum audio_format_code)
5051                                         (edid_caps->audio_modes[i].format_code);
5052                         audio_info->modes[i].channel_count =
5053                                         edid_caps->audio_modes[i].channel_count;
5054                         audio_info->modes[i].sample_rates.all =
5055                                         edid_caps->audio_modes[i].sample_rate;
5056                         audio_info->modes[i].sample_size =
5057                                         edid_caps->audio_modes[i].sample_size;
5058                 }
5059         }
5060
5061         audio_info->flags.all = edid_caps->speaker_flags;
5062
5063         /* TODO: We only check progressive mode; check interlaced mode too. */
5064         if (drm_connector->latency_present[0]) {
5065                 audio_info->video_latency = drm_connector->video_latency[0];
5066                 audio_info->audio_latency = drm_connector->audio_latency[0];
5067         }
5068
5069         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5071 }
5072
5073 static void
5074 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5075                                       struct drm_display_mode *dst_mode)
5076 {
5077         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5078         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5079         dst_mode->crtc_clock = src_mode->crtc_clock;
5080         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5081         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5082         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5083         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5084         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5085         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5086         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5087         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5088         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5089         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5090         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5091 }
5092
5093 static void
5094 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5095                                         const struct drm_display_mode *native_mode,
5096                                         bool scale_enabled)
5097 {
5098         if (scale_enabled) {
5099                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5100         } else if (native_mode->clock == drm_mode->clock &&
5101                         native_mode->htotal == drm_mode->htotal &&
5102                         native_mode->vtotal == drm_mode->vtotal) {
5103                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5104         } else {
5105                 /* no scaling and no amdgpu-inserted mode: nothing to patch */
5106         }
5107 }
5108
5109 static struct dc_sink *
5110 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5111 {
5112         struct dc_sink_init_data sink_init_data = { 0 };
5113         struct dc_sink *sink = NULL;

5114         sink_init_data.link = aconnector->dc_link;
5115         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5116
5117         sink = dc_sink_create(&sink_init_data);
5118         if (!sink) {
5119                 DRM_ERROR("Failed to create sink!\n");
5120                 return NULL;
5121         }
5122         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5123
5124         return sink;
5125 }
5126
5127 static void set_multisync_trigger_params(
5128                 struct dc_stream_state *stream)
5129 {
5130         if (stream->triggered_crtc_reset.enabled) {
5131                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5132                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5133         }
5134 }
5135
5136 static void set_master_stream(struct dc_stream_state *stream_set[],
5137                               int stream_count)
5138 {
5139         int j, highest_rfr = 0, master_stream = 0;
5140
5141         for (j = 0;  j < stream_count; j++) {
5142                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5143                         int refresh_rate = 0;
5144
5145                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5146                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5147                         if (refresh_rate > highest_rfr) {
5148                                 highest_rfr = refresh_rate;
5149                                 master_stream = j;
5150                         }
5151                 }
5152         }
5153         for (j = 0;  j < stream_count; j++) {
5154                 if (stream_set[j])
5155                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5156         }
5157 }
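
/*
 * Worked example of the ranking above: a 1080p60 stream has
 * pix_clk_100hz == 1485000, h_total == 2200 and v_total == 1125, so
 * refresh_rate = 1485000 * 100 / (2200 * 1125) = 60. The stream with the
 * highest value becomes the reset trigger source for all the others.
 */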
5158
5159 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5160 {
5161         int i = 0;
5162
5163         if (context->stream_count < 2)
5164                 return;
5165         for (i = 0; i < context->stream_count ; i++) {
5166                 if (!context->streams[i])
5167                         continue;
5168                 /*
5169                  * TODO: add a function to read AMD VSDB bits and set
5170                  * crtc_sync_master.multi_sync_enabled flag
5171                  * For now it's set to false
5172                  */
5173                 set_multisync_trigger_params(context->streams[i]);
5174         }
5175         set_master_stream(context->streams, context->stream_count);
5176 }
5177
5178 static struct dc_stream_state *
5179 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5180                        const struct drm_display_mode *drm_mode,
5181                        const struct dm_connector_state *dm_state,
5182                        const struct dc_stream_state *old_stream,
5183                        int requested_bpc)
5184 {
5185         struct drm_display_mode *preferred_mode = NULL;
5186         struct drm_connector *drm_connector;
5187         const struct drm_connector_state *con_state =
5188                 dm_state ? &dm_state->base : NULL;
5189         struct dc_stream_state *stream = NULL;
5190         struct drm_display_mode mode = *drm_mode;
5191         bool native_mode_found = false;
5192         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5193         int mode_refresh;
5194         int preferred_refresh = 0;
5195 #if defined(CONFIG_DRM_AMD_DC_DCN)
5196         struct dsc_dec_dpcd_caps dsc_caps;
5197         uint32_t link_bandwidth_kbps;
5198 #endif
5199         struct dc_sink *sink = NULL;

5200         if (aconnector == NULL) {
5201                 DRM_ERROR("aconnector is NULL!\n");
5202                 return stream;
5203         }
5204
5205         drm_connector = &aconnector->base;
5206
5207         if (!aconnector->dc_sink) {
5208                 sink = create_fake_sink(aconnector);
5209                 if (!sink)
5210                         return stream;
5211         } else {
5212                 sink = aconnector->dc_sink;
5213                 dc_sink_retain(sink);
5214         }
5215
5216         stream = dc_create_stream_for_sink(sink);
5217
5218         if (stream == NULL) {
5219                 DRM_ERROR("Failed to create stream for sink!\n");
5220                 goto finish;
5221         }
5222
5223         stream->dm_stream_context = aconnector;
5224
5225         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5226                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5227
5228         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5229                 /* Search for preferred mode */
5230                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5231                         native_mode_found = true;
5232                         break;
5233                 }
5234         }
5235         if (!native_mode_found)
5236                 preferred_mode = list_first_entry_or_null(
5237                                 &aconnector->base.modes,
5238                                 struct drm_display_mode,
5239                                 head);
5240
5241         mode_refresh = drm_mode_vrefresh(&mode);
5242
5243         if (preferred_mode == NULL) {
5244                 /*
5245                  * This may not be an error: the use case is when we have no
5246                  * userspace calls to reset and set the mode upon hotplug. In
5247                  * that case we call set mode ourselves to restore the previous
5248                  * mode, and the mode list may not be filled in time.
5249                  */
5250                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5251         } else {
5252                 decide_crtc_timing_for_drm_display_mode(
5253                                 &mode, preferred_mode, scale);
5255                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5256         }
5257
5258         if (!dm_state)
5259                 drm_mode_set_crtcinfo(&mode, 0);
5260
5261         /*
5262          * If scaling is enabled and the refresh rate didn't change,
5263          * we copy the VIC and polarities of the old timings.
5264          */
5265         if (!scale || mode_refresh != preferred_refresh)
5266                 fill_stream_properties_from_drm_display_mode(stream,
5267                         &mode, &aconnector->base, con_state, NULL, requested_bpc);
5268         else
5269                 fill_stream_properties_from_drm_display_mode(stream,
5270                         &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5271
5272         stream->timing.flags.DSC = 0;
5273
5274         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5275 #if defined(CONFIG_DRM_AMD_DC_DCN)
5276                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5277                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5278                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5279                                       &dsc_caps);
5280                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5281                                                              dc_link_get_link_cap(aconnector->dc_link));
5282
5283                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5284                         /* Set DSC policy according to dsc_clock_en */
5285                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5286                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5287
5288                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5289                                                   &dsc_caps,
5290                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5291                                                   0,
5292                                                   link_bandwidth_kbps,
5293                                                   &stream->timing,
5294                                                   &stream->timing.dsc_cfg))
5295                                 stream->timing.flags.DSC = 1;
5296                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5297                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5298                                 stream->timing.flags.DSC = 1;
5299
5300                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5301                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5302
5303                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5304                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5305
5306                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5307                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5308                 }
5309 #endif
5310         }
5311
5312         update_stream_scaling_settings(&mode, dm_state, stream);
5313
5314         fill_audio_info(
5315                 &stream->audio_info,
5316                 drm_connector,
5317                 sink);
5318
5319         update_stream_signal(stream, sink);
5320
5321         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5322                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5323
5324         if (stream->link->psr_settings.psr_feature_enabled) {
5325                 /*
5326                  * Decide whether the stream supports VSC SDP colorimetry
5327                  * before building the VSC info packet.
5328                  */
5329                 stream->use_vsc_sdp_for_colorimetry = false;
5330                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5331                         stream->use_vsc_sdp_for_colorimetry =
5332                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5333                 } else {
5334                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5335                                 stream->use_vsc_sdp_for_colorimetry = true;
5336                 }
5337                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5338         }
5339 finish:
5340         dc_sink_release(sink);
5341
5342         return stream;
5343 }
5344
5345 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5346 {
5347         drm_crtc_cleanup(crtc);
5348         kfree(crtc);
5349 }
5350
5351 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5352                                   struct drm_crtc_state *state)
5353 {
5354         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5355
5356         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5357         if (cur->stream)
5358                 dc_stream_release(cur->stream);
5359
5361         __drm_atomic_helper_crtc_destroy_state(state);
5362
5364         kfree(state);
5365 }
5366
5367 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5368 {
5369         struct dm_crtc_state *state;
5370
5371         if (crtc->state)
5372                 dm_crtc_destroy_state(crtc, crtc->state);
5373
5374         state = kzalloc(sizeof(*state), GFP_KERNEL);
5375         if (WARN_ON(!state))
5376                 return;
5377
5378         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5379 }
5380
5381 static struct drm_crtc_state *
5382 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5383 {
5384         struct dm_crtc_state *state, *cur;
5385
5386         if (WARN_ON(!crtc->state))
5387                 return NULL;
5388
5389         cur = to_dm_crtc_state(crtc->state);
5390
5391         state = kzalloc(sizeof(*state), GFP_KERNEL);
5392         if (!state)
5393                 return NULL;
5394
5395         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5396
5397         if (cur->stream) {
5398                 state->stream = cur->stream;
5399                 dc_stream_retain(state->stream);
5400         }
5401
5402         state->active_planes = cur->active_planes;
5403         state->vrr_infopacket = cur->vrr_infopacket;
5404         state->abm_level = cur->abm_level;
5405         state->vrr_supported = cur->vrr_supported;
5406         state->freesync_config = cur->freesync_config;
5407         state->crc_src = cur->crc_src;
5408         state->cm_has_degamma = cur->cm_has_degamma;
5409         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5410
5411         /* TODO: Duplicate dc_stream once the stream object is flattened */
5412
5413         return &state->base;
5414 }
5415
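     /*
      * Enable or disable the VUPDATE interrupt for a CRTC. The DC irq
      * source is indexed by the CRTC's OTG instance; VUPDATE is only
      * needed while VRR is active (see dm_set_vblank() below).
      */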
5416 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5417 {
5418         enum dc_irq_source irq_source;
5419         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5420         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5421         int rc;
5422
5423         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5424
5425         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5426
5427         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5428                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5429         return rc;
5430 }
5431
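     /*
      * Toggle the VBLANK interrupt for a CRTC and keep the VUPDATE
      * interrupt in sync with it: enabling vblank enables vupdate only
      * when VRR is active, while disabling vblank always disables
      * vupdate. On DCN hardware this also queues the deferred MALL work
      * item, except while the GPU is in reset.
      */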
5432 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5433 {
5434         enum dc_irq_source irq_source;
5435         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5436         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5437         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5438 #if defined(CONFIG_DRM_AMD_DC_DCN)
5439         struct amdgpu_display_manager *dm = &adev->dm;
5440         unsigned long flags;
5441 #endif
5442         int rc = 0;
5443
5444         if (enable) {
5445                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5446                 if (amdgpu_dm_vrr_active(acrtc_state))
5447                         rc = dm_set_vupdate_irq(crtc, true);
5448         } else {
5449                 /* vblank irq off -> vupdate irq off */
5450                 rc = dm_set_vupdate_irq(crtc, false);
5451         }
5452
5453         if (rc)
5454                 return rc;
5455
5456         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5457
5458         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5459                 return -EBUSY;
5460
5461         if (amdgpu_in_reset(adev))
5462                 return 0;
5463
5464 #if defined(CONFIG_DRM_AMD_DC_DCN)
5465         spin_lock_irqsave(&dm->vblank_lock, flags);
5466         dm->vblank_workqueue->dm = dm;
5467         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5468         dm->vblank_workqueue->enable = enable;
5469         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5470         schedule_work(&dm->vblank_workqueue->mall_work);
5471 #endif
5472
5473         return 0;
5474 }
5475
5476 static int dm_enable_vblank(struct drm_crtc *crtc)
5477 {
5478         return dm_set_vblank(crtc, true);
5479 }
5480
5481 static void dm_disable_vblank(struct drm_crtc *crtc)
5482 {
5483         dm_set_vblank(crtc, false);
5484 }
5485
5486 /* Implement only the options currently available for the driver */
5487 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5488         .reset = dm_crtc_reset_state,
5489         .destroy = amdgpu_dm_crtc_destroy,
5490         .set_config = drm_atomic_helper_set_config,
5491         .page_flip = drm_atomic_helper_page_flip,
5492         .atomic_duplicate_state = dm_crtc_duplicate_state,
5493         .atomic_destroy_state = dm_crtc_destroy_state,
5494         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5495         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5496         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5497         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5498         .enable_vblank = dm_enable_vblank,
5499         .disable_vblank = dm_disable_vblank,
5500         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5501 };
5502
5503 static enum drm_connector_status
5504 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5505 {
5506         bool connected;
5507         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5508
5509         /*
5510          * Notes:
5511          * 1. This interface is NOT called in context of HPD irq.
5512          * 2. This interface *is called* in the context of a user-mode ioctl,
5513          * which makes it a bad place for *any* MST-related activity.
5514          */
5515
5516         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5517             !aconnector->fake_enable)
5518                 connected = (aconnector->dc_sink != NULL);
5519         else
5520                 connected = (aconnector->base.force == DRM_FORCE_ON);
5521
5522         update_subconnector_property(aconnector);
5523
5524         return (connected ? connector_status_connected :
5525                         connector_status_disconnected);
5526 }
5527
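     /*
      * Store a DRM connector property write in the dm connector state:
      * the generic scaling mode is translated to an RMX_* type, while
      * the driver-private underscan and ABM level properties are stored
      * as-is. Writes to unknown properties return -EINVAL.
      */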
5528 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5529                                             struct drm_connector_state *connector_state,
5530                                             struct drm_property *property,
5531                                             uint64_t val)
5532 {
5533         struct drm_device *dev = connector->dev;
5534         struct amdgpu_device *adev = drm_to_adev(dev);
5535         struct dm_connector_state *dm_old_state =
5536                 to_dm_connector_state(connector->state);
5537         struct dm_connector_state *dm_new_state =
5538                 to_dm_connector_state(connector_state);
5539
5540         int ret = -EINVAL;
5541
5542         if (property == dev->mode_config.scaling_mode_property) {
5543                 enum amdgpu_rmx_type rmx_type;
5544
5545                 switch (val) {
5546                 case DRM_MODE_SCALE_CENTER:
5547                         rmx_type = RMX_CENTER;
5548                         break;
5549                 case DRM_MODE_SCALE_ASPECT:
5550                         rmx_type = RMX_ASPECT;
5551                         break;
5552                 case DRM_MODE_SCALE_FULLSCREEN:
5553                         rmx_type = RMX_FULL;
5554                         break;
5555                 case DRM_MODE_SCALE_NONE:
5556                 default:
5557                         rmx_type = RMX_OFF;
5558                         break;
5559                 }
5560
5561                 if (dm_old_state->scaling == rmx_type)
5562                         return 0;
5563
5564                 dm_new_state->scaling = rmx_type;
5565                 ret = 0;
5566         } else if (property == adev->mode_info.underscan_hborder_property) {
5567                 dm_new_state->underscan_hborder = val;
5568                 ret = 0;
5569         } else if (property == adev->mode_info.underscan_vborder_property) {
5570                 dm_new_state->underscan_vborder = val;
5571                 ret = 0;
5572         } else if (property == adev->mode_info.underscan_property) {
5573                 dm_new_state->underscan_enable = val;
5574                 ret = 0;
5575         } else if (property == adev->mode_info.abm_level_property) {
5576                 dm_new_state->abm_level = val;
5577                 ret = 0;
5578         }
5579
5580         return ret;
5581 }
5582
5583 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5584                                             const struct drm_connector_state *state,
5585                                             struct drm_property *property,
5586                                             uint64_t *val)
5587 {
5588         struct drm_device *dev = connector->dev;
5589         struct amdgpu_device *adev = drm_to_adev(dev);
5590         struct dm_connector_state *dm_state =
5591                 to_dm_connector_state(state);
5592         int ret = -EINVAL;
5593
5594         if (property == dev->mode_config.scaling_mode_property) {
5595                 switch (dm_state->scaling) {
5596                 case RMX_CENTER:
5597                         *val = DRM_MODE_SCALE_CENTER;
5598                         break;
5599                 case RMX_ASPECT:
5600                         *val = DRM_MODE_SCALE_ASPECT;
5601                         break;
5602                 case RMX_FULL:
5603                         *val = DRM_MODE_SCALE_FULLSCREEN;
5604                         break;
5605                 case RMX_OFF:
5606                 default:
5607                         *val = DRM_MODE_SCALE_NONE;
5608                         break;
5609                 }
5610                 ret = 0;
5611         } else if (property == adev->mode_info.underscan_hborder_property) {
5612                 *val = dm_state->underscan_hborder;
5613                 ret = 0;
5614         } else if (property == adev->mode_info.underscan_vborder_property) {
5615                 *val = dm_state->underscan_vborder;
5616                 ret = 0;
5617         } else if (property == adev->mode_info.underscan_property) {
5618                 *val = dm_state->underscan_enable;
5619                 ret = 0;
5620         } else if (property == adev->mode_info.abm_level_property) {
5621                 *val = dm_state->abm_level;
5622                 ret = 0;
5623         }
5624
5625         return ret;
5626 }
5627
5628 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5629 {
5630         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5631
5632         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5633 }
5634
5635 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5636 {
5637         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5638         const struct dc_link *link = aconnector->dc_link;
5639         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5640         struct amdgpu_display_manager *dm = &adev->dm;
5641
5642         /*
5643          * Call only if mst_mgr was initialized before, since it's not done
5644          * for all connector types.
5645          */
5646         if (aconnector->mst_mgr.dev)
5647                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5648
5649 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5650         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5651
5652         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5653             link->type != dc_connection_none &&
5654             dm->backlight_dev) {
5655                 backlight_device_unregister(dm->backlight_dev);
5656                 dm->backlight_dev = NULL;
5657         }
5658 #endif
5659
5660         if (aconnector->dc_em_sink)
5661                 dc_sink_release(aconnector->dc_em_sink);
5662         aconnector->dc_em_sink = NULL;
5663         if (aconnector->dc_sink)
5664                 dc_sink_release(aconnector->dc_sink);
5665         aconnector->dc_sink = NULL;
5666
5667         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5668         drm_connector_unregister(connector);
5669         drm_connector_cleanup(connector);
5670         if (aconnector->i2c) {
5671                 i2c_del_adapter(&aconnector->i2c->base);
5672                 kfree(aconnector->i2c);
5673         }
5674         kfree(aconnector->dm_dp_aux.aux.name);
5675
5676         kfree(connector);
5677 }
5678
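     /*
      * Reset the connector state to driver defaults: scaling and
      * underscan off, at most 8 bpc requested, no MST payload, and the
      * module-default ABM level on eDP panels.
      */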
5679 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5680 {
5681         struct dm_connector_state *state =
5682                 to_dm_connector_state(connector->state);
5683
5684         if (connector->state)
5685                 __drm_atomic_helper_connector_destroy_state(connector->state);
5686
5687         kfree(state);
5688
5689         state = kzalloc(sizeof(*state), GFP_KERNEL);
5690
5691         if (state) {
5692                 state->scaling = RMX_OFF;
5693                 state->underscan_enable = false;
5694                 state->underscan_hborder = 0;
5695                 state->underscan_vborder = 0;
5696                 state->base.max_requested_bpc = 8;
5697                 state->vcpi_slots = 0;
5698                 state->pbn = 0;
5699                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5700                         state->abm_level = amdgpu_dm_abm_level;
5701
5702                 __drm_atomic_helper_connector_reset(connector, &state->base);
5703         }
5704 }
5705
5706 struct drm_connector_state *
5707 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5708 {
5709         struct dm_connector_state *state =
5710                 to_dm_connector_state(connector->state);
5711
5712         struct dm_connector_state *new_state =
5713                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5714
5715         if (!new_state)
5716                 return NULL;
5717
5718         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5719
5720         new_state->freesync_capable = state->freesync_capable;
5721         new_state->abm_level = state->abm_level;
5722         new_state->scaling = state->scaling;
5723         new_state->underscan_enable = state->underscan_enable;
5724         new_state->underscan_hborder = state->underscan_hborder;
5725         new_state->underscan_vborder = state->underscan_vborder;
5726         new_state->vcpi_slots = state->vcpi_slots;
5727         new_state->pbn = state->pbn;
5728         return &new_state->base;
5729 }
5730
5731 static int
5732 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5733 {
5734         struct amdgpu_dm_connector *amdgpu_dm_connector =
5735                 to_amdgpu_dm_connector(connector);
5736         int r;
5737
5738         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5739             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5740                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5741                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5742                 if (r)
5743                         return r;
5744         }
5745
5746 #if defined(CONFIG_DEBUG_FS)
5747         connector_debugfs_init(amdgpu_dm_connector);
5748 #endif
5749
5750         return 0;
5751 }
5752
5753 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5754         .reset = amdgpu_dm_connector_funcs_reset,
5755         .detect = amdgpu_dm_connector_detect,
5756         .fill_modes = drm_helper_probe_single_connector_modes,
5757         .destroy = amdgpu_dm_connector_destroy,
5758         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5759         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5760         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5761         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5762         .late_register = amdgpu_dm_connector_late_register,
5763         .early_unregister = amdgpu_dm_connector_unregister
5764 };
5765
5766 static int get_modes(struct drm_connector *connector)
5767 {
5768         return amdgpu_dm_connector_get_modes(connector);
5769 }
5770
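     /*
      * Build an emulated sink from the connector's overridden EDID blob
      * so that a forced-on connector can complete a modeset without a
      * physical sink. Without an EDID override the connector is forced
      * OFF instead.
      */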
5771 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5772 {
5773         struct dc_sink_init_data init_params = {
5774                         .link = aconnector->dc_link,
5775                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5776         };
5777         struct edid *edid;
5778
5779         if (!aconnector->base.edid_blob_ptr) {
5780                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5781                                 aconnector->base.name);
5782
5783                 aconnector->base.force = DRM_FORCE_OFF;
5784                 aconnector->base.override_edid = false;
5785                 return;
5786         }
5787
5788         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5789
5790         aconnector->edid = edid;
5791
5792         aconnector->dc_em_sink = dc_link_add_remote_sink(
5793                 aconnector->dc_link,
5794                 (uint8_t *)edid,
5795                 (edid->extensions + 1) * EDID_LENGTH,
5796                 &init_params);
5797
5798         if (aconnector->base.force == DRM_FORCE_ON) {
5799                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5800                 aconnector->dc_link->local_sink :
5801                 aconnector->dc_em_sink;
5802                 dc_sink_retain(aconnector->dc_sink);
5803         }
5804 }
5805
5806 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5807 {
5808         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5809
5810         /*
5811          * In case of a headless boot with force on for a DP managed connector,
5812          * these settings have to be != 0 to get an initial modeset.
5813          */
5814         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5815                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5816                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5817         }
5818
5820         aconnector->base.override_edid = true;
5821         create_eml_sink(aconnector);
5822 }
5823
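     /*
      * Create a stream for the sink and validate it with DC, retrying
      * at progressively lower color depth (starting from the requested
      * bpc and stepping down by 2 to a floor of 6) until validation
      * succeeds or the options are exhausted.
      */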
5824 static struct dc_stream_state *
5825 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5826                                 const struct drm_display_mode *drm_mode,
5827                                 const struct dm_connector_state *dm_state,
5828                                 const struct dc_stream_state *old_stream)
5829 {
5830         struct drm_connector *connector = &aconnector->base;
5831         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5832         struct dc_stream_state *stream;
5833         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5834         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5835         enum dc_status dc_result = DC_OK;
5836
5837         do {
5838                 stream = create_stream_for_sink(aconnector, drm_mode,
5839                                                 dm_state, old_stream,
5840                                                 requested_bpc);
5841                 if (stream == NULL) {
5842                         DRM_ERROR("Failed to create stream for sink!\n");
5843                         break;
5844                 }
5845
5846                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5847
5848                 if (dc_result != DC_OK) {
5849                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5850                                       drm_mode->hdisplay,
5851                                       drm_mode->vdisplay,
5852                                       drm_mode->clock,
5853                                       dc_result,
5854                                       dc_status_to_str(dc_result));
5855
5856                         dc_stream_release(stream);
5857                         stream = NULL;
5858                         requested_bpc -= 2; /* lower bpc to retry validation */
5859                 }
5860
5861         } while (stream == NULL && requested_bpc >= 6);
5862
5863         return stream;
5864 }
5865
5866 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5867                                    struct drm_display_mode *mode)
5868 {
5869         int result = MODE_ERROR;
5870         struct dc_sink *dc_sink;
5871         /* TODO: Unhardcode stream count */
5872         struct dc_stream_state *stream;
5873         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5874
5875         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5876                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5877                 return result;
5878
5879         /*
5880          * Only run this the first time mode_valid is called, to initialize
5881          * EDID mgmt.
5882          */
5883         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5884                 !aconnector->dc_em_sink)
5885                 handle_edid_mgmt(aconnector);
5886
5887         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5888
5889         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5890                                 aconnector->base.force != DRM_FORCE_ON) {
5891                 DRM_ERROR("dc_sink is NULL!\n");
5892                 goto fail;
5893         }
5894
5895         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5896         if (stream) {
5897                 dc_stream_release(stream);
5898                 result = MODE_OK;
5899         }
5900
5901 fail:
5902         /* TODO: error handling */
5903         return result;
5904 }
5905
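     /*
      * Pack the connector's HDR static metadata into a DC info packet.
      * The metadata is serialized as an HDMI DRM (Dynamic Range and
      * Mastering) infoframe; HDMI keeps the infoframe header, while
      * DP/eDP rewraps the same 26-byte payload in an SDP header with
      * packet type 0x87. Returns 0 with out->valid == false when no
      * metadata is attached to the state.
      */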
5906 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5907                                 struct dc_info_packet *out)
5908 {
5909         struct hdmi_drm_infoframe frame;
5910         unsigned char buf[30]; /* 26 + 4 */
5911         ssize_t len;
5912         int ret, i;
5913
5914         memset(out, 0, sizeof(*out));
5915
5916         if (!state->hdr_output_metadata)
5917                 return 0;
5918
5919         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5920         if (ret)
5921                 return ret;
5922
5923         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5924         if (len < 0)
5925                 return (int)len;
5926
5927         /* Static metadata is a fixed 26 bytes + 4 byte header. */
5928         if (len != 30)
5929                 return -EINVAL;
5930
5931         /* Prepare the infopacket for DC. */
5932         switch (state->connector->connector_type) {
5933         case DRM_MODE_CONNECTOR_HDMIA:
5934                 out->hb0 = 0x87; /* type */
5935                 out->hb1 = 0x01; /* version */
5936                 out->hb2 = 0x1A; /* length */
5937                 out->sb[0] = buf[3]; /* checksum */
5938                 i = 1;
5939                 break;
5940
5941         case DRM_MODE_CONNECTOR_DisplayPort:
5942         case DRM_MODE_CONNECTOR_eDP:
5943                 out->hb0 = 0x00; /* sdp id, zero */
5944                 out->hb1 = 0x87; /* type */
5945                 out->hb2 = 0x1D; /* payload len - 1 */
5946                 out->hb3 = (0x13 << 2); /* sdp version */
5947                 out->sb[0] = 0x01; /* version */
5948                 out->sb[1] = 0x1A; /* length */
5949                 i = 2;
5950                 break;
5951
5952         default:
5953                 return -EINVAL;
5954         }
5955
5956         memcpy(&out->sb[i], &buf[4], 26);
5957         out->valid = true;
5958
5959         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5960                        sizeof(out->sb), false);
5961
5962         return 0;
5963 }
5964
5965 static bool
5966 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5967                           const struct drm_connector_state *new_state)
5968 {
5969         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5970         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5971
5972         if (old_blob != new_blob) {
5973                 if (old_blob && new_blob &&
5974                     old_blob->length == new_blob->length)
5975                         return memcmp(old_blob->data, new_blob->data,
5976                                       old_blob->length);
5977
5978                 return true;
5979         }
5980
5981         return false;
5982 }
5983
5984 static int
5985 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5986                                  struct drm_atomic_state *state)
5987 {
5988         struct drm_connector_state *new_con_state =
5989                 drm_atomic_get_new_connector_state(state, conn);
5990         struct drm_connector_state *old_con_state =
5991                 drm_atomic_get_old_connector_state(state, conn);
5992         struct drm_crtc *crtc = new_con_state->crtc;
5993         struct drm_crtc_state *new_crtc_state;
5994         int ret;
5995
5996         trace_amdgpu_dm_connector_atomic_check(new_con_state);
5997
5998         if (!crtc)
5999                 return 0;
6000
6001         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6002                 struct dc_info_packet hdr_infopacket;
6003
6004                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6005                 if (ret)
6006                         return ret;
6007
6008                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6009                 if (IS_ERR(new_crtc_state))
6010                         return PTR_ERR(new_crtc_state);
6011
6012                 /*
6013                  * DC considers the stream backends changed if the
6014                  * static metadata changes. Forcing the modeset also
6015                  * gives a simple way for userspace to switch from
6016                  * 8bpc to 10bpc when setting the metadata to enter
6017                  * or exit HDR.
6018                  *
6019                  * Changing the static metadata after it's been
6020                  * set is permissible, however. So only force a
6021                  * modeset if we're entering or exiting HDR.
6022                  */
6023                 new_crtc_state->mode_changed =
6024                         !old_con_state->hdr_output_metadata ||
6025                         !new_con_state->hdr_output_metadata;
6026         }
6027
6028         return 0;
6029 }
6030
6031 static const struct drm_connector_helper_funcs
6032 amdgpu_dm_connector_helper_funcs = {
6033         /*
6034          * If hotplugging a second, bigger display in FB console mode, the bigger
6035          * resolution modes will be filtered by drm_mode_validate_size(), and those
6036          * modes are missing after the user starts lightdm. So we need to renew the
6037          * modes list in the get_modes callback, not just return the modes count.
6038          */
6039         .get_modes = get_modes,
6040         .mode_valid = amdgpu_dm_connector_mode_valid,
6041         .atomic_check = amdgpu_dm_connector_atomic_check,
6042 };
6043
6044 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6045 {
6046 }
6047
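     /*
      * Count the enabled non-cursor planes on a CRTC. A plane that is
      * in the CRTC's plane_mask but has no new state in this commit
      * previously passed validation and is counted as enabled; a plane
      * with new state counts only if it has a framebuffer attached.
      */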
6048 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6049 {
6050         struct drm_atomic_state *state = new_crtc_state->state;
6051         struct drm_plane *plane;
6052         int num_active = 0;
6053
6054         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6055                 struct drm_plane_state *new_plane_state;
6056
6057                 /* Cursor planes are "fake". */
6058                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6059                         continue;
6060
6061                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6062
6063                 if (!new_plane_state) {
6064                         /*
6065                          * The plane is enabled on the CRTC and hasn't changed
6066                          * state. This means that it previously passed
6067                          * validation and is therefore enabled.
6068                          */
6069                         num_active += 1;
6070                         continue;
6071                 }
6072
6073                 /* We need a framebuffer to be considered enabled. */
6074                 num_active += (new_plane_state->fb != NULL);
6075         }
6076
6077         return num_active;
6078 }
6079
6080 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6081                                          struct drm_crtc_state *new_crtc_state)
6082 {
6083         struct dm_crtc_state *dm_new_crtc_state =
6084                 to_dm_crtc_state(new_crtc_state);
6085
6086         dm_new_crtc_state->active_planes = 0;
6087
6088         if (!dm_new_crtc_state->stream)
6089                 return;
6090
6091         dm_new_crtc_state->active_planes =
6092                 count_crtc_active_planes(new_crtc_state);
6093 }
6094
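     /*
      * Validate the CRTC's new state: refresh the active plane count,
      * require the primary plane whenever the CRTC is enabled, and
      * finally have DC validate the attached stream, if any.
      */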
6095 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6096                                        struct drm_atomic_state *state)
6097 {
6098         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6099                                                                           crtc);
6100         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6101         struct dc *dc = adev->dm.dc;
6102         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6103         int ret = -EINVAL;
6104
6105         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6106
6107         dm_update_crtc_active_planes(crtc, crtc_state);
6108
6109         if (unlikely(!dm_crtc_state->stream &&
6110                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6111                 WARN_ON(1);
6112                 return ret;
6113         }
6114
6115         /*
6116          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6117          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6118          * planes are disabled, which is not supported by the hardware. And there is legacy
6119          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6120          */
6121         if (crtc_state->enable &&
6122             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6123                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6124                 return -EINVAL;
6125         }
6126
6127         /* In some use cases, like reset, no stream is attached */
6128         if (!dm_crtc_state->stream)
6129                 return 0;
6130
6131         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6132                 return 0;
6133
6134         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6135         return ret;
6136 }
6137
6138 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6139                                       const struct drm_display_mode *mode,
6140                                       struct drm_display_mode *adjusted_mode)
6141 {
6142         return true;
6143 }
6144
6145 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6146         .disable = dm_crtc_helper_disable,
6147         .atomic_check = dm_crtc_helper_atomic_check,
6148         .mode_fixup = dm_crtc_helper_mode_fixup,
6149         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6150 };
6151
6152 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6153 {
6154
6155 }
6156
6157 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6158 {
6159         switch (display_color_depth) {
6160         case COLOR_DEPTH_666:
6161                 return 6;
6162         case COLOR_DEPTH_888:
6163                 return 8;
6164         case COLOR_DEPTH_101010:
6165                 return 10;
6166         case COLOR_DEPTH_121212:
6167                 return 12;
6168         case COLOR_DEPTH_141414:
6169                 return 14;
6170         case COLOR_DEPTH_161616:
6171                 return 16;
6172         default:
6173                 break;
6174         }
6175         return 0;
6176 }
6177
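     /*
      * For MST connectors, derive the bandwidth (PBN) a mode will
      * consume from its pixel clock and color depth, then atomically
      * reserve the corresponding VCPI slots on the topology manager.
      * Connectors without an MST port are skipped.
      */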
6178 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6179                                           struct drm_crtc_state *crtc_state,
6180                                           struct drm_connector_state *conn_state)
6181 {
6182         struct drm_atomic_state *state = crtc_state->state;
6183         struct drm_connector *connector = conn_state->connector;
6184         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6185         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6186         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6187         struct drm_dp_mst_topology_mgr *mst_mgr;
6188         struct drm_dp_mst_port *mst_port;
6189         enum dc_color_depth color_depth;
6190         int clock, bpp = 0;
6191         bool is_y420 = false;
6192
6193         if (!aconnector->port || !aconnector->dc_sink)
6194                 return 0;
6195
6196         mst_port = aconnector->port;
6197         mst_mgr = &aconnector->mst_port->mst_mgr;
6198
6199         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6200                 return 0;
6201
6202         if (!state->duplicated) {
6203                 int max_bpc = conn_state->max_requested_bpc;
6204                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6205                                 aconnector->force_yuv420_output;
6206                 color_depth = convert_color_depth_from_display_info(connector,
6207                                                                     is_y420,
6208                                                                     max_bpc);
6209                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6210                 clock = adjusted_mode->clock;
6211                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6212         }
6213         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6214                                                                            mst_mgr,
6215                                                                            mst_port,
6216                                                                            dm_new_connector_state->pbn,
6217                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6218         if (dm_new_connector_state->vcpi_slots < 0) {
6219                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6220                 return dm_new_connector_state->vcpi_slots;
6221         }
6222         return 0;
6223 }
6224
6225 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6226         .disable = dm_encoder_helper_disable,
6227         .atomic_check = dm_encoder_helper_atomic_check
6228 };
6229
6230 #if defined(CONFIG_DRM_AMD_DC_DCN)
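     /*
      * After DC has decided which MST streams use DSC, redo the VCPI
      * allocation per connector: for DSC streams the PBN is recomputed
      * from the compressed bits_per_pixel (a 1/16-bpp fractional value,
      * signalled by the dsc flag to drm_dp_calc_pbn_mode()); otherwise
      * DSC is disabled on the port and the existing PBN is kept.
      */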
6231 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6232                                             struct dc_state *dc_state)
6233 {
6234         struct dc_stream_state *stream = NULL;
6235         struct drm_connector *connector;
6236         struct drm_connector_state *new_con_state, *old_con_state;
6237         struct amdgpu_dm_connector *aconnector;
6238         struct dm_connector_state *dm_conn_state;
6239         int i, j, clock, bpp;
6240         int vcpi, pbn_div, pbn = 0;
6241
6242         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6244                 aconnector = to_amdgpu_dm_connector(connector);
6245
6246                 if (!aconnector->port)
6247                         continue;
6248
6249                 if (!new_con_state || !new_con_state->crtc)
6250                         continue;
6251
6252                 dm_conn_state = to_dm_connector_state(new_con_state);
6253
6254                 for (j = 0; j < dc_state->stream_count; j++) {
6255                         stream = dc_state->streams[j];
6256                         if (!stream)
6257                                 continue;
6258
6259                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6260                                 break;
6261
6262                         stream = NULL;
6263                 }
6264
6265                 if (!stream)
6266                         continue;
6267
6268                 if (stream->timing.flags.DSC != 1) {
6269                         drm_dp_mst_atomic_enable_dsc(state,
6270                                                      aconnector->port,
6271                                                      dm_conn_state->pbn,
6272                                                      0,
6273                                                      false);
6274                         continue;
6275                 }
6276
6277                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6278                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6279                 clock = stream->timing.pix_clk_100hz / 10;
6280                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6281                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6282                                                     aconnector->port,
6283                                                     pbn, pbn_div,
6284                                                     true);
6285                 if (vcpi < 0)
6286                         return vcpi;
6287
6288                 dm_conn_state->pbn = pbn;
6289                 dm_conn_state->vcpi_slots = vcpi;
6290         }
6291         return 0;
6292 }
6293 #endif
6294
6295 static void dm_drm_plane_reset(struct drm_plane *plane)
6296 {
6297         struct dm_plane_state *amdgpu_state = NULL;
6298
6299         if (plane->state)
6300                 plane->funcs->atomic_destroy_state(plane, plane->state);
6301
6302         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6303         WARN_ON(amdgpu_state == NULL);
6304
6305         if (amdgpu_state)
6306                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6307 }
6308
6309 static struct drm_plane_state *
6310 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6311 {
6312         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6313
6314         old_dm_plane_state = to_dm_plane_state(plane->state);
6315         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6316         if (!dm_plane_state)
6317                 return NULL;
6318
6319         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6320
6321         if (old_dm_plane_state->dc_state) {
6322                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6323                 dc_plane_state_retain(dm_plane_state->dc_state);
6324         }
6325
6326         return &dm_plane_state->base;
6327 }
6328
6329 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6330                                 struct drm_plane_state *state)
6331 {
6332         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6333
6334         if (dm_plane_state->dc_state)
6335                 dc_plane_state_release(dm_plane_state->dc_state);
6336
6337         drm_atomic_helper_plane_destroy_state(plane, state);
6338 }
6339
6340 static const struct drm_plane_funcs dm_plane_funcs = {
6341         .update_plane   = drm_atomic_helper_update_plane,
6342         .disable_plane  = drm_atomic_helper_disable_plane,
6343         .destroy        = drm_primary_helper_destroy,
6344         .reset = dm_drm_plane_reset,
6345         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6346         .atomic_destroy_state = dm_drm_plane_destroy_state,
6347         .format_mod_supported = dm_plane_format_mod_supported,
6348 };
6349
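     /*
      * Prepare a framebuffer for scanout: reserve and pin the backing
      * BO in a supported domain (cursors must be in VRAM), bind it into
      * the GART and record the GPU address. For newly created planes
      * the DC buffer attributes (tiling, DCC, addressing) are filled in
      * here as well, since afb->address is unknown during atomic check.
      */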
6350 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6351                                       struct drm_plane_state *new_state)
6352 {
6353         struct amdgpu_framebuffer *afb;
6354         struct drm_gem_object *obj;
6355         struct amdgpu_device *adev;
6356         struct amdgpu_bo *rbo;
6357         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6358         struct list_head list;
6359         struct ttm_validate_buffer tv;
6360         struct ww_acquire_ctx ticket;
6361         uint32_t domain;
6362         int r;
6363
6364         if (!new_state->fb) {
6365                 DRM_DEBUG_DRIVER("No FB bound\n");
6366                 return 0;
6367         }
6368
6369         afb = to_amdgpu_framebuffer(new_state->fb);
6370         obj = new_state->fb->obj[0];
6371         rbo = gem_to_amdgpu_bo(obj);
6372         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6373         INIT_LIST_HEAD(&list);
6374
6375         tv.bo = &rbo->tbo;
6376         tv.num_shared = 1;
6377         list_add(&tv.head, &list);
6378
6379         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6380         if (r) {
6381                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6382                 return r;
6383         }
6384
6385         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6386                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6387         else
6388                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6389
6390         r = amdgpu_bo_pin(rbo, domain);
6391         if (unlikely(r != 0)) {
6392                 if (r != -ERESTARTSYS)
6393                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6394                 ttm_eu_backoff_reservation(&ticket, &list);
6395                 return r;
6396         }
6397
6398         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6399         if (unlikely(r != 0)) {
6400                 amdgpu_bo_unpin(rbo);
6401                 ttm_eu_backoff_reservation(&ticket, &list);
6402                 DRM_ERROR("%p bind failed\n", rbo);
6403                 return r;
6404         }
6405
6406         ttm_eu_backoff_reservation(&ticket, &list);
6407
6408         afb->address = amdgpu_bo_gpu_offset(rbo);
6409
6410         amdgpu_bo_ref(rbo);
6411
6412         /*
6413          * We don't do surface updates on planes that have been newly created,
6414          * but we also don't have the afb->address during atomic check.
6415          *
6416          * Fill in buffer attributes depending on the address here, but only on
6417          * newly created planes since they're not being used by DC yet and this
6418          * won't modify global state.
6419          */
6420         dm_plane_state_old = to_dm_plane_state(plane->state);
6421         dm_plane_state_new = to_dm_plane_state(new_state);
6422
6423         if (dm_plane_state_new->dc_state &&
6424             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6425                 struct dc_plane_state *plane_state =
6426                         dm_plane_state_new->dc_state;
6427                 bool force_disable_dcc = !plane_state->dcc.enable;
6428
6429                 fill_plane_buffer_attributes(
6430                         adev, afb, plane_state->format, plane_state->rotation,
6431                         afb->tiling_flags,
6432                         &plane_state->tiling_info, &plane_state->plane_size,
6433                         &plane_state->dcc, &plane_state->address,
6434                         afb->tmz_surface, force_disable_dcc);
6435         }
6436
6437         return 0;
6438 }
6439
6440 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6441                                        struct drm_plane_state *old_state)
6442 {
6443         struct amdgpu_bo *rbo;
6444         int r;
6445
6446         if (!old_state->fb)
6447                 return;
6448
6449         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6450         r = amdgpu_bo_reserve(rbo, false);
6451         if (unlikely(r)) {
6452                 DRM_ERROR("failed to reserve rbo before unpin\n");
6453                 return;
6454         }
6455
6456         amdgpu_bo_unpin(rbo);
6457         amdgpu_bo_unreserve(rbo);
6458         amdgpu_bo_unref(&rbo);
6459 }
6460
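     /*
      * Check the plane state against its CRTC: clamp the visible
      * viewport to the mode (rejecting non-cursor planes whose visible
      * area falls below the hardware minimum) and convert the DC plane
      * scaling caps to the 16.16 fixed-point min/max scaling factors
      * expected by the DRM helper.
      */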
6461 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6462                                        struct drm_crtc_state *new_crtc_state)
6463 {
6464         struct drm_framebuffer *fb = state->fb;
6465         int min_downscale, max_upscale;
6466         int min_scale = 0;
6467         int max_scale = INT_MAX;
6468
6469         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6470         if (fb && state->crtc) {
6471                 /* Validate viewport to cover the case when only the position changes */
6472                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6473                         int viewport_width = state->crtc_w;
6474                         int viewport_height = state->crtc_h;
6475
6476                         if (state->crtc_x < 0)
6477                                 viewport_width += state->crtc_x;
6478                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6479                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6480
6481                         if (state->crtc_y < 0)
6482                                 viewport_height += state->crtc_y;
6483                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6484                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6485
6486                         /* If the plane is completely outside of the screen, viewport_width
6487                          * and/or viewport_height will be negative, which still satisfies the
6488                          * condition below and thereby also covers those cases.
6489                          * The x2 for width is because of pipe-split.
6490                          */
6491                         if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6492                                 return -EINVAL;
6493                 }
6494
6495                 /* Get min/max allowed scaling factors from plane caps. */
6496                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6497                                              &min_downscale, &max_upscale);
6498                 /*
6499                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6500                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6501                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6502                  */
6503                 min_scale = (1000 << 16) / max_upscale;
6504                 max_scale = (1000 << 16) / min_downscale;
6505         }
6506
6507         return drm_atomic_helper_check_plane_state(
6508                 state, new_crtc_state, min_scale, max_scale, true, true);
6509 }
6510
6511 static int dm_plane_atomic_check(struct drm_plane *plane,
6512                                  struct drm_plane_state *state)
6513 {
6514         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6515         struct dc *dc = adev->dm.dc;
6516         struct dm_plane_state *dm_plane_state;
6517         struct dc_scaling_info scaling_info;
6518         struct drm_crtc_state *new_crtc_state;
6519         int ret;
6520
6521         trace_amdgpu_dm_plane_atomic_check(state);
6522
6523         dm_plane_state = to_dm_plane_state(state);
6524
6525         if (!dm_plane_state->dc_state)
6526                 return 0;
6527
6528         new_crtc_state =
6529                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6530         if (!new_crtc_state)
6531                 return -EINVAL;
6532
6533         ret = dm_plane_helper_check_state(state, new_crtc_state);
6534         if (ret)
6535                 return ret;
6536
6537         ret = fill_dc_scaling_info(state, &scaling_info);
6538         if (ret)
6539                 return ret;
6540
6541         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6542                 return 0;
6543
6544         return -EINVAL;
6545 }
6546
6547 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6548                                        struct drm_plane_state *new_plane_state)
6549 {
6550         /* Only support async updates on cursor planes. */
6551         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6552                 return -EINVAL;
6553
6554         return 0;
6555 }
6556
6557 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6558                                          struct drm_plane_state *new_state)
6559 {
6560         struct drm_plane_state *old_state =
6561                 drm_atomic_get_old_plane_state(new_state->state, plane);
6562
6563         trace_amdgpu_dm_atomic_update_cursor(new_state);
6564
6565         swap(plane->state->fb, new_state->fb);
6566
6567         plane->state->src_x = new_state->src_x;
6568         plane->state->src_y = new_state->src_y;
6569         plane->state->src_w = new_state->src_w;
6570         plane->state->src_h = new_state->src_h;
6571         plane->state->crtc_x = new_state->crtc_x;
6572         plane->state->crtc_y = new_state->crtc_y;
6573         plane->state->crtc_w = new_state->crtc_w;
6574         plane->state->crtc_h = new_state->crtc_h;
6575
6576         handle_cursor_update(plane, old_state);
6577 }
6578
6579 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6580         .prepare_fb = dm_plane_helper_prepare_fb,
6581         .cleanup_fb = dm_plane_helper_cleanup_fb,
6582         .atomic_check = dm_plane_atomic_check,
6583         .atomic_async_check = dm_plane_atomic_async_check,
6584         .atomic_async_update = dm_plane_atomic_async_update
6585 };
6586
6587 /*
6588  * TODO: these are currently initialized to rgb formats only.
6589  * For future use cases we should either initialize them dynamically based on
6590  * plane capabilities, or initialize this array to all formats, so internal drm
6591  * check will succeed, and let DC implement proper check
6592  */
6593 static const uint32_t rgb_formats[] = {
6594         DRM_FORMAT_XRGB8888,
6595         DRM_FORMAT_ARGB8888,
6596         DRM_FORMAT_RGBA8888,
6597         DRM_FORMAT_XRGB2101010,
6598         DRM_FORMAT_XBGR2101010,
6599         DRM_FORMAT_ARGB2101010,
6600         DRM_FORMAT_ABGR2101010,
6601         DRM_FORMAT_XBGR8888,
6602         DRM_FORMAT_ABGR8888,
6603         DRM_FORMAT_RGB565,
6604 };
6605
6606 static const uint32_t overlay_formats[] = {
6607         DRM_FORMAT_XRGB8888,
6608         DRM_FORMAT_ARGB8888,
6609         DRM_FORMAT_RGBA8888,
6610         DRM_FORMAT_XBGR8888,
6611         DRM_FORMAT_ABGR8888,
6612         DRM_FORMAT_RGB565
6613 };
6614
6615 static const u32 cursor_formats[] = {
6616         DRM_FORMAT_ARGB8888
6617 };
6618
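     /*
      * Build the format list for a plane from the static per-type
      * tables above, extending the primary plane with NV12, P010 and
      * FP16 formats when the DC plane caps report support for them.
      */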
6619 static int get_plane_formats(const struct drm_plane *plane,
6620                              const struct dc_plane_cap *plane_cap,
6621                              uint32_t *formats, int max_formats)
6622 {
6623         int i, num_formats = 0;
6624
6625         /*
6626          * TODO: Query support for each group of formats directly from
6627          * DC plane caps. This will require adding more formats to the
6628          * caps list.
6629          */
6630
6631         switch (plane->type) {
6632         case DRM_PLANE_TYPE_PRIMARY:
6633                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6634                         if (num_formats >= max_formats)
6635                                 break;
6636
6637                         formats[num_formats++] = rgb_formats[i];
6638                 }
6639
6640                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6641                         formats[num_formats++] = DRM_FORMAT_NV12;
6642                 if (plane_cap && plane_cap->pixel_format_support.p010)
6643                         formats[num_formats++] = DRM_FORMAT_P010;
6644                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6645                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6646                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6647                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6648                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6649                 }
6650                 break;
6651
6652         case DRM_PLANE_TYPE_OVERLAY:
6653                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6654                         if (num_formats >= max_formats)
6655                                 break;
6656
6657                         formats[num_formats++] = overlay_formats[i];
6658                 }
6659                 break;
6660
6661         case DRM_PLANE_TYPE_CURSOR:
6662                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6663                         if (num_formats >= max_formats)
6664                                 break;
6665
6666                         formats[num_formats++] = cursor_formats[i];
6667                 }
6668                 break;
6669         }
6670
6671         return num_formats;
6672 }
6673
6674 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6675                                 struct drm_plane *plane,
6676                                 unsigned long possible_crtcs,
6677                                 const struct dc_plane_cap *plane_cap)
6678 {
6679         uint32_t formats[32];
6680         int num_formats;
6681         int res = -EPERM;
6682         unsigned int supported_rotations;
6683         uint64_t *modifiers = NULL;
6684
6685         num_formats = get_plane_formats(plane, plane_cap, formats,
6686                                         ARRAY_SIZE(formats));
6687
6688         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6689         if (res)
6690                 return res;
6691
6692         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6693                                        &dm_plane_funcs, formats, num_formats,
6694                                        modifiers, plane->type, NULL);
6695         kfree(modifiers);
6696         if (res)
6697                 return res;
6698
6699         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6700             plane_cap && plane_cap->per_pixel_alpha) {
6701                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6702                                           BIT(DRM_MODE_BLEND_PREMULTI);
6703
6704                 drm_plane_create_alpha_property(plane);
6705                 drm_plane_create_blend_mode_property(plane, blend_caps);
6706         }
6707
6708         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6709             plane_cap &&
6710             (plane_cap->pixel_format_support.nv12 ||
6711              plane_cap->pixel_format_support.p010)) {
6712                 /* This only affects YUV formats. */
6713                 drm_plane_create_color_properties(
6714                         plane,
6715                         BIT(DRM_COLOR_YCBCR_BT601) |
6716                         BIT(DRM_COLOR_YCBCR_BT709) |
6717                         BIT(DRM_COLOR_YCBCR_BT2020),
6718                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6719                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6720                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6721         }
6722
6723         supported_rotations =
6724                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6725                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6726
6727         if (dm->adev->asic_type >= CHIP_BONAIRE &&
6728             plane->type != DRM_PLANE_TYPE_CURSOR)
6729                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6730                                                    supported_rotations);
6731
6732         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6733
6734         /* Create (reset) the plane state */
6735         if (plane->funcs->reset)
6736                 plane->funcs->reset(plane);
6737
6738         return 0;
6739 }
6740
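     /*
      * Create a CRTC together with a dedicated cursor plane, register
      * the DM helper funcs, size the cursor from the DC caps, and
      * enable DRM color management with the driver's LUT sizes.
      */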
6741 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6742                                struct drm_plane *plane,
6743                                uint32_t crtc_index)
6744 {
6745         struct amdgpu_crtc *acrtc = NULL;
6746         struct drm_plane *cursor_plane;
6747
6748         int res = -ENOMEM;
6749
6750         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6751         if (!cursor_plane)
6752                 goto fail;
6753
6754         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6755         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6756
6757         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6758         if (!acrtc)
6759                 goto fail;
6760
6761         res = drm_crtc_init_with_planes(
6762                         dm->ddev,
6763                         &acrtc->base,
6764                         plane,
6765                         cursor_plane,
6766                         &amdgpu_dm_crtc_funcs, NULL);
6767
6768         if (res)
6769                 goto fail;
6770
6771         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6772
6773         /* Create (reset) the crtc state */
6774         if (acrtc->base.funcs->reset)
6775                 acrtc->base.funcs->reset(&acrtc->base);
6776
6777         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6778         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6779
6780         acrtc->crtc_id = crtc_index;
6781         acrtc->base.enabled = false;
6782         acrtc->otg_inst = -1;
6783
6784         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6785         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6786                                    true, MAX_COLOR_LUT_ENTRIES);
6787         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6788
6789         return 0;
6790
6791 fail:
6792         kfree(acrtc);
6793         kfree(cursor_plane);
6794         return res;
6795 }
6796
6797
6798 static int to_drm_connector_type(enum signal_type st)
6799 {
6800         switch (st) {
6801         case SIGNAL_TYPE_HDMI_TYPE_A:
6802                 return DRM_MODE_CONNECTOR_HDMIA;
6803         case SIGNAL_TYPE_EDP:
6804                 return DRM_MODE_CONNECTOR_eDP;
6805         case SIGNAL_TYPE_LVDS:
6806                 return DRM_MODE_CONNECTOR_LVDS;
6807         case SIGNAL_TYPE_RGB:
6808                 return DRM_MODE_CONNECTOR_VGA;
6809         case SIGNAL_TYPE_DISPLAY_PORT:
6810         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6811                 return DRM_MODE_CONNECTOR_DisplayPort;
6812         case SIGNAL_TYPE_DVI_DUAL_LINK:
6813         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6814                 return DRM_MODE_CONNECTOR_DVID;
6815         case SIGNAL_TYPE_VIRTUAL:
6816                 return DRM_MODE_CONNECTOR_VIRTUAL;
6817
6818         default:
6819                 return DRM_MODE_CONNECTOR_Unknown;
6820         }
6821 }
6822
6823 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6824 {
6825         struct drm_encoder *encoder;
6826
6827         /* There is only one encoder per connector */
6828         drm_connector_for_each_possible_encoder(connector, encoder)
6829                 return encoder;
6830
6831         return NULL;
6832 }
6833
6834 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6835 {
6836         struct drm_encoder *encoder;
6837         struct amdgpu_encoder *amdgpu_encoder;
6838
6839         encoder = amdgpu_dm_connector_to_encoder(connector);
6840
6841         if (encoder == NULL)
6842                 return;
6843
6844         amdgpu_encoder = to_amdgpu_encoder(encoder);
6845
6846         amdgpu_encoder->native_mode.clock = 0;
6847
6848         if (!list_empty(&connector->probed_modes)) {
6849                 struct drm_display_mode *preferred_mode = NULL;
6850
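                 /* The caller (amdgpu_dm_connector_ddc_get_modes) sorted
                  * probed_modes with drm_mode_sort(), which places preferred
                  * modes first, so only the list head needs checking here. */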
6851                 list_for_each_entry(preferred_mode,
6852                                     &connector->probed_modes,
6853                                     head) {
6854                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6855                                 amdgpu_encoder->native_mode = *preferred_mode;
6856
6857                         break;
6858                 }
6859
6860         }
6861 }
6862
6863 static struct drm_display_mode *
6864 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6865                              char *name,
6866                              int hdisplay, int vdisplay)
6867 {
6868         struct drm_device *dev = encoder->dev;
6869         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6870         struct drm_display_mode *mode = NULL;
6871         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6872
6873         mode = drm_mode_duplicate(dev, native_mode);
6874
6875         if (mode == NULL)
6876                 return NULL;
6877
6878         mode->hdisplay = hdisplay;
6879         mode->vdisplay = vdisplay;
6880         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6881         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6882
6883         return mode;
6885 }
6886
6887 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6888                                                  struct drm_connector *connector)
6889 {
6890         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6891         struct drm_display_mode *mode = NULL;
6892         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6893         struct amdgpu_dm_connector *amdgpu_dm_connector =
6894                                 to_amdgpu_dm_connector(connector);
6895         int i;
6896         int n;
6897         struct mode_size {
6898                 char name[DRM_DISPLAY_MODE_LEN];
6899                 int w;
6900                 int h;
6901         } common_modes[] = {
6902                 {  "640x480",  640,  480},
6903                 {  "800x600",  800,  600},
6904                 { "1024x768", 1024,  768},
6905                 { "1280x720", 1280,  720},
6906                 { "1280x800", 1280,  800},
6907                 {"1280x1024", 1280, 1024},
6908                 { "1440x900", 1440,  900},
6909                 {"1680x1050", 1680, 1050},
6910                 {"1600x1200", 1600, 1200},
6911                 {"1920x1080", 1920, 1080},
6912                 {"1920x1200", 1920, 1200}
6913         };
6914
6915         n = ARRAY_SIZE(common_modes);
6916
6917         for (i = 0; i < n; i++) {
6918                 struct drm_display_mode *curmode = NULL;
6919                 bool mode_existed = false;
6920
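                 /* Skip modes larger than the native mode, and skip the
                  * native resolution itself since it is already in the
                  * probed list. */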
6921                 if (common_modes[i].w > native_mode->hdisplay ||
6922                     common_modes[i].h > native_mode->vdisplay ||
6923                    (common_modes[i].w == native_mode->hdisplay &&
6924                     common_modes[i].h == native_mode->vdisplay))
6925                         continue;
6926
6927                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6928                         if (common_modes[i].w == curmode->hdisplay &&
6929                             common_modes[i].h == curmode->vdisplay) {
6930                                 mode_existed = true;
6931                                 break;
6932                         }
6933                 }
6934
6935                 if (mode_existed)
6936                         continue;
6937
6938                 mode = amdgpu_dm_create_common_mode(encoder,
6939                                 common_modes[i].name, common_modes[i].w,
6940                                 common_modes[i].h);
6941                 drm_mode_probed_add(connector, mode);
6942                 amdgpu_dm_connector->num_modes++;
6943         }
6944 }
6945
6946 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6947                                               struct edid *edid)
6948 {
6949         struct amdgpu_dm_connector *amdgpu_dm_connector =
6950                         to_amdgpu_dm_connector(connector);
6951
6952         if (edid) {
6953                 /* empty probed_modes */
6954                 INIT_LIST_HEAD(&connector->probed_modes);
6955                 amdgpu_dm_connector->num_modes =
6956                                 drm_add_edid_modes(connector, edid);
6957
6958                 /* Sort the probed modes before calling
6959                  * amdgpu_dm_get_native_mode(), since an EDID can
6960                  * contain more than one preferred mode. Modes later in
6961                  * the probed mode list may be preferred at a higher
6962                  * resolution, e.g. a 3840x2160 preferred timing in the
6963                  * base EDID and a 4096x2160 preferred resolution in a
6964                  * later DisplayID extension block.
6965                  */
6966                 drm_mode_sort(&connector->probed_modes);
6967                 amdgpu_dm_get_native_mode(connector);
6968         } else {
6969                 amdgpu_dm_connector->num_modes = 0;
6970         }
6971 }
6972
6973 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6974 {
6975         struct amdgpu_dm_connector *amdgpu_dm_connector =
6976                         to_amdgpu_dm_connector(connector);
6977         struct drm_encoder *encoder;
6978         struct edid *edid = amdgpu_dm_connector->edid;
6979
6980         encoder = amdgpu_dm_connector_to_encoder(connector);
6981
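         /* With no valid EDID, fall back to standard modes no larger than
          * 640x480; otherwise use the EDID modes plus the common modes. */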
6982         if (!drm_edid_is_valid(edid)) {
6983                 amdgpu_dm_connector->num_modes =
6984                                 drm_add_modes_noedid(connector, 640, 480);
6985         } else {
6986                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6987                 amdgpu_dm_connector_add_common_modes(encoder, connector);
6988         }
6989         amdgpu_dm_fbc_init(connector);
6990
6991         return amdgpu_dm_connector->num_modes;
6992 }
6993
6994 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6995                                      struct amdgpu_dm_connector *aconnector,
6996                                      int connector_type,
6997                                      struct dc_link *link,
6998                                      int link_index)
6999 {
7000         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7001
7002         /*
7003          * Some of the properties below require access to state, like bpc.
7004          * Allocate some default initial connector state with our reset helper.
7005          */
7006         if (aconnector->base.funcs->reset)
7007                 aconnector->base.funcs->reset(&aconnector->base);
7008
7009         aconnector->connector_id = link_index;
7010         aconnector->dc_link = link;
7011         aconnector->base.interlace_allowed = false;
7012         aconnector->base.doublescan_allowed = false;
7013         aconnector->base.stereo_allowed = false;
7014         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7015         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7016         aconnector->audio_inst = -1;
7017         mutex_init(&aconnector->hpd_lock);
7018
7019         /*
7020          * Configure HPD hot-plug support: connector->polled defaults
7021          * to 0, which means HPD hot plug is not supported.
7022          */
7023         switch (connector_type) {
7024         case DRM_MODE_CONNECTOR_HDMIA:
7025                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7026                 aconnector->base.ycbcr_420_allowed =
7027                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7028                 break;
7029         case DRM_MODE_CONNECTOR_DisplayPort:
7030                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7031                 aconnector->base.ycbcr_420_allowed =
7032                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7033                 break;
7034         case DRM_MODE_CONNECTOR_DVID:
7035                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7036                 break;
7037         default:
7038                 break;
7039         }
7040
7041         drm_object_attach_property(&aconnector->base.base,
7042                                 dm->ddev->mode_config.scaling_mode_property,
7043                                 DRM_MODE_SCALE_NONE);
7044
7045         drm_object_attach_property(&aconnector->base.base,
7046                                 adev->mode_info.underscan_property,
7047                                 UNDERSCAN_OFF);
7048         drm_object_attach_property(&aconnector->base.base,
7049                                 adev->mode_info.underscan_hborder_property,
7050                                 0);
7051         drm_object_attach_property(&aconnector->base.base,
7052                                 adev->mode_info.underscan_vborder_property,
7053                                 0);
7054
7055         if (!aconnector->mst_port)
7056                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7057
7058         /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7059         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7060         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7061
7062         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7063             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7064                 drm_object_attach_property(&aconnector->base.base,
7065                                 adev->mode_info.abm_level_property, 0);
7066         }
7067
7068         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7069             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7070             connector_type == DRM_MODE_CONNECTOR_eDP) {
7071                 drm_object_attach_property(
7072                         &aconnector->base.base,
7073                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7074
7075                 if (!aconnector->mst_port)
7076                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7077
7078 #ifdef CONFIG_DRM_AMD_DC_HDCP
7079                 if (adev->dm.hdcp_workqueue)
7080                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7081 #endif
7082         }
7083 }
7084
7085 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7086                               struct i2c_msg *msgs, int num)
7087 {
7088         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7089         struct ddc_service *ddc_service = i2c->ddc_service;
7090         struct i2c_command cmd;
7091         int i;
7092         int result = -EIO;
7093
7094         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7095
7096         if (!cmd.payloads)
7097                 return result;
7098
7099         cmd.number_of_payloads = num;
7100         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7101         cmd.speed = 100;
7102
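         /* Translate each i2c_msg into a DC i2c_payload; the transfer
          * direction comes from the I2C_M_RD flag. */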
7103         for (i = 0; i < num; i++) {
7104                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7105                 cmd.payloads[i].address = msgs[i].addr;
7106                 cmd.payloads[i].length = msgs[i].len;
7107                 cmd.payloads[i].data = msgs[i].buf;
7108         }
7109
7110         if (dc_submit_i2c(
7111                         ddc_service->ctx->dc,
7112                         ddc_service->ddc_pin->hw_info.ddc_channel,
7113                         &cmd))
7114                 result = num;
7115
7116         kfree(cmd.payloads);
7117         return result;
7118 }
7119
7120 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7121 {
7122         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7123 }
7124
7125 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7126         .master_xfer = amdgpu_dm_i2c_xfer,
7127         .functionality = amdgpu_dm_i2c_func,
7128 };
7129
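/*
 * Allocate and initialize (but do not register) an i2c adapter wrapping a
 * DC DDC line; the caller registers it with i2c_add_adapter().
 */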
7130 static struct amdgpu_i2c_adapter *
7131 create_i2c(struct ddc_service *ddc_service,
7132            int link_index,
7133            int *res)
7134 {
7135         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7136         struct amdgpu_i2c_adapter *i2c;
7137
7138         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7139         if (!i2c)
7140                 return NULL;
7141         i2c->base.owner = THIS_MODULE;
7142         i2c->base.class = I2C_CLASS_DDC;
7143         i2c->base.dev.parent = &adev->pdev->dev;
7144         i2c->base.algo = &amdgpu_dm_i2c_algo;
7145         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7146         i2c_set_adapdata(&i2c->base, i2c);
7147         i2c->ddc_service = ddc_service;
7148         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7149
7150         return i2c;
7151 }
7152
7153
7154 /*
7155  * Note: this function assumes that dc_link_detect() was called for the
7156  * dc_link which will be represented by this aconnector.
7157  */
7158 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7159                                     struct amdgpu_dm_connector *aconnector,
7160                                     uint32_t link_index,
7161                                     struct amdgpu_encoder *aencoder)
7162 {
7163         int res = 0;
7164         int connector_type;
7165         struct dc *dc = dm->dc;
7166         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7167         struct amdgpu_i2c_adapter *i2c;
7168
7169         link->priv = aconnector;
7170
7171         DRM_DEBUG_DRIVER("%s()\n", __func__);
7172
7173         i2c = create_i2c(link->ddc, link->link_index, &res);
7174         if (!i2c) {
7175                 DRM_ERROR("Failed to create i2c adapter data\n");
7176                 return -ENOMEM;
7177         }
7178
7179         aconnector->i2c = i2c;
7180         res = i2c_add_adapter(&i2c->base);
7181
7182         if (res) {
7183                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7184                 goto out_free;
7185         }
7186
7187         connector_type = to_drm_connector_type(link->connector_signal);
7188
7189         res = drm_connector_init_with_ddc(
7190                         dm->ddev,
7191                         &aconnector->base,
7192                         &amdgpu_dm_connector_funcs,
7193                         connector_type,
7194                         &i2c->base);
7195
7196         if (res) {
7197                 DRM_ERROR("connector_init failed\n");
7198                 aconnector->connector_id = -1;
7199                 goto out_free;
7200         }
7201
7202         drm_connector_helper_add(
7203                         &aconnector->base,
7204                         &amdgpu_dm_connector_helper_funcs);
7205
7206         amdgpu_dm_connector_init_helper(
7207                 dm,
7208                 aconnector,
7209                 connector_type,
7210                 link,
7211                 link_index);
7212
7213         drm_connector_attach_encoder(
7214                 &aconnector->base, &aencoder->base);
7215
7216         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7217             connector_type == DRM_MODE_CONNECTOR_eDP)
7218                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7219
7220 out_free:
7221         if (res) {
7222                 kfree(i2c);
7223                 aconnector->i2c = NULL;
7224         }
7225         return res;
7226 }
7227
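/*
 * Build a possible_crtcs bitmask with one bit set per CRTC, i.e.
 * (1 << num_crtc) - 1, capped at six CRTCs (e.g. four CRTCs yields 0xf).
 */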
7228 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7229 {
7230         switch (adev->mode_info.num_crtc) {
7231         case 1:
7232                 return 0x1;
7233         case 2:
7234                 return 0x3;
7235         case 3:
7236                 return 0x7;
7237         case 4:
7238                 return 0xf;
7239         case 5:
7240                 return 0x1f;
7241         case 6:
7242         default:
7243                 return 0x3f;
7244         }
7245 }
7246
7247 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7248                                   struct amdgpu_encoder *aencoder,
7249                                   uint32_t link_index)
7250 {
7251         struct amdgpu_device *adev = drm_to_adev(dev);
7252
7253         int res = drm_encoder_init(dev,
7254                                    &aencoder->base,
7255                                    &amdgpu_dm_encoder_funcs,
7256                                    DRM_MODE_ENCODER_TMDS,
7257                                    NULL);
7258
7259         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7260
7261         if (!res)
7262                 aencoder->encoder_id = link_index;
7263         else
7264                 aencoder->encoder_id = -1;
7265
7266         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7267
7268         return res;
7269 }
7270
7271 static void manage_dm_interrupts(struct amdgpu_device *adev,
7272                                  struct amdgpu_crtc *acrtc,
7273                                  bool enable)
7274 {
7275         /*
7276          * We have no guarantee that the frontend index maps to the same
7277          * backend index - some even map to more than one.
7278          *
7279          * TODO: Use a different interrupt or check DC itself for the mapping.
7280          */
7281         int irq_type =
7282                 amdgpu_display_crtc_idx_to_irq_type(
7283                         adev,
7284                         acrtc->crtc_id);
7285
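         /* Enable vblank handling before taking a pageflip irq reference;
          * disabling tears both down in the reverse order. */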
7286         if (enable) {
7287                 drm_crtc_vblank_on(&acrtc->base);
7288                 amdgpu_irq_get(
7289                         adev,
7290                         &adev->pageflip_irq,
7291                         irq_type);
7292         } else {
7294                 amdgpu_irq_put(
7295                         adev,
7296                         &adev->pageflip_irq,
7297                         irq_type);
7298                 drm_crtc_vblank_off(&acrtc->base);
7299         }
7300 }
7301
7302 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7303                                       struct amdgpu_crtc *acrtc)
7304 {
7305         int irq_type =
7306                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7307
7308         /*
7309          * This reads the current state for the IRQ and forcibly reapplies
7310          * the setting to hardware.
7311          */
7312         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7313 }
7314
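/*
 * Return true if the scaling mode or underscan borders changed between the
 * old and new connector state in a way that requires a stream update.
 */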
7315 static bool
7316 is_scaling_state_different(const struct dm_connector_state *dm_state,
7317                            const struct dm_connector_state *old_dm_state)
7318 {
7319         if (dm_state->scaling != old_dm_state->scaling)
7320                 return true;
7321         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7322                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7323                         return true;
7324         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7325                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7326                         return true;
7327         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7328                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7329                 return true;
7330         return false;
7331 }
7332
7333 #ifdef CONFIG_DRM_AMD_DC_HDCP
7334 static bool is_content_protection_different(struct drm_connector_state *state,
7335                                             const struct drm_connector_state *old_state,
7336                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7337 {
7338         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7339         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7340
7341         /* Handle: Type0/1 change */
7342         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7343             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7344                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7345                 return true;
7346         }
7347
7348         /* CP is being re-enabled, ignore this transition.
7349          *
7350          * Handles:     ENABLED -> DESIRED
7351          */
7352         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7353             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7354                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7355                 return false;
7356         }
7357
7358         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7359          *
7360          * Handles:     UNDESIRED -> ENABLED
7361          */
7362         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7363             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7364                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7365
7366         /* Check that something is connected/enabled; otherwise we would start HDCP
7367          * with nothing connected/enabled (hot-plug, headless S3, DPMS).
7368          *
7369          * Handles:     DESIRED -> DESIRED (Special case)
7370          */
7371         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7372             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7373                 dm_con_state->update_hdcp = false;
7374                 return true;
7375         }
7376
7377         /*
7378          * Handles:     UNDESIRED -> UNDESIRED
7379          *              DESIRED -> DESIRED
7380          *              ENABLED -> ENABLED
7381          */
7382         if (old_state->content_protection == state->content_protection)
7383                 return false;
7384
7385         /*
7386          * Handles:     UNDESIRED -> DESIRED
7387          *              DESIRED -> UNDESIRED
7388          *              ENABLED -> UNDESIRED
7389          */
7390         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7391                 return true;
7392
7393         /*
7394          * Handles:     DESIRED -> ENABLED
7395          */
7396         return false;
7397 }
7398
7399 #endif
7400 static void remove_stream(struct amdgpu_device *adev,
7401                           struct amdgpu_crtc *acrtc,
7402                           struct dc_stream_state *stream)
7403 {
7404         /* this is the update mode case: detach the stream from the crtc */
7405
7406         acrtc->otg_inst = -1;
7407         acrtc->enabled = false;
7408 }
7409
7410 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7411                                struct dc_cursor_position *position)
7412 {
7413         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7414         int x, y;
7415         int xorigin = 0, yorigin = 0;
7416
7417         position->enable = false;
7418         position->x = 0;
7419         position->y = 0;
7420
7421         if (!crtc || !plane->state->fb)
7422                 return 0;
7423
7424         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7425             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7426                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7427                           __func__,
7428                           plane->state->crtc_w,
7429                           plane->state->crtc_h);
7430                 return -EINVAL;
7431         }
7432
7433         x = plane->state->crtc_x;
7434         y = plane->state->crtc_y;
7435
7436         if (x <= -amdgpu_crtc->max_cursor_width ||
7437             y <= -amdgpu_crtc->max_cursor_height)
7438                 return 0;
7439
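         /* If the cursor hangs off the top/left edge, clamp the position to
          * zero and fold the overhang into the hotspot so the visible part
          * of the cursor does not shift. */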
7440         if (x < 0) {
7441                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7442                 x = 0;
7443         }
7444         if (y < 0) {
7445                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7446                 y = 0;
7447         }
7448         position->enable = true;
7449         position->translate_by_source = true;
7450         position->x = x;
7451         position->y = y;
7452         position->x_hotspot = xorigin;
7453         position->y_hotspot = yorigin;
7454
7455         return 0;
7456 }
7457
7458 static void handle_cursor_update(struct drm_plane *plane,
7459                                  struct drm_plane_state *old_plane_state)
7460 {
7461         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7462         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7463         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7464         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7465         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7466         uint64_t address = afb ? afb->address : 0;
7467         struct dc_cursor_position position;
7468         struct dc_cursor_attributes attributes;
7469         int ret;
7470
7471         if (!plane->state->fb && !old_plane_state->fb)
7472                 return;
7473
7474         DRM_DEBUG_DRIVER("%s: crtc_id=%d with cursor size %dx%d\n",
7475                          __func__,
7476                          amdgpu_crtc->crtc_id,
7477                          plane->state->crtc_w,
7478                          plane->state->crtc_h);
7479
7480         ret = get_cursor_position(plane, crtc, &position);
7481         if (ret)
7482                 return;
7483
7484         if (!position.enable) {
7485                 /* turn off cursor */
7486                 if (crtc_state && crtc_state->stream) {
7487                         mutex_lock(&adev->dm.dc_lock);
7488                         dc_stream_set_cursor_position(crtc_state->stream,
7489                                                       &position);
7490                         mutex_unlock(&adev->dm.dc_lock);
7491                 }
7492                 return;
7493         }
7494
7495         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7496         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7497
7498         memset(&attributes, 0, sizeof(attributes));
7499         attributes.address.high_part = upper_32_bits(address);
7500         attributes.address.low_part  = lower_32_bits(address);
7501         attributes.width             = plane->state->crtc_w;
7502         attributes.height            = plane->state->crtc_h;
7503         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7504         attributes.rotation_angle    = 0;
7505         attributes.attribute_flags.value = 0;
7506
7507         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
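                 /* DC expects the cursor pitch in pixels, while DRM stores
                  * framebuffer pitches in bytes, hence the divide by the
                  * bytes-per-pixel of the format. */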
7508
7509         if (crtc_state->stream) {
7510                 mutex_lock(&adev->dm.dc_lock);
7511                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7512                                                          &attributes))
7513                         DRM_ERROR("DC failed to set cursor attributes\n");
7514
7515                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7516                                                    &position))
7517                         DRM_ERROR("DC failed to set cursor position\n");
7518                 mutex_unlock(&adev->dm.dc_lock);
7519         }
7520 }
7521
7522 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7523 {
7525         assert_spin_locked(&acrtc->base.dev->event_lock);
7526         WARN_ON(acrtc->event);
7527
7528         acrtc->event = acrtc->base.state->event;
7529
7530         /* Set the flip status */
7531         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7532
7533         /* Mark this event as consumed */
7534         acrtc->base.state->event = NULL;
7535
7536         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7537                                                  acrtc->crtc_id);
7538 }
7539
7540 static void update_freesync_state_on_stream(
7541         struct amdgpu_display_manager *dm,
7542         struct dm_crtc_state *new_crtc_state,
7543         struct dc_stream_state *new_stream,
7544         struct dc_plane_state *surface,
7545         u32 flip_timestamp_in_us)
7546 {
7547         struct mod_vrr_params vrr_params;
7548         struct dc_info_packet vrr_infopacket = {0};
7549         struct amdgpu_device *adev = dm->adev;
7550         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7551         unsigned long flags;
7552
7553         if (!new_stream)
7554                 return;
7555
7556         /*
7557          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7558          * For now it's sufficient to just guard against these conditions.
7559          */
7560
7561         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7562                 return;
7563
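         /* dm_irq_params is also accessed from the vblank/pflip interrupt
          * handlers, so it may only be touched under the event_lock. */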
7564         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7565         vrr_params = acrtc->dm_irq_params.vrr_params;
7566
7567         if (surface) {
7568                 mod_freesync_handle_preflip(
7569                         dm->freesync_module,
7570                         surface,
7571                         new_stream,
7572                         flip_timestamp_in_us,
7573                         &vrr_params);
7574
7575                 if (adev->family < AMDGPU_FAMILY_AI &&
7576                     amdgpu_dm_vrr_active(new_crtc_state)) {
7577                         mod_freesync_handle_v_update(dm->freesync_module,
7578                                                      new_stream, &vrr_params);
7579
7580                         /* Need to call this before the frame ends. */
7581                         dc_stream_adjust_vmin_vmax(dm->dc,
7582                                                    new_crtc_state->stream,
7583                                                    &vrr_params.adjust);
7584                 }
7585         }
7586
7587         mod_freesync_build_vrr_infopacket(
7588                 dm->freesync_module,
7589                 new_stream,
7590                 &vrr_params,
7591                 PACKET_TYPE_VRR,
7592                 TRANSFER_FUNC_UNKNOWN,
7593                 &vrr_infopacket);
7594
7595         new_crtc_state->freesync_timing_changed |=
7596                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7597                         &vrr_params.adjust,
7598                         sizeof(vrr_params.adjust)) != 0);
7599
7600         new_crtc_state->freesync_vrr_info_changed |=
7601                 (memcmp(&new_crtc_state->vrr_infopacket,
7602                         &vrr_infopacket,
7603                         sizeof(vrr_infopacket)) != 0);
7604
7605         acrtc->dm_irq_params.vrr_params = vrr_params;
7606         new_crtc_state->vrr_infopacket = vrr_infopacket;
7607
7608         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7609         new_stream->vrr_infopacket = vrr_infopacket;
7610
7611         if (new_crtc_state->freesync_vrr_info_changed)
7612                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7613                               new_crtc_state->base.crtc->base.id,
7614                               (int)new_crtc_state->base.vrr_enabled,
7615                               (int)vrr_params.state);
7616
7617         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7618 }
7619
7620 static void update_stream_irq_parameters(
7621         struct amdgpu_display_manager *dm,
7622         struct dm_crtc_state *new_crtc_state)
7623 {
7624         struct dc_stream_state *new_stream = new_crtc_state->stream;
7625         struct mod_vrr_params vrr_params;
7626         struct mod_freesync_config config = new_crtc_state->freesync_config;
7627         struct amdgpu_device *adev = dm->adev;
7628         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7629         unsigned long flags;
7630
7631         if (!new_stream)
7632                 return;
7633
7634         /*
7635          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7636          * For now it's sufficient to just guard against these conditions.
7637          */
7638         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7639                 return;
7640
7641         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7642         vrr_params = acrtc->dm_irq_params.vrr_params;
7643
7644         if (new_crtc_state->vrr_supported &&
7645             config.min_refresh_in_uhz &&
7646             config.max_refresh_in_uhz) {
7647                 config.state = new_crtc_state->base.vrr_enabled ?
7648                         VRR_STATE_ACTIVE_VARIABLE :
7649                         VRR_STATE_INACTIVE;
7650         } else {
7651                 config.state = VRR_STATE_UNSUPPORTED;
7652         }
7653
7654         mod_freesync_build_vrr_params(dm->freesync_module,
7655                                       new_stream,
7656                                       &config, &vrr_params);
7657
7658         new_crtc_state->freesync_timing_changed |=
7659                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7660                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7661
7662         new_crtc_state->freesync_config = config;
7663         /* Copy state for access from DM IRQ handler */
7664         acrtc->dm_irq_params.freesync_config = config;
7665         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7666         acrtc->dm_irq_params.vrr_params = vrr_params;
7667         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7668 }
7669
7670 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7671                                             struct dm_crtc_state *new_state)
7672 {
7673         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7674         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7675
7676         if (!old_vrr_active && new_vrr_active) {
7677                 /* Transition VRR inactive -> active:
7678                  * While VRR is active, we must not disable the vblank irq, as a
7679                  * re-enable after disable would compute bogus vblank/pflip
7680                  * timestamps if the re-enable happens inside the display front porch.
7681                  *
7682                  * We also need vupdate irq for the actual core vblank handling
7683                  * at end of vblank.
7684                  */
7685                 dm_set_vupdate_irq(new_state->base.crtc, true);
7686                 drm_crtc_vblank_get(new_state->base.crtc);
7687                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7688                                  __func__, new_state->base.crtc->base.id);
7689         } else if (old_vrr_active && !new_vrr_active) {
7690                 /* Transition VRR active -> inactive:
7691                  * Allow vblank irq disable again for fixed refresh rate.
7692                  */
7693                 dm_set_vupdate_irq(new_state->base.crtc, false);
7694                 drm_crtc_vblank_put(new_state->base.crtc);
7695                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7696                                  __func__, new_state->base.crtc->base.id);
7697         }
7698 }
7699
7700 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7701 {
7702         struct drm_plane *plane;
7703         struct drm_plane_state *old_plane_state, *new_plane_state;
7704         int i;
7705
7706         /*
7707          * TODO: Make this per-stream so we don't issue redundant updates for
7708          * commits with multiple streams.
7709          */
7710         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7711                                        new_plane_state, i)
7712                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7713                         handle_cursor_update(plane, old_plane_state);
7714 }
7715
7716 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7717                                     struct dc_state *dc_state,
7718                                     struct drm_device *dev,
7719                                     struct amdgpu_display_manager *dm,
7720                                     struct drm_crtc *pcrtc,
7721                                     bool wait_for_vblank)
7722 {
7723         uint32_t i;
7724         uint64_t timestamp_ns;
7725         struct drm_plane *plane;
7726         struct drm_plane_state *old_plane_state, *new_plane_state;
7727         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7728         struct drm_crtc_state *new_pcrtc_state =
7729                         drm_atomic_get_new_crtc_state(state, pcrtc);
7730         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7731         struct dm_crtc_state *dm_old_crtc_state =
7732                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7733         int planes_count = 0, vpos, hpos;
7734         long r;
7735         unsigned long flags;
7736         struct amdgpu_bo *abo;
7737         uint32_t target_vblank, last_flip_vblank;
7738         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7739         bool pflip_present = false;
7740         struct {
7741                 struct dc_surface_update surface_updates[MAX_SURFACES];
7742                 struct dc_plane_info plane_infos[MAX_SURFACES];
7743                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7744                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7745                 struct dc_stream_update stream_update;
7746         } *bundle;
7747
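         /* The update bundle is too large to live on the kernel stack, so
          * allocate it from the heap for each commit. */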
7748         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7749
7750         if (!bundle) {
7751                 dm_error("Failed to allocate update bundle\n");
7752                 goto cleanup;
7753         }
7754
7755         /*
7756          * Disable the cursor first if we're disabling all the planes.
7757          * It'll remain on the screen after the planes are re-enabled
7758          * if we don't.
7759          */
7760         if (acrtc_state->active_planes == 0)
7761                 amdgpu_dm_commit_cursors(state);
7762
7763         /* update planes when needed */
7764         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7765                 struct drm_crtc *crtc = new_plane_state->crtc;
7766                 struct drm_crtc_state *new_crtc_state;
7767                 struct drm_framebuffer *fb = new_plane_state->fb;
7768                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7769                 bool plane_needs_flip;
7770                 struct dc_plane_state *dc_plane;
7771                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7772
7773                 /* Cursor plane is handled after stream updates */
7774                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7775                         continue;
7776
7777                 if (!fb || !crtc || pcrtc != crtc)
7778                         continue;
7779
7780                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7781                 if (!new_crtc_state->active)
7782                         continue;
7783
7784                 dc_plane = dm_new_plane_state->dc_state;
7785
7786                 bundle->surface_updates[planes_count].surface = dc_plane;
7787                 if (new_pcrtc_state->color_mgmt_changed) {
7788                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7789                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7790                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7791                 }
7792
7793                 fill_dc_scaling_info(new_plane_state,
7794                                      &bundle->scaling_infos[planes_count]);
7795
7796                 bundle->surface_updates[planes_count].scaling_info =
7797                         &bundle->scaling_infos[planes_count];
7798
7799                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7800
7801                 pflip_present = pflip_present || plane_needs_flip;
7802
7803                 if (!plane_needs_flip) {
7804                         planes_count += 1;
7805                         continue;
7806                 }
7807
7808                 abo = gem_to_amdgpu_bo(fb->obj[0]);
7809
7810                 /*
7811                  * Wait for all fences on this FB. Do a limited wait to avoid
7812                  * deadlock during GPU reset, when this fence will not signal
7813                  * but we still hold the reservation lock for the BO.
7814                  */
7815                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7816                                                         false,
7817                                                         msecs_to_jiffies(5000));
7818                 if (unlikely(r <= 0))
7819                         DRM_ERROR("Waiting for fences timed out!");
7820
7821                 fill_dc_plane_info_and_addr(
7822                         dm->adev, new_plane_state,
7823                         afb->tiling_flags,
7824                         &bundle->plane_infos[planes_count],
7825                         &bundle->flip_addrs[planes_count].address,
7826                         afb->tmz_surface, false);
7827
7828                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7829                                  new_plane_state->plane->index,
7830                                  bundle->plane_infos[planes_count].dcc.enable);
7831
7832                 bundle->surface_updates[planes_count].plane_info =
7833                         &bundle->plane_infos[planes_count];
7834
7835                 /*
7836                  * Only allow immediate flips for fast updates that don't
7837                  * change FB pitch, DCC state, rotation or mirroring.
7838                  */
7839                 bundle->flip_addrs[planes_count].flip_immediate =
7840                         crtc->state->async_flip &&
7841                         acrtc_state->update_type == UPDATE_TYPE_FAST;
7842
7843                 timestamp_ns = ktime_get_ns();
7844                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7845                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7846                 bundle->surface_updates[planes_count].surface = dc_plane;
7847
7848                 if (!bundle->surface_updates[planes_count].surface) {
7849                         DRM_ERROR("No surface for CRTC: id=%d\n",
7850                                         acrtc_attach->crtc_id);
7851                         continue;
7852                 }
7853
7854                 if (plane == pcrtc->primary)
7855                         update_freesync_state_on_stream(
7856                                 dm,
7857                                 acrtc_state,
7858                                 acrtc_state->stream,
7859                                 dc_plane,
7860                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7861
7862                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7863                                  __func__,
7864                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7865                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7866
7867                 planes_count += 1;
7868
7869         }
7870
7871         if (pflip_present) {
7872                 if (!vrr_active) {
7873                         /* Use old throttling in non-vrr fixed refresh rate mode
7874                          * to keep flip scheduling based on target vblank counts
7875                          * working in a backwards compatible way, e.g., for
7876                          * clients using the GLX_OML_sync_control extension or
7877                          * DRI3/Present extension with defined target_msc.
7878                          */
7879                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7880                 } else {
7882                         /* For variable refresh rate mode only:
7883                          * Get vblank of last completed flip to avoid > 1 vrr
7884                          * flips per video frame by use of throttling, but allow
7885                          * flip programming anywhere in the possibly large
7886                          * variable vrr vblank interval for fine-grained flip
7887                          * timing control and more opportunity to avoid stutter
7888                          * on late submission of flips.
7889                          */
7890                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7891                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7892                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7893                 }
7894
7895                 target_vblank = last_flip_vblank + wait_for_vblank;
7896
7897                 /*
7898                  * Wait until we're out of the vertical blank period before the one
7899                  * targeted by the flip
7900                  */
7901                 while ((acrtc_attach->enabled &&
7902                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7903                                                             0, &vpos, &hpos, NULL,
7904                                                             NULL, &pcrtc->hwmode)
7905                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7906                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7907                         (int)(target_vblank -
7908                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7909                         usleep_range(1000, 1100);
7910                 }
7911
7912                 /*
7913                  * Prepare the flip event for the pageflip interrupt to handle.
7914                  *
7915                  * This only works in the case where we've already turned on the
7916                  * appropriate hardware blocks (eg. HUBP) so in the transition case
7917                  * from 0 -> n planes we have to skip a hardware generated event
7918                  * and rely on sending it from software.
7919                  */
7920                 if (acrtc_attach->base.state->event &&
7921                     acrtc_state->active_planes > 0) {
7922                         drm_crtc_vblank_get(pcrtc);
7923
7924                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7925
7926                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7927                         prepare_flip_isr(acrtc_attach);
7928
7929                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7930                 }
7931
7932                 if (acrtc_state->stream) {
7933                         if (acrtc_state->freesync_vrr_info_changed)
7934                                 bundle->stream_update.vrr_infopacket =
7935                                         &acrtc_state->stream->vrr_infopacket;
7936                 }
7937         }
7938
7939         /* Update the planes if changed or disable if we don't have any. */
7940         if ((planes_count || acrtc_state->active_planes == 0) &&
7941                 acrtc_state->stream) {
7942                 bundle->stream_update.stream = acrtc_state->stream;
7943                 if (new_pcrtc_state->mode_changed) {
7944                         bundle->stream_update.src = acrtc_state->stream->src;
7945                         bundle->stream_update.dst = acrtc_state->stream->dst;
7946                 }
7947
7948                 if (new_pcrtc_state->color_mgmt_changed) {
7949                         /*
7950                          * TODO: This isn't fully correct since we've actually
7951                          * already modified the stream in place.
7952                          */
7953                         bundle->stream_update.gamut_remap =
7954                                 &acrtc_state->stream->gamut_remap_matrix;
7955                         bundle->stream_update.output_csc_transform =
7956                                 &acrtc_state->stream->csc_color_matrix;
7957                         bundle->stream_update.out_transfer_func =
7958                                 acrtc_state->stream->out_transfer_func;
7959                 }
7960
7961                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7962                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7963                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
7964
7965                 /*
7966                  * If FreeSync state on the stream has changed then we need to
7967                  * re-adjust the min/max bounds now that DC doesn't handle this
7968                  * as part of commit.
7969                  */
7970                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7971                     amdgpu_dm_vrr_active(acrtc_state)) {
7972                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7973                         dc_stream_adjust_vmin_vmax(
7974                                 dm->dc, acrtc_state->stream,
7975                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7976                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7977                 }
7978                 mutex_lock(&dm->dc_lock);
7979                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7980                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
7981                         amdgpu_dm_psr_disable(acrtc_state->stream);
7982
7983                 dc_commit_updates_for_stream(dm->dc,
7984                                                      bundle->surface_updates,
7985                                                      planes_count,
7986                                                      acrtc_state->stream,
7987                                                      &bundle->stream_update,
7988                                                      dc_state);
7989
7990                 /*
7991                  * Enable or disable the interrupts on the backend.
7992                  *
7993                  * Most pipes are put into power gating when unused.
7994                  *
7995                  * When power gating is enabled on a pipe we lose the
7996                  * interrupt enablement state when power gating is disabled.
7997                  *
7998                  * So we need to update the IRQ control state in hardware
7999                  * whenever the pipe turns on (since it could be previously
8000                  * power gated) or off (since some pipes can't be power gated
8001                  * on some ASICs).
8002                  */
8003                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8004                         dm_update_pflip_irq_state(drm_to_adev(dev),
8005                                                   acrtc_attach);
8006
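                 /* Set up PSR on full updates when the link supports it but
                  * the feature is not yet enabled; on fast updates, activate
                  * PSR once it is enabled and currently inactive. */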
8007                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8008                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8009                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8010                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8011                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8012                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8013                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8014                         amdgpu_dm_psr_enable(acrtc_state->stream);
8015                 }
8016
8017                 mutex_unlock(&dm->dc_lock);
8018         }
8019
8020         /*
8021          * Update cursor state *after* programming all the planes.
8022          * This avoids redundant programming when disabling a single
8023          * plane, since that pipe is being disabled anyway.
8024          */
8025         if (acrtc_state->active_planes)
8026                 amdgpu_dm_commit_cursors(state);
8027
8028 cleanup:
8029         kfree(bundle);
8030 }
8031
8032 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8033                                    struct drm_atomic_state *state)
8034 {
8035         struct amdgpu_device *adev = drm_to_adev(dev);
8036         struct amdgpu_dm_connector *aconnector;
8037         struct drm_connector *connector;
8038         struct drm_connector_state *old_con_state, *new_con_state;
8039         struct drm_crtc_state *new_crtc_state;
8040         struct dm_crtc_state *new_dm_crtc_state;
8041         const struct dc_stream_status *status;
8042         int i, inst;
8043
8044         /* Notify audio device removals. */
8045         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8046                 if (old_con_state->crtc != new_con_state->crtc) {
8047                         /* CRTC changes require notification. */
8048                         goto notify;
8049                 }
8050
8051                 if (!new_con_state->crtc)
8052                         continue;
8053
8054                 new_crtc_state = drm_atomic_get_new_crtc_state(
8055                         state, new_con_state->crtc);
8056
8057                 if (!new_crtc_state)
8058                         continue;
8059
8060                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8061                         continue;
8062
8063         notify:
8064                 aconnector = to_amdgpu_dm_connector(connector);
8065
8066                 mutex_lock(&adev->dm.audio_lock);
8067                 inst = aconnector->audio_inst;
8068                 aconnector->audio_inst = -1;
8069                 mutex_unlock(&adev->dm.audio_lock);
8070
8071                 amdgpu_dm_audio_eld_notify(adev, inst);
8072         }
8073
8074         /* Notify audio device additions. */
8075         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8076                 if (!new_con_state->crtc)
8077                         continue;
8078
8079                 new_crtc_state = drm_atomic_get_new_crtc_state(
8080                         state, new_con_state->crtc);
8081
8082                 if (!new_crtc_state)
8083                         continue;
8084
8085                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8086                         continue;
8087
8088                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8089                 if (!new_dm_crtc_state->stream)
8090                         continue;
8091
8092                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8093                 if (!status)
8094                         continue;
8095
8096                 aconnector = to_amdgpu_dm_connector(connector);
8097
8098                 mutex_lock(&adev->dm.audio_lock);
8099                 inst = status->audio_inst;
8100                 aconnector->audio_inst = inst;
8101                 mutex_unlock(&adev->dm.audio_lock);
8102
8103                 amdgpu_dm_audio_eld_notify(adev, inst);
8104         }
8105 }
8106
8107 /*
8108  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8109  * @crtc_state: the DRM CRTC state
8110  * @stream_state: the DC stream state.
8111  *
8112  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8113  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8114  */
8115 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8116                                                 struct dc_stream_state *stream_state)
8117 {
8118         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8119 }
8120
8121 /**
8122  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8123  * @state: The atomic state to commit
8124  *
8125  * This will tell DC to commit the constructed DC state from atomic_check,
8126  * programming the hardware. Any failure here implies a hardware failure, since
8127  * atomic check should have filtered out anything non-kosher.
8128  */
8129 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8130 {
8131         struct drm_device *dev = state->dev;
8132         struct amdgpu_device *adev = drm_to_adev(dev);
8133         struct amdgpu_display_manager *dm = &adev->dm;
8134         struct dm_atomic_state *dm_state;
8135         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8136         uint32_t i, j;
8137         struct drm_crtc *crtc;
8138         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8139         unsigned long flags;
8140         bool wait_for_vblank = true;
8141         struct drm_connector *connector;
8142         struct drm_connector_state *old_con_state, *new_con_state;
8143         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8144         int crtc_disable_count = 0;
8145         bool mode_set_reset_required = false;
8146
8147         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8148
8149         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8150
8151         dm_state = dm_atomic_get_new_state(state);
8152         if (dm_state && dm_state->context) {
8153                 dc_state = dm_state->context;
8154         } else {
8155                 /* No state changes, retain current state. */
8156                 dc_state_temp = dc_create_state(dm->dc);
8157                 ASSERT(dc_state_temp);
8158                 dc_state = dc_state_temp;
8159                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8160         }
8161
8162         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8163                                        new_crtc_state, i) {
8164                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8165
8166                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8167
8168                 if (old_crtc_state->active &&
8169                     (!new_crtc_state->active ||
8170                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8171                         manage_dm_interrupts(adev, acrtc, false);
8172                         dc_stream_release(dm_old_crtc_state->stream);
8173                 }
8174         }
8175
8176         drm_atomic_helper_calc_timestamping_constants(state);
8177
8178         /* update changed items */
8179         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8180                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8181
8182                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8183                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8184
8185                 DRM_DEBUG_DRIVER(
8186                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8187                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8188                         "connectors_changed:%d\n",
8189                         acrtc->crtc_id,
8190                         new_crtc_state->enable,
8191                         new_crtc_state->active,
8192                         new_crtc_state->planes_changed,
8193                         new_crtc_state->mode_changed,
8194                         new_crtc_state->active_changed,
8195                         new_crtc_state->connectors_changed);
8196
8197                 /* Disable cursor if disabling crtc */
8198                 if (old_crtc_state->active && !new_crtc_state->active) {
8199                         struct dc_cursor_position position;
8200
8201                         memset(&position, 0, sizeof(position));
8202                         mutex_lock(&dm->dc_lock);
8203                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8204                         mutex_unlock(&dm->dc_lock);
8205                 }
8206
8207                 /* Copy all transient state flags into dc state */
8208                 if (dm_new_crtc_state->stream) {
8209                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8210                                                             dm_new_crtc_state->stream);
8211                 }
8212
8213                 /* Handles the headless hotplug case, updating new_state and
8214                  * aconnector as needed.
8215                  */
8216
8217                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8218
8219                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8220
8221                         if (!dm_new_crtc_state->stream) {
8222                                 /*
8223                                  * This can happen when delivery of userspace
8224                                  * notifications goes wrong: userspace tries to
8225                                  * set a mode on a display which is in fact
8226                                  * disconnected.
8227                                  * dc_sink is NULL on the aconnector in that case.
8228                                  * We expect a mode reset to come soon.
8229                                  *
8230                                  * This can also happen when an unplug occurs
8231                                  * during the resume sequence.
8232                                  *
8233                                  * In either case, we want to pretend we still
8234                                  * have a sink to keep the pipe running so that
8235                                  * hw state stays consistent with the sw state.
8236                                  */
8237                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8238                                                 __func__, acrtc->base.base.id);
8239                                 continue;
8240                         }
8241
8242                         if (dm_old_crtc_state->stream)
8243                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8244
8245                         pm_runtime_get_noresume(dev->dev);
8246
8247                         acrtc->enabled = true;
8248                         acrtc->hw_mode = new_crtc_state->mode;
8249                         crtc->hwmode = new_crtc_state->mode;
8250                         mode_set_reset_required = true;
8251                 } else if (modereset_required(new_crtc_state)) {
8252                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8253                         /* i.e. reset mode */
8254                         if (dm_old_crtc_state->stream)
8255                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8256                         mode_set_reset_required = true;
8257                 }
8258         } /* for_each_crtc_in_state() */
8259
8260         if (dc_state) {
8261                 /* if there is a mode set or reset, disable eDP PSR */
8262                 if (mode_set_reset_required)
8263                         amdgpu_dm_psr_disable_all(dm);
8264
8265                 dm_enable_per_frame_crtc_master_sync(dc_state);
8266                 mutex_lock(&dm->dc_lock);
8267                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8268                 mutex_unlock(&dm->dc_lock);
8269         }
8270
8271         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8272                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8273
8274                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8275
8276                 if (dm_new_crtc_state->stream != NULL) {
8277                         const struct dc_stream_status *status =
8278                                         dc_stream_get_status(dm_new_crtc_state->stream);
8279
8280                         if (!status)
8281                                 status = dc_stream_get_status_from_state(dc_state,
8282                                                                          dm_new_crtc_state->stream);
8283                         if (!status)
8284                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8285                         else
8286                                 acrtc->otg_inst = status->primary_otg_inst;
8287                 }
8288         }
8289 #ifdef CONFIG_DRM_AMD_DC_HDCP
8290         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8291                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8292                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8293                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8294
8295                 new_crtc_state = NULL;
8296
8297                 if (acrtc)
8298                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8299
8300                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8301
8302                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8303                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8304                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8305                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8306                         dm_new_con_state->update_hdcp = true;
8307                         continue;
8308                 }
8309
8310                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8311                         hdcp_update_display(
8312                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8313                                 new_con_state->hdcp_content_type,
8314                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8316         }
8317 #endif
8318
8319         /* Handle connector state changes */
8320         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8321                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8322                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8323                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8324                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8325                 struct dc_stream_update stream_update;
8326                 struct dc_info_packet hdr_packet;
8327                 struct dc_stream_status *status = NULL;
8328                 bool abm_changed, hdr_changed, scaling_changed;
8329
8330                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8331                 memset(&stream_update, 0, sizeof(stream_update));
8332
8333                 if (acrtc) {
8334                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8335                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8336                 }
8337
8338                 /* Skip any modesets/resets */
8339                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8340                         continue;
8341
8342                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8343                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8344
8345                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8346                                                              dm_old_con_state);
8347
8348                 abm_changed = dm_new_crtc_state->abm_level !=
8349                               dm_old_crtc_state->abm_level;
8350
8351                 hdr_changed =
8352                         is_hdr_metadata_different(old_con_state, new_con_state);
8353
8354                 if (!scaling_changed && !abm_changed && !hdr_changed)
8355                         continue;
8356
8357                 stream_update.stream = dm_new_crtc_state->stream;
8358                 if (scaling_changed) {
8359                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8360                                         dm_new_con_state, dm_new_crtc_state->stream);
8361
8362                         stream_update.src = dm_new_crtc_state->stream->src;
8363                         stream_update.dst = dm_new_crtc_state->stream->dst;
8364                 }
8365
8366                 if (abm_changed) {
8367                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8368
8369                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8370                 }
8371
8372                 if (hdr_changed) {
8373                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8374                         stream_update.hdr_static_metadata = &hdr_packet;
8375                 }
8376
8377                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8378                 WARN_ON(!status);
8379                 WARN_ON(!status->plane_count);
8380
8381                 /*
8382                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8383                  * Here we create an empty update on each plane.
8384                  * To fix this, DC should permit updating only stream properties.
8385                  */
8386                 for (j = 0; j < status->plane_count; j++)
8387                         dummy_updates[j].surface = status->plane_states[0];
8388
8389
8390                 mutex_lock(&dm->dc_lock);
8391                 dc_commit_updates_for_stream(dm->dc,
8392                                                      dummy_updates,
8393                                                      status->plane_count,
8394                                                      dm_new_crtc_state->stream,
8395                                                      &stream_update,
8396                                                      dc_state);
8397                 mutex_unlock(&dm->dc_lock);
8398         }
8399
8400         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8401         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8402                                       new_crtc_state, i) {
8403                 if (old_crtc_state->active && !new_crtc_state->active)
8404                         crtc_disable_count++;
8405
8406                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8407                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8408
8409                 /* Update the freesync config on the crtc state and the irq params */
8410                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8411
8412                 /* Handle vrr on->off / off->on transitions */
8413                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8414                                                 dm_new_crtc_state);
8415         }
8416
8417         /**
8418          * Enable interrupts for CRTCs that are newly enabled or went through
8419          * a modeset. This is intentionally deferred until after the front end
8420          * state is modified, so that the OTG is on by the time the IRQ
8421          * handlers run and they do not access stale or invalid state.
8422          */
8423         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8424                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8425
8426                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8427
8428                 if (new_crtc_state->active &&
8429                     (!old_crtc_state->active ||
8430                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8431                         dc_stream_retain(dm_new_crtc_state->stream);
8432                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8433                         manage_dm_interrupts(adev, acrtc, true);
8434
8435 #ifdef CONFIG_DEBUG_FS
8436                         /**
8437                          * Frontend may have changed so reapply the CRC capture
8438                          * settings for the stream.
8439                          */
8440                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8441
8442                         if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8443                                 amdgpu_dm_crtc_configure_crc_source(
8444                                         crtc, dm_new_crtc_state,
8445                                         dm_new_crtc_state->crc_src);
8446                         }
8447 #endif
8448                 }
8449         }
8450
8451         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8452                 if (new_crtc_state->async_flip)
8453                         wait_for_vblank = false;
8454
8455         /* Update planes when needed, per CRTC */
8456         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8457                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8458
8459                 if (dm_new_crtc_state->stream)
8460                         amdgpu_dm_commit_planes(state, dc_state, dev,
8461                                                 dm, crtc, wait_for_vblank);
8462         }
8463
8464         /* Update audio instances for each connector. */
8465         amdgpu_dm_commit_audio(dev, state);
8466
8467         /*
8468          * Send vblank events for all CRTCs not handled in the flip path, and
8469          * mark the events consumed for drm_atomic_helper_commit_hw_done.
8470          */
8471         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8472         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8473
8474                 if (new_crtc_state->event)
8475                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8476
8477                 new_crtc_state->event = NULL;
8478         }
8479         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8480
8481         /* Signal HW programming completion */
8482         drm_atomic_helper_commit_hw_done(state);
8483
8484         if (wait_for_vblank)
8485                 drm_atomic_helper_wait_for_flip_done(dev, state);
8486
8487         drm_atomic_helper_cleanup_planes(dev, state);
8488
8489         /* return the stolen vga memory back to VRAM */
8490         if (!adev->mman.keep_stolen_vga_memory)
8491                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8492         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8493
8494         /*
8495          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8496          * so we can put the GPU into runtime suspend if we're not driving any
8497          * displays anymore.
8498          */
8499         for (i = 0; i < crtc_disable_count; i++)
8500                 pm_runtime_put_autosuspend(dev->dev);
8501         pm_runtime_mark_last_busy(dev->dev);
8502
8503         if (dc_state_temp)
8504                 dc_release_state(dc_state_temp);
8505 }
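/*
 * Editorial summary of the ordering in amdgpu_dm_atomic_commit_tail()
 * above (descriptive only):
 *  1. Disable interrupts and release streams for CRTCs turning off or
 *     undergoing a modeset.
 *  2. dc_commit_state() programs the new global DC state (PSR is
 *     disabled first if any mode set/reset happened).
 *  3. HDCP updates, then connector-only stream updates (scaling, ABM,
 *     HDR metadata) via dummy surface updates.
 *  4. Freesync/VRR parameters are refreshed, then interrupts are
 *     re-enabled once the OTG is running (CRC sources reapplied).
 *  5. Per-CRTC plane programming, audio notification, and vblank/flip
 *     event delivery.
 *  6. commit_hw_done, optional wait for flip done, plane cleanup.
 *  7. One runtime-PM put per newly disabled CRTC.
 */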
8506
8507
8508 static int dm_force_atomic_commit(struct drm_connector *connector)
8509 {
8510         int ret = 0;
8511         struct drm_device *ddev = connector->dev;
8512         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8513         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8514         struct drm_plane *plane = disconnected_acrtc->base.primary;
8515         struct drm_connector_state *conn_state;
8516         struct drm_crtc_state *crtc_state;
8517         struct drm_plane_state *plane_state;
8518
8519         if (!state)
8520                 return -ENOMEM;
8521
8522         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8523
8524         /* Construct an atomic state to restore the previous display settings */
8525
8526         /*
8527          * Attach connectors to drm_atomic_state
8528          */
8529         conn_state = drm_atomic_get_connector_state(state, connector);
8530
8531         ret = PTR_ERR_OR_ZERO(conn_state);
8532         if (ret)
8533                 goto out;
8534
8535         /* Attach CRTC to drm_atomic_state */
8536         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8537
8538         ret = PTR_ERR_OR_ZERO(crtc_state);
8539         if (ret)
8540                 goto out;
8541
8542         /* force a restore */
8543         crtc_state->mode_changed = true;
8544
8545         /* Attach plane to drm_atomic_state */
8546         plane_state = drm_atomic_get_plane_state(state, plane);
8547
8548         ret = PTR_ERR_OR_ZERO(plane_state);
8549         if (ret)
8550                 goto out;
8551
8552         /* Call commit internally with the state we just constructed */
8553         ret = drm_atomic_commit(state);
8554
8555 out:
8556         drm_atomic_state_put(state);
8557         if (ret)
8558                 DRM_ERROR("Restoring old state failed with %i\n", ret);
8559
8560         return ret;
8561 }
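/*
 * Editorial note: drm_atomic_state_put() runs on success and failure
 * alike. The state was allocated with one reference by
 * drm_atomic_state_alloc(), and the commit machinery takes its own
 * references as needed, so the local reference must always be dropped.
 */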
8562
8563 /*
8564  * This function handles all cases when a set mode does not come upon hotplug.
8565  * This includes when a display is unplugged and then plugged back into the
8566  * same port, and when running without usermode desktop manager support.
8567  */
8568 void dm_restore_drm_connector_state(struct drm_device *dev,
8569                                     struct drm_connector *connector)
8570 {
8571         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8572         struct amdgpu_crtc *disconnected_acrtc;
8573         struct dm_crtc_state *acrtc_state;
8574
8575         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8576                 return;
8577
8578         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8579         if (!disconnected_acrtc)
8580                 return;
8581
8582         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8583         if (!acrtc_state->stream)
8584                 return;
8585
8586         /*
8587          * If the previous sink is not released and is different from the current
8588          * one, we deduce we are in a state where we cannot rely on a usermode
8589          * call to turn on the display, so we do it here.
8590          */
8591         if (acrtc_state->stream->sink != aconnector->dc_sink)
8592                 dm_force_atomic_commit(&aconnector->base);
8593 }
8594
8595 /*
8596  * Grabs all modesetting locks to serialize against any blocking commits, and
8597  * waits for completion of all non-blocking commits.
8598  */
8599 static int do_aquire_global_lock(struct drm_device *dev,
8600                                  struct drm_atomic_state *state)
8601 {
8602         struct drm_crtc *crtc;
8603         struct drm_crtc_commit *commit;
8604         long ret;
8605
8606         /*
8607          * Adding all modeset locks to acquire_ctx will
8608          * ensure that when the framework releases it, the
8609          * extra locks we are taking here will get released too.
8610          */
8611         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8612         if (ret)
8613                 return ret;
8614
8615         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8616                 spin_lock(&crtc->commit_lock);
8617                 commit = list_first_entry_or_null(&crtc->commit_list,
8618                                 struct drm_crtc_commit, commit_entry);
8619                 if (commit)
8620                         drm_crtc_commit_get(commit);
8621                 spin_unlock(&crtc->commit_lock);
8622
8623                 if (!commit)
8624                         continue;
8625
8626                 /*
8627                  * Make sure all pending HW programming has completed and
8628                  * all page flips are done.
8629                  */
8630                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8631
8632                 if (ret > 0)
8633                         ret = wait_for_completion_interruptible_timeout(
8634                                         &commit->flip_done, 10*HZ);
8635
8636                 if (ret == 0)
8637                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8638                                   "timed out\n", crtc->base.id, crtc->name);
8639
8640                 drm_crtc_commit_put(commit);
8641         }
8642
8643         return ret < 0 ? ret : 0;
8644 }
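/*
 * Editorial note on the waits above:
 * wait_for_completion_interruptible_timeout() returns the remaining
 * timeout (> 0) on completion, 0 on timeout, and -ERESTARTSYS when
 * interrupted. Hence the "ret > 0" chaining into the flip_done wait,
 * the "ret == 0" timeout message, and the final "ret < 0 ? ret : 0"
 * that forwards only real errors to the caller.
 */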
8645
8646 static void get_freesync_config_for_crtc(
8647         struct dm_crtc_state *new_crtc_state,
8648         struct dm_connector_state *new_con_state)
8649 {
8650         struct mod_freesync_config config = {0};
8651         struct amdgpu_dm_connector *aconnector =
8652                         to_amdgpu_dm_connector(new_con_state->base.connector);
8653         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8654         int vrefresh = drm_mode_vrefresh(mode);
8655
8656         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8657                                         vrefresh >= aconnector->min_vfreq &&
8658                                         vrefresh <= aconnector->max_vfreq;
8659
8660         if (new_crtc_state->vrr_supported) {
8661                 new_crtc_state->stream->ignore_msa_timing_param = true;
8662                 config.state = new_crtc_state->base.vrr_enabled ?
8663                                 VRR_STATE_ACTIVE_VARIABLE :
8664                                 VRR_STATE_INACTIVE;
8665                 config.min_refresh_in_uhz =
8666                                 aconnector->min_vfreq * 1000000;
8667                 config.max_refresh_in_uhz =
8668                                 aconnector->max_vfreq * 1000000;
8669                 config.vsif_supported = true;
8670                 config.btr = true;
8671         }
8672
8673         new_crtc_state->freesync_config = config;
8674 }
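/*
 * Editorial example: DC takes refresh rates in micro-Hz. For a panel
 * reporting min_vfreq = 48 and max_vfreq = 144 (Hz), the config above
 * spans 48,000,000..144,000,000 uHz, and a mode with
 * drm_mode_vrefresh() == 60 falls inside that range, so vrr_supported
 * is true whenever the connector is also freesync_capable.
 */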
8675
8676 static void reset_freesync_config_for_crtc(
8677         struct dm_crtc_state *new_crtc_state)
8678 {
8679         new_crtc_state->vrr_supported = false;
8680
8681         memset(&new_crtc_state->vrr_infopacket, 0,
8682                sizeof(new_crtc_state->vrr_infopacket));
8683 }
8684
8685 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8686                                 struct drm_atomic_state *state,
8687                                 struct drm_crtc *crtc,
8688                                 struct drm_crtc_state *old_crtc_state,
8689                                 struct drm_crtc_state *new_crtc_state,
8690                                 bool enable,
8691                                 bool *lock_and_validation_needed)
8692 {
8693         struct dm_atomic_state *dm_state = NULL;
8694         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8695         struct dc_stream_state *new_stream;
8696         int ret = 0;
8697
8698         /*
8699          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
8700          * dc_validation_set; update changed items there.
8701          */
8702         struct amdgpu_crtc *acrtc = NULL;
8703         struct amdgpu_dm_connector *aconnector = NULL;
8704         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8705         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8706
8707         new_stream = NULL;
8708
8709         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8710         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8711         acrtc = to_amdgpu_crtc(crtc);
8712         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8713
8714         /* TODO This hack should go away */
8715         if (aconnector && enable) {
8716                 /* Make sure a fake sink is created in the plug-in scenario */
8717                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8718                                                             &aconnector->base);
8719                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8720                                                             &aconnector->base);
8721
8722                 if (IS_ERR(drm_new_conn_state)) {
8723                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8724                         goto fail;
8725                 }
8726
8727                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8728                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8729
8730                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8731                         goto skip_modeset;
8732
8733                 new_stream = create_validate_stream_for_sink(aconnector,
8734                                                              &new_crtc_state->mode,
8735                                                              dm_new_conn_state,
8736                                                              dm_old_crtc_state->stream);
8737
8738                 /*
8739                  * We can have no stream on ACTION_SET if a display
8740                  * was disconnected during S3. In that case it is not an
8741                  * error: the OS will be updated after detection and
8742                  * will do the right thing on the next atomic commit.
8743                  */
8744
8745                 if (!new_stream) {
8746                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8747                                         __func__, acrtc->base.base.id);
8748                         ret = -ENOMEM;
8749                         goto fail;
8750                 }
8751
8752                 /*
8753                  * TODO: Check VSDB bits to decide whether this should
8754                  * be enabled or not.
8755                  */
8756                 new_stream->triggered_crtc_reset.enabled =
8757                         dm->force_timing_sync;
8758
8759                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8760
8761                 ret = fill_hdr_info_packet(drm_new_conn_state,
8762                                            &new_stream->hdr_static_metadata);
8763                 if (ret)
8764                         goto fail;
8765
8766                 /*
8767                  * If we already removed the old stream from the context
8768                  * (and set the new stream to NULL) then we can't reuse
8769                  * the old stream even if the stream and scaling are unchanged.
8770                  * We'll hit the BUG_ON and get a black screen.
8771                  *
8772                  * TODO: Refactor this function to allow this check to work
8773                  * in all conditions.
8774                  */
8775                 if (dm_new_crtc_state->stream &&
8776                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8777                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8778                         new_crtc_state->mode_changed = false;
8779                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8780                                          new_crtc_state->mode_changed);
8781                 }
8782         }
8783
8784         /* mode_changed flag may get updated above, need to check again */
8785         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8786                 goto skip_modeset;
8787
8788         DRM_DEBUG_DRIVER(
8789                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8790                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8791                 "connectors_changed:%d\n",
8792                 acrtc->crtc_id,
8793                 new_crtc_state->enable,
8794                 new_crtc_state->active,
8795                 new_crtc_state->planes_changed,
8796                 new_crtc_state->mode_changed,
8797                 new_crtc_state->active_changed,
8798                 new_crtc_state->connectors_changed);
8799
8800         /* Remove stream for any changed/disabled CRTC */
8801         if (!enable) {
8802
8803                 if (!dm_old_crtc_state->stream)
8804                         goto skip_modeset;
8805
8806                 ret = dm_atomic_get_state(state, &dm_state);
8807                 if (ret)
8808                         goto fail;
8809
8810                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8811                                 crtc->base.id);
8812
8813                 /* i.e. reset mode */
8814                 if (dc_remove_stream_from_ctx(
8815                                 dm->dc,
8816                                 dm_state->context,
8817                                 dm_old_crtc_state->stream) != DC_OK) {
8818                         ret = -EINVAL;
8819                         goto fail;
8820                 }
8821
8822                 dc_stream_release(dm_old_crtc_state->stream);
8823                 dm_new_crtc_state->stream = NULL;
8824
8825                 reset_freesync_config_for_crtc(dm_new_crtc_state);
8826
8827                 *lock_and_validation_needed = true;
8828
8829         } else {/* Add stream for any updated/enabled CRTC */
8830                 /*
8831                  * Quick fix to prevent a NULL pointer dereference on new_stream
8832                  * when MST connectors added in chained mode are not found in the
8833                  * existing crtc_state. TODO: dig out the root cause of that.
8834                  */
8835                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8836                         goto skip_modeset;
8837
8838                 if (modereset_required(new_crtc_state))
8839                         goto skip_modeset;
8840
8841                 if (modeset_required(new_crtc_state, new_stream,
8842                                      dm_old_crtc_state->stream)) {
8843
8844                         WARN_ON(dm_new_crtc_state->stream);
8845
8846                         ret = dm_atomic_get_state(state, &dm_state);
8847                         if (ret)
8848                                 goto fail;
8849
8850                         dm_new_crtc_state->stream = new_stream;
8851
8852                         dc_stream_retain(new_stream);
8853
8854                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8855                                                 crtc->base.id);
8856
8857                         if (dc_add_stream_to_ctx(
8858                                         dm->dc,
8859                                         dm_state->context,
8860                                         dm_new_crtc_state->stream) != DC_OK) {
8861                                 ret = -EINVAL;
8862                                 goto fail;
8863                         }
8864
8865                         *lock_and_validation_needed = true;
8866                 }
8867         }
8868
8869 skip_modeset:
8870         /* Release extra reference */
8871         if (new_stream)
8872                  dc_stream_release(new_stream);
8873
8874         /*
8875          * We want to do dc stream updates that do not require a
8876          * full modeset below.
8877          */
8878         if (!(enable && aconnector && new_crtc_state->active))
8879                 return 0;
8880         /*
8881          * Given above conditions, the dc state cannot be NULL because:
8882          * 1. The CRTC is being enabled (its stream has just been added
8883          *    to the dc context, or is already on the context),
8884          * 2. it has a valid connector attached, and
8885          * 3. it is currently active and enabled.
8886          * => The dc stream state currently exists.
8887          */
8888         BUG_ON(dm_new_crtc_state->stream == NULL);
8889
8890         /* Scaling or underscan settings */
8891         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8892                 update_stream_scaling_settings(
8893                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8894
8895         /* ABM settings */
8896         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8897
8898         /*
8899          * Color management settings. We also update color properties
8900          * when a modeset is needed, to ensure it gets reprogrammed.
8901          */
8902         if (dm_new_crtc_state->base.color_mgmt_changed ||
8903             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8904                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8905                 if (ret)
8906                         goto fail;
8907         }
8908
8909         /* Update Freesync settings. */
8910         get_freesync_config_for_crtc(dm_new_crtc_state,
8911                                      dm_new_conn_state);
8912
8913         return ret;
8914
8915 fail:
8916         if (new_stream)
8917                 dc_stream_release(new_stream);
8918         return ret;
8919 }
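/*
 * Editorial sketch (hypothetical helper, not driver code): the stream
 * add/remove protocol used above, with the fake-sink, MST and error
 * bookkeeping trimmed away. The extra retain below belongs to the CRTC
 * state; in the real function the creation reference on new_stream is
 * released separately at skip_modeset.
 */
static int sketch_toggle_stream(struct dc *dc, struct dc_state *context,
				struct dm_crtc_state *dm_old_state,
				struct dm_crtc_state *dm_new_state,
				struct dc_stream_state *new_stream,
				bool enable)
{
	if (!enable) {
		if (dc_remove_stream_from_ctx(dc, context,
					      dm_old_state->stream) != DC_OK)
			return -EINVAL;

		dc_stream_release(dm_old_state->stream);
		dm_new_state->stream = NULL;
		return 0;
	}

	dm_new_state->stream = new_stream;
	dc_stream_retain(new_stream);	/* reference held by the CRTC state */

	if (dc_add_stream_to_ctx(dc, context, dm_new_state->stream) != DC_OK)
		return -EINVAL;

	return 0;
}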
8920
8921 static bool should_reset_plane(struct drm_atomic_state *state,
8922                                struct drm_plane *plane,
8923                                struct drm_plane_state *old_plane_state,
8924                                struct drm_plane_state *new_plane_state)
8925 {
8926         struct drm_plane *other;
8927         struct drm_plane_state *old_other_state, *new_other_state;
8928         struct drm_crtc_state *new_crtc_state;
8929         int i;
8930
8931         /*
8932          * TODO: Remove this hack once the checks below are sufficient
8933          * to determine when we need to reset all the planes on
8934          * the stream.
8935          */
8936         if (state->allow_modeset)
8937                 return true;
8938
8939         /* Exit early if we know that we're adding or removing the plane. */
8940         if (old_plane_state->crtc != new_plane_state->crtc)
8941                 return true;
8942
8943         /* old crtc == new_crtc == NULL, plane not in context. */
8944         if (!new_plane_state->crtc)
8945                 return false;
8946
8947         new_crtc_state =
8948                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8949
8950         if (!new_crtc_state)
8951                 return true;
8952
8953         /* CRTC Degamma changes currently require us to recreate planes. */
8954         if (new_crtc_state->color_mgmt_changed)
8955                 return true;
8956
8957         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8958                 return true;
8959
8960         /*
8961          * If there are any new primary or overlay planes being added or
8962          * removed then the z-order can potentially change. To ensure
8963          * correct z-order and pipe acquisition the current DC architecture
8964          * requires us to remove and recreate all existing planes.
8965          *
8966          * TODO: Come up with a more elegant solution for this.
8967          */
8968         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8969                 struct amdgpu_framebuffer *old_afb, *new_afb;
8970                 if (other->type == DRM_PLANE_TYPE_CURSOR)
8971                         continue;
8972
8973                 if (old_other_state->crtc != new_plane_state->crtc &&
8974                     new_other_state->crtc != new_plane_state->crtc)
8975                         continue;
8976
8977                 if (old_other_state->crtc != new_other_state->crtc)
8978                         return true;
8979
8980                 /* Src/dst size and scaling updates. */
8981                 if (old_other_state->src_w != new_other_state->src_w ||
8982                     old_other_state->src_h != new_other_state->src_h ||
8983                     old_other_state->crtc_w != new_other_state->crtc_w ||
8984                     old_other_state->crtc_h != new_other_state->crtc_h)
8985                         return true;
8986
8987                 /* Rotation / mirroring updates. */
8988                 if (old_other_state->rotation != new_other_state->rotation)
8989                         return true;
8990
8991                 /* Blending updates. */
8992                 if (old_other_state->pixel_blend_mode !=
8993                     new_other_state->pixel_blend_mode)
8994                         return true;
8995
8996                 /* Alpha updates. */
8997                 if (old_other_state->alpha != new_other_state->alpha)
8998                         return true;
8999
9000                 /* Colorspace changes. */
9001                 if (old_other_state->color_range != new_other_state->color_range ||
9002                     old_other_state->color_encoding != new_other_state->color_encoding)
9003                         return true;
9004
9005                 /* Framebuffer checks fall at the end. */
9006                 if (!old_other_state->fb || !new_other_state->fb)
9007                         continue;
9008
9009                 /* Pixel format changes can require bandwidth updates. */
9010                 if (old_other_state->fb->format != new_other_state->fb->format)
9011                         return true;
9012
9013                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9014                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9015
9016                 /* Tiling and DCC changes also require bandwidth updates. */
9017                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9018                     old_afb->base.modifier != new_afb->base.modifier)
9019                         return true;
9020         }
9021
9022         return false;
9023 }
9024
9025 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9026                               struct drm_plane_state *new_plane_state,
9027                               struct drm_framebuffer *fb)
9028 {
9029         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9030         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9031         unsigned int pitch;
9032         bool linear;
9033
9034         if (fb->width > new_acrtc->max_cursor_width ||
9035             fb->height > new_acrtc->max_cursor_height) {
9036                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9037                                  fb->width,
9038                                  fb->height);
9039                 return -EINVAL;
9040         }
9041         if (new_plane_state->src_w != fb->width << 16 ||
9042             new_plane_state->src_h != fb->height << 16) {
9043                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9044                 return -EINVAL;
9045         }
9046
9047         /* Pitch in pixels */
9048         pitch = fb->pitches[0] / fb->format->cpp[0];
9049
9050         if (fb->width != pitch) {
9051                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9052                                  fb->width, pitch);
9053                 return -EINVAL;
9054         }
9055
9056         switch (pitch) {
9057         case 64:
9058         case 128:
9059         case 256:
9060                 /* FB pitch is supported by cursor plane */
9061                 break;
9062         default:
9063                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9064                 return -EINVAL;
9065         }
9066
9067         /* Core DRM takes care of checking FB modifiers, so we only need to
9068          * check tiling flags when the FB doesn't have a modifier. */
9069         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9070                 if (adev->family < AMDGPU_FAMILY_AI) {
9071                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9072                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9073                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9074                 } else {
9075                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9076                 }
9077                 if (!linear) {
9078                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9079                         return -EINVAL;
9080                 }
9081         }
9082
9083         return 0;
9084 }
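/*
 * Editorial example of the pitch check above: a 64x64 ARGB8888 cursor
 * FB has fb->pitches[0] = 256 bytes and fb->format->cpp[0] = 4, so
 * pitch = 256 / 4 = 64 pixels. That matches fb->width and is one of
 * the pitches (64/128/256) the cursor plane accepts, so the FB passes.
 */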
9085
9086 static int dm_update_plane_state(struct dc *dc,
9087                                  struct drm_atomic_state *state,
9088                                  struct drm_plane *plane,
9089                                  struct drm_plane_state *old_plane_state,
9090                                  struct drm_plane_state *new_plane_state,
9091                                  bool enable,
9092                                  bool *lock_and_validation_needed)
9093 {
9094
9095         struct dm_atomic_state *dm_state = NULL;
9096         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9097         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9098         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9099         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9100         struct amdgpu_crtc *new_acrtc;
9101         bool needs_reset;
9102         int ret = 0;
9103
9104
9105         new_plane_crtc = new_plane_state->crtc;
9106         old_plane_crtc = old_plane_state->crtc;
9107         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9108         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9109
9110         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9111                 if (!enable || !new_plane_crtc ||
9112                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9113                         return 0;
9114
9115                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9116
9117                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9118                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9119                         return -EINVAL;
9120                 }
9121
9122                 if (new_plane_state->fb) {
9123                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9124                                                  new_plane_state->fb);
9125                         if (ret)
9126                                 return ret;
9127                 }
9128
9129                 return 0;
9130         }
9131
9132         needs_reset = should_reset_plane(state, plane, old_plane_state,
9133                                          new_plane_state);
9134
9135         /* Remove any changed/removed planes */
9136         if (!enable) {
9137                 if (!needs_reset)
9138                         return 0;
9139
9140                 if (!old_plane_crtc)
9141                         return 0;
9142
9143                 old_crtc_state = drm_atomic_get_old_crtc_state(
9144                                 state, old_plane_crtc);
9145                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9146
9147                 if (!dm_old_crtc_state->stream)
9148                         return 0;
9149
9150                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9151                                 plane->base.id, old_plane_crtc->base.id);
9152
9153                 ret = dm_atomic_get_state(state, &dm_state);
9154                 if (ret)
9155                         return ret;
9156
9157                 if (!dc_remove_plane_from_context(
9158                                 dc,
9159                                 dm_old_crtc_state->stream,
9160                                 dm_old_plane_state->dc_state,
9161                                 dm_state->context)) {
9162
9163                         return -EINVAL;
9164                 }
9165
9166
9167                 dc_plane_state_release(dm_old_plane_state->dc_state);
9168                 dm_new_plane_state->dc_state = NULL;
9169
9170                 *lock_and_validation_needed = true;
9171
9172         } else { /* Add new planes */
9173                 struct dc_plane_state *dc_new_plane_state;
9174
9175                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9176                         return 0;
9177
9178                 if (!new_plane_crtc)
9179                         return 0;
9180
9181                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9182                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9183
9184                 if (!dm_new_crtc_state->stream)
9185                         return 0;
9186
9187                 if (!needs_reset)
9188                         return 0;
9189
9190                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9191                 if (ret)
9192                         return ret;
9193
9194                 WARN_ON(dm_new_plane_state->dc_state);
9195
9196                 dc_new_plane_state = dc_create_plane_state(dc);
9197                 if (!dc_new_plane_state)
9198                         return -ENOMEM;
9199
9200                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9201                                 plane->base.id, new_plane_crtc->base.id);
9202
9203                 ret = fill_dc_plane_attributes(
9204                         drm_to_adev(new_plane_crtc->dev),
9205                         dc_new_plane_state,
9206                         new_plane_state,
9207                         new_crtc_state);
9208                 if (ret) {
9209                         dc_plane_state_release(dc_new_plane_state);
9210                         return ret;
9211                 }
9212
9213                 ret = dm_atomic_get_state(state, &dm_state);
9214                 if (ret) {
9215                         dc_plane_state_release(dc_new_plane_state);
9216                         return ret;
9217                 }
9218
9219                 /*
9220                  * Any atomic check errors that occur after this will
9221                  * not need a release. The plane state will be attached
9222                  * to the stream, and therefore part of the atomic
9223                  * state. It'll be released when the atomic state is
9224                  * cleaned.
9225                  */
9226                 if (!dc_add_plane_to_context(
9227                                 dc,
9228                                 dm_new_crtc_state->stream,
9229                                 dc_new_plane_state,
9230                                 dm_state->context)) {
9231
9232                         dc_plane_state_release(dc_new_plane_state);
9233                         return -EINVAL;
9234                 }
9235
9236                 dm_new_plane_state->dc_state = dc_new_plane_state;
9237
9238                 /* Tell DC to do a full surface update every time there
9239                  * is a plane change. Inefficient, but works for now.
9240                  */
9241                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9242
9243                 *lock_and_validation_needed = true;
9244         }
9245
9246
9247         return ret;
9248 }
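/*
 * Editorial sketch (hypothetical helper, not driver code) of the
 * dc_plane_state ownership handled above: the state starts with one
 * reference from dc_create_plane_state(); after a successful
 * dc_add_plane_to_context() it lives in the atomic state and is
 * released when that state is cleaned, so only failure paths release
 * it explicitly.
 */
static struct dc_plane_state *
sketch_attach_plane(struct dc *dc, struct dc_stream_state *stream,
		    struct dc_state *context)
{
	struct dc_plane_state *plane_state = dc_create_plane_state(dc);

	if (!plane_state)
		return NULL;

	if (!dc_add_plane_to_context(dc, stream, plane_state, context)) {
		dc_plane_state_release(plane_state);	/* still solely ours */
		return NULL;
	}

	return plane_state;	/* now tracked by the atomic state */
}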
9249
9250 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9251                                 struct drm_crtc *crtc,
9252                                 struct drm_crtc_state *new_crtc_state)
9253 {
9254         struct drm_plane_state *new_cursor_state, *new_primary_state;
9255         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9256
9257         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9258          * cursor per pipe but it's going to inherit the scaling and
9259          * positioning from the underlying pipe. Check that the cursor plane's
9260          * scaling matches the primary plane's. */
9261
9262         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9263         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9264         if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9265                 return 0;
9266         }
9267
9268         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9269                          (new_cursor_state->src_w >> 16);
9270         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9271                          (new_cursor_state->src_h >> 16);
9272
9273         primary_scale_w = new_primary_state->crtc_w * 1000 /
9274                          (new_primary_state->src_w >> 16);
9275         primary_scale_h = new_primary_state->crtc_h * 1000 /
9276                          (new_primary_state->src_h >> 16);
9277
9278         if (cursor_scale_w != primary_scale_w ||
9279             cursor_scale_h != primary_scale_h) {
9280                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9281                 return -EINVAL;
9282         }
9283
9284         return 0;
9285 }
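
/*
 * Worked example for the scale check above (illustrative numbers): a 64x64
 * cursor displayed at 64x64 has a scale of 64 * 1000 / 64 = 1000 (units of
 * 0.001). If the primary plane scans a 1920-wide source out to a 3840-wide
 * CRTC, its scale is 3840 * 1000 / 1920 = 2000; the scales differ, so the
 * check fails with -EINVAL, since the shared per-pipe scaler cannot apply
 * both factors at once.
 */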
9286
9287 #if defined(CONFIG_DRM_AMD_DC_DCN)
9288 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9289 {
9290         struct drm_connector *connector;
9291         struct drm_connector_state *conn_state;
9292         struct amdgpu_dm_connector *aconnector = NULL;
9293         int i;
9294         for_each_new_connector_in_state(state, connector, conn_state, i) {
9295                 if (conn_state->crtc != crtc)
9296                         continue;
9297
9298                 aconnector = to_amdgpu_dm_connector(connector);
9299                 if (!aconnector->port || !aconnector->mst_port)
9300                         aconnector = NULL;
9301                 else
9302                         break;
9303         }
9304
9305         if (!aconnector)
9306                 return 0;
9307
9308         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9309 }
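
/*
 * DSC bandwidth on an MST link is shared by every stream in the topology, so
 * a modeset on one MST CRTC can change the DSC configuration of its siblings.
 * Pulling all affected CRTCs into the atomic state here lets the later global
 * validation recompute DSC for the whole topology at once.
 */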
9310 #endif
9311
9312 /**
9313  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9314  * @dev: The DRM device
9315  * @state: The atomic state to commit
9316  *
9317  * Validate that the given atomic state is programmable by DC into hardware.
9318  * This involves constructing a &struct dc_state reflecting the new hardware
9319  * state we wish to commit, then querying DC to see if it is programmable. It's
9320  * important not to modify the existing DC state. Otherwise, atomic_check
9321  * may unexpectedly commit hardware changes.
9322  *
9323  * When validating the DC state, it's important that the right locks are
9324  * acquired. For the full-update case, which removes/adds/updates streams on
9325  * one CRTC while flipping on another, acquiring the global lock guarantees
9326  * that any such full-update commit will wait for completion of any
9327  * outstanding flip using DRM's synchronization events.
9328  *
9329  * Note that DM adds the affected connectors for all CRTCs in state, even when
9330  * that might not seem necessary. This is because DC stream creation requires
9331  * the DC sink, which is tied to the DRM connector state. Cleaning this up
9332  * should be possible but non-trivial - a possible TODO item.
9333  *
9334  * Return: 0 on success, negative error code if validation failed.
9335  */
9336 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9337                                   struct drm_atomic_state *state)
9338 {
9339         struct amdgpu_device *adev = drm_to_adev(dev);
9340         struct dm_atomic_state *dm_state = NULL;
9341         struct dc *dc = adev->dm.dc;
9342         struct drm_connector *connector;
9343         struct drm_connector_state *old_con_state, *new_con_state;
9344         struct drm_crtc *crtc;
9345         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9346         struct drm_plane *plane;
9347         struct drm_plane_state *old_plane_state, *new_plane_state;
9348         enum dc_status status;
9349         int ret, i;
9350         bool lock_and_validation_needed = false;
9351         struct dm_crtc_state *dm_old_crtc_state;
9352
9353         trace_amdgpu_dm_atomic_check_begin(state);
9354
9355         ret = drm_atomic_helper_check_modeset(dev, state);
9356         if (ret)
9357                 goto fail;
9358
9359         /* Check connector changes */
9360         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9361                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9362                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9363
9364                 /* Skip connectors that are disabled or already part of a modeset. */
9365                 if (!old_con_state->crtc && !new_con_state->crtc)
9366                         continue;
9367
9368                 if (!new_con_state->crtc)
9369                         continue;
9370
9371                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9372                 if (IS_ERR(new_crtc_state)) {
9373                         ret = PTR_ERR(new_crtc_state);
9374                         goto fail;
9375                 }
9376
9377                 if (dm_old_con_state->abm_level !=
9378                     dm_new_con_state->abm_level)
9379                         new_crtc_state->connectors_changed = true;
9380         }
9381
9382 #if defined(CONFIG_DRM_AMD_DC_DCN)
9383         if (adev->asic_type >= CHIP_NAVI10) {
9384                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9385                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9386                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9387                                 if (ret)
9388                                         goto fail;
9389                         }
9390                 }
9391         }
9392 #endif
9393         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9394                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9395
9396                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9397                     !new_crtc_state->color_mgmt_changed &&
9398                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9399                     !dm_old_crtc_state->dsc_force_changed)
9400                         continue;
9401
9402                 if (!new_crtc_state->enable)
9403                         continue;
9404
9405                 ret = drm_atomic_add_affected_connectors(state, crtc);
9406                 if (ret)
9407                         goto fail;
9408
9409                 ret = drm_atomic_add_affected_planes(state, crtc);
9410                 if (ret)
9411                         goto fail;
9412
9413                 if (dm_old_crtc_state->dsc_force_changed)
9414                         new_crtc_state->mode_changed = true;
9415         }
9416
9417         /*
9418          * Add all primary and overlay planes on the CRTC to the state
9419          * whenever a plane is enabled to maintain correct z-ordering
9420          * and to enable fast surface updates.
9421          */
9422         drm_for_each_crtc(crtc, dev) {
9423                 bool modified = false;
9424
9425                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9426                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9427                                 continue;
9428
9429                         if (new_plane_state->crtc == crtc ||
9430                             old_plane_state->crtc == crtc) {
9431                                 modified = true;
9432                                 break;
9433                         }
9434                 }
9435
9436                 if (!modified)
9437                         continue;
9438
9439                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9440                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9441                                 continue;
9442
9443                         new_plane_state =
9444                                 drm_atomic_get_plane_state(state, plane);
9445
9446                         if (IS_ERR(new_plane_state)) {
9447                                 ret = PTR_ERR(new_plane_state);
9448                                 goto fail;
9449                         }
9450                 }
9451         }
9452
9453         /* Remove existing planes if they are modified */
9454         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9455                 ret = dm_update_plane_state(dc, state, plane,
9456                                             old_plane_state,
9457                                             new_plane_state,
9458                                             false,
9459                                             &lock_and_validation_needed);
9460                 if (ret)
9461                         goto fail;
9462         }
9463
9464         /* Disable all crtcs which require disable */
9465         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9466                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9467                                            old_crtc_state,
9468                                            new_crtc_state,
9469                                            false,
9470                                            &lock_and_validation_needed);
9471                 if (ret)
9472                         goto fail;
9473         }
9474
9475         /* Enable all crtcs which require enable */
9476         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9477                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9478                                            old_crtc_state,
9479                                            new_crtc_state,
9480                                            true,
9481                                            &lock_and_validation_needed);
9482                 if (ret)
9483                         goto fail;
9484         }
9485
9486         /* Add new/modified planes */
9487         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9488                 ret = dm_update_plane_state(dc, state, plane,
9489                                             old_plane_state,
9490                                             new_plane_state,
9491                                             true,
9492                                             &lock_and_validation_needed);
9493                 if (ret)
9494                         goto fail;
9495         }
9496
9497         /* Run this here since we want to validate the streams we created */
9498         ret = drm_atomic_helper_check_planes(dev, state);
9499         if (ret)
9500                 goto fail;
9501
9502         /* Check cursor planes scaling */
9503         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9504                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9505                 if (ret)
9506                         goto fail;
9507         }
9508
9509         if (state->legacy_cursor_update) {
9510                 /*
9511                  * This is a fast cursor update coming from the plane update
9512                  * helper, check if it can be done asynchronously for better
9513                  * performance.
9514                  */
9515                 state->async_update =
9516                         !drm_atomic_helper_async_check(dev, state);
9517
9518                 /*
9519                  * Skip the remaining global validation if this is an async
9520                  * update. Cursor updates can be done without affecting
9521                  * state or bandwidth calcs and this avoids the performance
9522                  * penalty of locking the private state object and
9523                  * allocating a new dc_state.
9524                  */
9525                 if (state->async_update)
9526                         return 0;
9527         }
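
        /*
         * Note: legacy_cursor_update is set by DRM's atomic plane-update
         * helpers for the legacy cursor ioctl path (a best-effort reading of
         * DRM core, not something this driver controls), so the early return
         * above keeps cursor motion cheap: no DM private state, no global
         * lock and no new dc_state allocation.
         */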
9528
9529         /* Check scaling and underscan changes */
9530         /* TODO: Validation of scaling changes was removed because a new
9531          * stream cannot be committed into the context without causing a
9532          * full reset. Need to decide how to handle this.
9533          */
9534         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9535                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9536                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9537                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9538
9539                 /* Skip any modesets/resets */
9540                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9541                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9542                         continue;
9543
9544                 /* Skip anything that is not a scaling or underscan change */
9545                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9546                         continue;
9547
9548                 lock_and_validation_needed = true;
9549         }
9550
9551         /*
9552          * Streams and planes are reset when there are changes that affect
9553          * bandwidth. Anything that affects bandwidth needs to go through
9554          * DC global validation to ensure that the configuration can be applied
9555          * to hardware.
9556          *
9557          * We currently have to stall out here in atomic_check for outstanding
9558          * commits to finish in this case, because our IRQ handlers reference
9559          * DRM state directly - we can end up disabling interrupts too early
9560          * if we don't.
9561          *
9562          * TODO: Remove this stall and drop DM state private objects.
9563          */
9564         if (lock_and_validation_needed) {
9565                 ret = dm_atomic_get_state(state, &dm_state);
9566                 if (ret)
9567                         goto fail;
9568
9569                 ret = do_aquire_global_lock(dev, state);
9570                 if (ret)
9571                         goto fail;
9572
9573 #if defined(CONFIG_DRM_AMD_DC_DCN)
9574                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9575                         goto fail;
9576
9577                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9578                 if (ret)
9579                         goto fail;
9580 #endif
9581
9582                 /*
9583                  * Perform validation of MST topology in the state:
9584                  * We need to perform MST atomic check before calling
9585                  * dc_validate_global_state(), or there is a chance
9586                  * to get stuck in an infinite loop and hang eventually.
9587                  */
9588                 ret = drm_dp_mst_atomic_check(state);
9589                 if (ret)
9590                         goto fail;
9591                 status = dc_validate_global_state(dc, dm_state->context, false);
9592                 if (status != DC_OK) {
9593                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
9594                                        dc_status_to_str(status), status);
9595                         ret = -EINVAL;
9596                         goto fail;
9597                 }
9598         } else {
9599                 /*
9600                  * The commit is a fast update. Fast updates shouldn't change
9601                  * the DC context or affect global validation, and their commit
9602                  * work can be done in parallel with other commits not touching
9603                  * the same resource. If we have a new DC context as part of
9604                  * the DM atomic state from validation we need to free it and
9605                  * retain the existing one instead.
9606                  *
9607                  * Furthermore, since the DM atomic state only contains the DC
9608                  * context and can safely be annulled, we can free the state
9609                  * and clear the associated private object now to free
9610                  * some memory and avoid a possible use-after-free later.
9611                  */
9612
9613                 for (i = 0; i < state->num_private_objs; i++) {
9614                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9615
9616                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9617                                 int j = state->num_private_objs - 1;
9618
9619                                 dm_atomic_destroy_state(obj,
9620                                                 state->private_objs[i].state);
9621
9622                                 /* If i is not at the end of the array then the
9623                                  * last element needs to be moved to where i was
9624                                  * before the array can safely be truncated.
9625                                  */
9626                                 if (i != j)
9627                                         state->private_objs[i] =
9628                                                 state->private_objs[j];
9629
9630                                 state->private_objs[j].ptr = NULL;
9631                                 state->private_objs[j].state = NULL;
9632                                 state->private_objs[j].old_state = NULL;
9633                                 state->private_objs[j].new_state = NULL;
9634
9635                                 state->num_private_objs = j;
9636                                 break;
9637                         }
9638                 }
9639         }
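
        /*
         * Worked example of the swap-remove above (illustrative numbers):
         * with num_private_objs == 3 and the DM object found at i == 0, j is
         * 2, entry 2 is copied into slot 0, slot 2 is cleared and the count
         * drops to 2. Removal is constant-time but does not preserve order,
         * which is fine because the private-object array is unordered.
         */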
9640
9641         /* Store the overall update type for use later in atomic check. */
9642         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9643                 struct dm_crtc_state *dm_new_crtc_state =
9644                         to_dm_crtc_state(new_crtc_state);
9645
9646                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9647                                                          UPDATE_TYPE_FULL :
9648                                                          UPDATE_TYPE_FAST;
9649         }
9650
9651         /* ret must be 0 (success) at this point */
9652         WARN_ON(ret);
9653
9654         trace_amdgpu_dm_atomic_check_finish(state, ret);
9655
9656         return ret;
9657
9658 fail:
9659         if (ret == -EDEADLK)
9660                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9661         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9662                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9663         else
9664                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9665
9666         trace_amdgpu_dm_atomic_check_finish(state, ret);
9667
9668         return ret;
9669 }
9670
9671 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9672                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9673 {
9674         uint8_t dpcd_data;
9675         bool capable = false;
9676
9677         if (amdgpu_dm_connector->dc_link &&
9678                 dm_helpers_dp_read_dpcd(
9679                                 NULL,
9680                                 amdgpu_dm_connector->dc_link,
9681                                 DP_DOWN_STREAM_PORT_COUNT,
9682                                 &dpcd_data,
9683                                 sizeof(dpcd_data))) {
9684                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9685         }
9686
9687         return capable;
9688 }
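
/*
 * Background for the DPCD read above: DP_DOWN_STREAM_PORT_COUNT is DPCD
 * register 0x007, and DP_MSA_TIMING_PAR_IGNORED (bit 6) advertises that the
 * sink can ignore the MSA timing parameters and follow the actual video
 * timing - a prerequisite for driving variable refresh rates over DP/eDP.
 */
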
9689 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9690                                         struct edid *edid)
9691 {
9692         int i;
9693         bool edid_check_required;
9694         struct detailed_timing *timing;
9695         struct detailed_non_pixel *data;
9696         struct detailed_data_monitor_range *range;
9697         struct amdgpu_dm_connector *amdgpu_dm_connector =
9698                         to_amdgpu_dm_connector(connector);
9699         struct dm_connector_state *dm_con_state = NULL;
9700
9701         struct drm_device *dev = connector->dev;
9702         struct amdgpu_device *adev = drm_to_adev(dev);
9703         bool freesync_capable = false;
9704
9705         if (!connector->state) {
9706                 DRM_ERROR("%s - Connector has no state\n", __func__);
9707                 goto update;
9708         }
9709
9710         if (!edid) {
9711                 dm_con_state = to_dm_connector_state(connector->state);
9712
9713                 amdgpu_dm_connector->min_vfreq = 0;
9714                 amdgpu_dm_connector->max_vfreq = 0;
9715                 amdgpu_dm_connector->pixel_clock_mhz = 0;
9716
9717                 goto update;
9718         }
9719
9720         dm_con_state = to_dm_connector_state(connector->state);
9721
9722         edid_check_required = false;
9723         if (!amdgpu_dm_connector->dc_sink) {
9724                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
9725                 goto update;
9726         }
9727         if (!adev->dm.freesync_module)
9728                 goto update;
9729         /*
9730          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP.
9731          */
9732         if (edid) {
9733                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9734                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9735                         edid_check_required = is_dp_capable_without_timing_msa(
9736                                                 adev->dm.dc,
9737                                                 amdgpu_dm_connector);
9738                 }
9739         }
9740         if (edid_check_required && (edid->version > 1 ||
9741            (edid->version == 1 && edid->revision > 1))) {
9742                 for (i = 0; i < 4; i++) {
9743
9744                         timing  = &edid->detailed_timings[i];
9745                         data    = &timing->data.other_data;
9746                         range   = &data->data.range;
9747                         /*
9748                          * Check if monitor has continuous frequency mode
9749                          */
9750                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
9751                                 continue;
9752                         /*
9753                          * Check for the range-limits-only flag. If flags == 1,
9754                          * no additional timing information is provided.
9755                          * Default GTF, GTF secondary curve and CVT are not
9756                          * supported.
9757                          */
9758                         if (range->flags != 1)
9759                                 continue;
9760
9761                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9762                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9763                         amdgpu_dm_connector->pixel_clock_mhz =
9764                                 range->pixel_clock_mhz * 10;
9765
9766                         connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9767                         connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9768
9769                         break;
9770                 }
9771
9772                 if (amdgpu_dm_connector->max_vfreq -
9773                     amdgpu_dm_connector->min_vfreq > 10)
9774                         freesync_capable = true;
9777         }
9778
9779 update:
9780         if (dm_con_state)
9781                 dm_con_state->freesync_capable = freesync_capable;
9782
9783         if (connector->vrr_capable_property)
9784                 drm_connector_set_vrr_capable_property(connector,
9785                                                        freesync_capable);
9786 }
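
/*
 * Worked example for the range-descriptor parsing above (illustrative
 * numbers): a monitor range descriptor advertising min_vfreq = 40 and
 * max_vfreq = 75 yields 75 - 40 = 35 > 10, so freesync_capable becomes true.
 * The raw EDID maximum pixel clock byte is in units of 10 MHz, hence the
 * "* 10" when storing pixel_clock_mhz: a raw value of 15 means 150 MHz.
 */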
9787
9788 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9789 {
9790         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9791
9792         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9793                 return;
9794         if (link->type == dc_connection_none)
9795                 return;
9796         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9797                                         dpcd_data, sizeof(dpcd_data))) {
9798                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9799
9800                 if (dpcd_data[0] == 0) {
9801                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9802                         link->psr_settings.psr_feature_enabled = false;
9803                 } else {
9804                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9805                         link->psr_settings.psr_feature_enabled = true;
9806                 }
9807
9808                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9809         }
9810 }
9811
9812 /*
9813  * amdgpu_dm_link_setup_psr() - configure psr link
9814  * @stream: stream state
9815  *
9816  * Return: true on success
9817  */
9818 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9819 {
9820         struct dc_link *link = NULL;
9821         struct psr_config psr_config = {0};
9822         struct psr_context psr_context = {0};
9823         bool ret = false;
9824
9825         if (stream == NULL)
9826                 return false;
9827
9828         link = stream->link;
9829
9830         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9831
9832         if (psr_config.psr_version > 0) {
9833                 psr_config.psr_exit_link_training_required = 0x1;
9834                 psr_config.psr_frame_capture_indication_req = 0;
9835                 psr_config.psr_rfb_setup_time = 0x37;
9836                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9837                 psr_config.allow_smu_optimizations = 0x0;
9838
9839                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9841         }
9842         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9843
9844         return ret;
9845 }
9846
9847 /*
9848  * amdgpu_dm_psr_enable() - enable psr f/w
9849  * @stream: stream state
9850  *
9851  * Return: true on success
9852  */
9853 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9854 {
9855         struct dc_link *link = stream->link;
9856         unsigned int vsync_rate_hz = 0;
9857         struct dc_static_screen_params params = {0};
9858         /* Calculate the number of static frames before generating an
9859          * interrupt to enter PSR. Initialize to a fail-safe of 2 static
9860          * frames in case the vsync rate computed below is zero.
9861          */
9862         unsigned int num_frames_static = 2;
9863
9864         DRM_DEBUG_DRIVER("Enabling psr...\n");
9865
9866         vsync_rate_hz = div64_u64(div64_u64((
9867                         stream->timing.pix_clk_100hz * 100),
9868                         stream->timing.v_total),
9869                         stream->timing.h_total);
9870
9871         /* Round up: calculate the number of frames such that at least
9872          * 30 ms of time has passed.
9873          */
9875         if (vsync_rate_hz != 0) {
9876                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9877                 num_frames_static = (30000 / frame_time_microsec) + 1;
9878         }
9879
9880         params.triggers.cursor_update = true;
9881         params.triggers.overlay_update = true;
9882         params.triggers.surface_update = true;
9883         params.num_frames = num_frames_static;
9884
9885         dc_stream_set_static_screen_params(link->ctx->dc,
9886                                            &stream, 1,
9887                                            &params);
9888
9889         return dc_link_set_psr_allow_active(link, true, false, false);
9890 }
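
/*
 * Worked example for the computation above (illustrative numbers): for a
 * 1080p60 CEA timing, pix_clk_100hz = 1485000, v_total = 1125 and
 * h_total = 2200, so vsync_rate_hz = 1485000 * 100 / 1125 / 2200 = 60. Then
 * frame_time_microsec = 1000000 / 60 = 16666 and num_frames_static =
 * 30000 / 16666 + 1 = 2, i.e. roughly two full frames (~33 ms) of static
 * content must elapse before the PSR-entry interrupt is generated.
 */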
9891
9892 /*
9893  * amdgpu_dm_psr_disable() - disable psr f/w
9894  * @stream:  stream state
9895  *
9896  * Return: true on success
9897  */
9898 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9899 {
9901         DRM_DEBUG_DRIVER("Disabling psr...\n");
9902
9903         return dc_link_set_psr_allow_active(stream->link, false, true, false);
9904 }
9905
9906 /*
9907  * amdgpu_dm_psr_disable_all() - disable psr f/w
9908  * if psr is enabled on any stream
9909  *
9910  * Return: true on success
9911  */
9912 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9913 {
9914         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9915         return dc_set_psr_allow_active(dm->dc, false);
9916 }
9917
9918 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9919 {
9920         struct amdgpu_device *adev = drm_to_adev(dev);
9921         struct dc *dc = adev->dm.dc;
9922         int i;
9923
9924         mutex_lock(&adev->dm.dc_lock);
9925         if (dc->current_state) {
9926                 for (i = 0; i < dc->current_state->stream_count; ++i)
9927                         dc->current_state->streams[i]
9928                                 ->triggered_crtc_reset.enabled =
9929                                 adev->dm.force_timing_sync;
9930
9931                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9932                 dc_trigger_sync(dc, dc->current_state);
9933         }
9934         mutex_unlock(&adev->dm.dc_lock);
9935 }
9936
9937 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9938                        uint32_t value, const char *func_name)
9939 {
9940 #ifdef DM_CHECK_ADDR_0
9941         if (address == 0) {
9942                 DC_ERR("invalid register write; address = 0\n");
9943                 return;
9944         }
9945 #endif
9946         cgs_write_register(ctx->cgs_device, address, value);
9947         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9948 }
9949
9950 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9951                           const char *func_name)
9952 {
9953         uint32_t value;
9954 #ifdef DM_CHECK_ADDR_0
9955         if (address == 0) {
9956                 DC_ERR("invalid register read; address = 0\n");
9957                 return 0;
9958         }
9959 #endif
9960
9961         if (ctx->dmub_srv &&
9962             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9963             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9964                 ASSERT(false);
9965                 return 0;
9966         }
9967
9968         value = cgs_read_register(ctx->cgs_device, address);
9969
9970         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9971
9972         return value;
9973 }