/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

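                /*
                 * Pack the results back into the legacy register layout the
                 * base driver expects: the low 16 bits carry the vertical
                 * value (v position / vblank start) and the high 16 bits the
                 * horizontal position or vblank end.
                 */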
                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

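/*
 * VRR is considered active when the freesync state is either "active
 * variable" or "active fixed". The _irq variant below reads the per-CRTC
 * interrupt parameters and is therefore safe to call from the interrupt
 * handlers in this file.
 */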
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

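/*
 * A DC timing adjustment is needed when the new state enters fixed-rate VRR,
 * or when the VRR active state differs between the old and new CRTC states.
 */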
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: the common IRQ parameters for the pageflip interrupt
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of the vblank of this flip for flip throttling. We use
         * the cooked hw counter, as that one is incremented at the start of
         * this vblank of pageflip completion, so last_flip_vblank is the
         * forbidden count for queueing new pageflips if vsync + VRR is
         * enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                        frame_duration_ns,
                                                        ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after the end of
                 * front-porch in vrr mode, as vblank timestamping will give
                 * valid results only now that we are past the front-porch.
                 * This will also deliver any page-flip completion events that
                 * were queued to us if a pageflip happened inside the
                 * front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at the start of front-porch is only possible
         * in non-vrr mode, as only then will vblank timestamping give valid
         * results while inside the front-porch. Otherwise defer it to
         * dm_vupdate_high_irq after the end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at the start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback() - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX-command-issuing
 * thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback() - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index >= adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback() - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false otherwise
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
                return false;

        adev->dm.dmub_callback[type] = callback;
        adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

        return true;
}
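
/*
 * Illustrative usage sketch (not an actual call site in this part of the
 * file): register the DMUB HPD notification handler above and have it run
 * on the offload work thread instead of directly in the outbox interrupt:
 *
 *      register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                    dmub_hpd_callback, true);
 */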

static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                                                                                      dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching queued DMUB notifications to
 * their registered callbacks and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
            irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type]) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                                                AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                                                &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

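/*
 * Copy the DMUB firmware into its framebuffer memory windows and bring up
 * the DMCUB hardware. Returns 0 on success, and also when DMUB is simply
 * not supported on this ASIC.
 */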
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

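        /*
         * Firmware blob layout past the ucode array offset (backdoor load):
         * PSP header (PSP_HEADER_BYTES), instruction constants and PSP footer
         * (PSP_FOOTER_BYTES) - all three counted in inst_const_bytes -
         * followed by bss_data_bytes of BSS/data. The PSP header and footer
         * are skipped when copying the instruction constants below.
         */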
        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
                hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
#endif
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

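        /*
         * The aperture registers programmed from these values use coarse
         * granularities: system-aperture addresses are in 256KB units
         * (>> 18), AGP aperture addresses in 16MB units (>> 24), and GART
         * page-table addresses in 4KB units (>> 12). The shifts below
         * convert byte addresses into those units and back.
         */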
        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (add 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /* Control PSR based on vblank requirements from OS */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}

#endif

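/*
 * Deferred handler for HPD RX IRQ offload work: re-detects the sink outside
 * interrupt context and services automated-test requests or DP link loss
 * for the affected link.
 */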
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work;
        struct amdgpu_dm_connector *aconnector;
        struct dc_link *dc_link;
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;

        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;

        if (!aconnector) {
                DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
                goto skip;
        }

        adev = drm_to_adev(aconnector->base.dev);
        dc_link = aconnector->dc_link;

        mutex_lock(&aconnector->hpd_lock);
        if (!dc_link_detect_sink(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);

        if (new_connection_type == dc_connection_none)
                goto skip;

        if (amdgpu_in_reset(adev))
                goto skip;

        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
                dc_link_dp_handle_automated_test(dc_link);
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
                 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                dc_link_dp_handle_link_loss(dc_link);
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
        }
        mutex_unlock(&adev->dm.dc_lock);

skip:
        kfree(offload_work);
}
1335
1336 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1337 {
1338         int max_caps = dc->caps.max_links;
1339         int i = 0;
1340         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1341
1342         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1343
1344         if (!hpd_rx_offload_wq)
1345                 return NULL;
1346
1347
1348         for (i = 0; i < max_caps; i++) {
1349                 hpd_rx_offload_wq[i].wq =
1350                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1351
1352                 if (hpd_rx_offload_wq[i].wq == NULL) {
1353                         DRM_ERROR("amdgpu: failed to create amdgpu_dm_hpd_rx_offload_wq\n");
1354                         goto out_err;
1355                 }
1356
1357                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1358         }
1359
1360         return hpd_rx_offload_wq;

out_err:
        /* Destroy the queues created before the failure and free the array
         * so a partial failure does not leak them.
         */
        while (--i >= 0)
                destroy_workqueue(hpd_rx_offload_wq[i].wq);
        kfree(hpd_rx_offload_wq);
        return NULL;
1361 }
1362
1363 struct amdgpu_stutter_quirk {
1364         u16 chip_vendor;
1365         u16 chip_device;
1366         u16 subsys_vendor;
1367         u16 subsys_device;
1368         u8 revision;
1369 };
1370
1371 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1372         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1373         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1374         { 0, 0, 0, 0, 0 },
1375 };
1376
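/* Stutter mode is known to be broken on the boards quirked in the list above. */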
1377 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1378 {
1379         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1380
1381         while (p && p->chip_device != 0) {
1382                 if (pdev->vendor == p->chip_vendor &&
1383                     pdev->device == p->chip_device &&
1384                     pdev->subsystem_vendor == p->subsys_vendor &&
1385                     pdev->subsystem_device == p->subsys_device &&
1386                     pdev->revision == p->revision) {
1387                         return true;
1388                 }
1389                 ++p;
1390         }
1391         return false;
1392 }
1393
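/**
 * amdgpu_dm_init - Bring up the display manager
 * @adev: amdgpu_device pointer
 *
 * Creates DC with the ASIC- and feature-dependent flags computed below,
 * initializes DMUB and the HPD RX offload queues, registers the DMUB
 * notification callbacks where supported, and initializes the DRM side of
 * the device. Any failure unwinds through amdgpu_dm_fini().
 */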
1394 static int amdgpu_dm_init(struct amdgpu_device *adev)
1395 {
1396         struct dc_init_data init_data;
1397 #ifdef CONFIG_DRM_AMD_DC_HDCP
1398         struct dc_callback_init init_params;
1399 #endif
1400         int r;
1401
1402         adev->dm.ddev = adev_to_drm(adev);
1403         adev->dm.adev = adev;
1404
1405         /* Zero all the fields */
1406         memset(&init_data, 0, sizeof(init_data));
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408         memset(&init_params, 0, sizeof(init_params));
1409 #endif
1410
1411         mutex_init(&adev->dm.dc_lock);
1412         mutex_init(&adev->dm.audio_lock);
1413 #if defined(CONFIG_DRM_AMD_DC_DCN)
1414         spin_lock_init(&adev->dm.vblank_lock);
1415 #endif
1416
1417         if (amdgpu_dm_irq_init(adev)) {
1418                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1419                 goto error;
1420         }
1421
1422         init_data.asic_id.chip_family = adev->family;
1423
1424         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1425         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1426         init_data.asic_id.chip_id = adev->pdev->device;
1427
1428         init_data.asic_id.vram_width = adev->gmc.vram_width;
1429         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1430         init_data.asic_id.atombios_base_address =
1431                 adev->mode_info.atom_context->bios;
1432
1433         init_data.driver = adev;
1434
1435         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1436
1437         if (!adev->dm.cgs_device) {
1438                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1439                 goto error;
1440         }
1441
1442         init_data.cgs_device = adev->dm.cgs_device;
1443
1444         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1445
1446         switch (adev->ip_versions[DCE_HWIP][0]) {
1447         case IP_VERSION(2, 1, 0):
1448                 switch (adev->dm.dmcub_fw_version) {
1449                 case 0: /* development */
1450                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1451                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1452                         init_data.flags.disable_dmcu = false;
1453                         break;
1454                 default:
1455                         init_data.flags.disable_dmcu = true;
1456                 }
1457                 break;
1458         case IP_VERSION(2, 0, 3):
1459                 init_data.flags.disable_dmcu = true;
1460                 break;
1461         default:
1462                 break;
1463         }
1464
1465         switch (adev->asic_type) {
1466         case CHIP_CARRIZO:
1467         case CHIP_STONEY:
1468                 init_data.flags.gpu_vm_support = true;
1469                 break;
1470         default:
1471                 switch (adev->ip_versions[DCE_HWIP][0]) {
1472                 case IP_VERSION(1, 0, 0):
1473                 case IP_VERSION(1, 0, 1):
1474                         /* enable S/G on PCO and RV2 */
1475                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1476                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1477                                 init_data.flags.gpu_vm_support = true;
1478                         break;
1479                 case IP_VERSION(2, 1, 0):
1480                 case IP_VERSION(3, 0, 1):
1481                 case IP_VERSION(3, 1, 2):
1482                 case IP_VERSION(3, 1, 3):
1483                 case IP_VERSION(3, 1, 5):
1484                         init_data.flags.gpu_vm_support = true;
1485                         break;
1486                 default:
1487                         break;
1488                 }
1489                 break;
1490         }
1491
1492         if (init_data.flags.gpu_vm_support)
1493                 adev->mode_info.gpu_vm_support = true;
1494
1495         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1496                 init_data.flags.fbc_support = true;
1497
1498         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1499                 init_data.flags.multi_mon_pp_mclk_switch = true;
1500
1501         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1502                 init_data.flags.disable_fractional_pwm = true;
1503
1504         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1505                 init_data.flags.edp_no_power_sequencing = true;
1506
1507 #ifdef CONFIG_DRM_AMD_DC_DCN
1508         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1509                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1510         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1511                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1512 #endif
1513
1514         init_data.flags.seamless_boot_edp_requested = false;
1515
1516         if (check_seamless_boot_capability(adev)) {
1517                 init_data.flags.seamless_boot_edp_requested = true;
1518                 init_data.flags.allow_seamless_boot_optimization = true;
1519                 DRM_INFO("Seamless boot condition check passed\n");
1520         }
1521
1522         INIT_LIST_HEAD(&adev->dm.da_list);
1523         /* Display Core create. */
1524         adev->dm.dc = dc_create(&init_data);
1525
1526         if (adev->dm.dc) {
1527                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1528         } else {
1529                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1530                 goto error;
1531         }
1532
1533         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1534                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1535                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1536         }
1537
1538         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1539                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1540         if (dm_should_disable_stutter(adev->pdev))
1541                 adev->dm.dc->debug.disable_stutter = true;
1542
1543         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1544                 adev->dm.dc->debug.disable_stutter = true;
1545
1546         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1547                 adev->dm.dc->debug.disable_dsc = true;
1548                 adev->dm.dc->debug.disable_dsc_edp = true;
1549         }
1550
1551         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1552                 adev->dm.dc->debug.disable_clock_gate = true;
1553
1554         r = dm_dmub_hw_init(adev);
1555         if (r) {
1556                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1557                 goto error;
1558         }
1559
1560         dc_hardware_init(adev->dm.dc);
1561
1562         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1563         if (!adev->dm.hpd_rx_offload_wq) {
1564                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1565                 goto error;
1566         }
1567
1568 #if defined(CONFIG_DRM_AMD_DC_DCN)
1569         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1570                 struct dc_phy_addr_space_config pa_config;
1571
1572                 mmhub_read_system_context(adev, &pa_config);
1573
1574                 // Call the DC init_memory func
1575                 dc_setup_system_context(adev->dm.dc, &pa_config);
1576         }
1577 #endif
1578
1579         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1580         if (!adev->dm.freesync_module) {
1581                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1582         } else {
1583                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1584                                 adev->dm.freesync_module);
1585         }
1586
1587         amdgpu_dm_init_color_mod();
1588
1589 #if defined(CONFIG_DRM_AMD_DC_DCN)
1590         if (adev->dm.dc->caps.max_links > 0) {
1591                 adev->dm.vblank_control_workqueue =
1592                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1593                 if (!adev->dm.vblank_control_workqueue)
1594                         DRM_ERROR("amdgpu: failed to create vblank_control_workqueue.\n");
1595         }
1596 #endif
1597
1598 #ifdef CONFIG_DRM_AMD_DC_HDCP
1599         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1600                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1601
1602                 if (!adev->dm.hdcp_workqueue)
1603                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1604                 else
1605                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1606
1607                 dc_init_callbacks(adev->dm.dc, &init_params);
1608         }
1609 #endif
1610 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1611         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1612 #endif
1613         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1614                 init_completion(&adev->dm.dmub_aux_transfer_done);
1615                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1616                 if (!adev->dm.dmub_notify) {
1617                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1618                         goto error;
1619                 }
1620
1621                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1622                 if (!adev->dm.delayed_hpd_wq) {
1623                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1624                         goto error;
1625                 }
1626
1627                 amdgpu_dm_outbox_init(adev);
1628 #if defined(CONFIG_DRM_AMD_DC_DCN)
1629                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1630                         dmub_aux_setconfig_callback, false)) {
1631                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1632                         goto error;
1633                 }
1634                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1635                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1636                         goto error;
1637                 }
1638                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1639                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1640                         goto error;
1641                 }
1642 #endif /* CONFIG_DRM_AMD_DC_DCN */
1643         }
1644
1645         if (amdgpu_dm_initialize_drm_device(adev)) {
1646                 DRM_ERROR(
1647                 "amdgpu: failed to initialize sw for display support.\n");
1648                 goto error;
1649         }
1650
1651         /* create fake encoders for MST */
1652         dm_dp_create_fake_mst_encoders(adev);
1653
1654         /* TODO: Add_display_info? */
1655
1656         /* TODO use dynamic cursor width */
1657         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1658         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1659
1660         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1661                 DRM_ERROR(
1662                 "amdgpu: failed to initialize vblank support.\n");
1663                 goto error;
1664         }
1665
1666
1667         DRM_DEBUG_DRIVER("KMS initialized.\n");
1668
1669         return 0;
1670 error:
1671         amdgpu_dm_fini(adev);
1672
1673         return -EINVAL;
1674 }
1675
1676 static int amdgpu_dm_early_fini(void *handle)
1677 {
1678         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1679
1680         amdgpu_dm_audio_fini(adev);
1681
1682         return 0;
1683 }
1684
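/*
 * Tear down everything amdgpu_dm_init() set up. This also serves as the
 * error path of init, so every step checks that its object actually exists.
 */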
1685 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1686 {
1687         int i;
1688
1689 #if defined(CONFIG_DRM_AMD_DC_DCN)
1690         if (adev->dm.vblank_control_workqueue) {
1691                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1692                 adev->dm.vblank_control_workqueue = NULL;
1693         }
1694 #endif
1695
1696         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1697                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1698         }
1699
1700         amdgpu_dm_destroy_drm_device(&adev->dm);
1701
1702 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1703         if (adev->dm.crc_rd_wrk) {
1704                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1705                 kfree(adev->dm.crc_rd_wrk);
1706                 adev->dm.crc_rd_wrk = NULL;
1707         }
1708 #endif
1709 #ifdef CONFIG_DRM_AMD_DC_HDCP
1710         if (adev->dm.hdcp_workqueue) {
1711                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1712                 adev->dm.hdcp_workqueue = NULL;
1713         }
1714
1715         if (adev->dm.dc)
1716                 dc_deinit_callbacks(adev->dm.dc);
1717 #endif
1718
1719         if (adev->dm.dc)
                     dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1720 
1721         if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1722                 kfree(adev->dm.dmub_notify);
1723                 adev->dm.dmub_notify = NULL;
1724                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1725                 adev->dm.delayed_hpd_wq = NULL;
1726         }
1727
1728         if (adev->dm.dmub_bo)
1729                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1730                                       &adev->dm.dmub_bo_gpu_addr,
1731                                       &adev->dm.dmub_bo_cpu_addr);
1732
1733         if (adev->dm.hpd_rx_offload_wq) {
1734                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1735                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1736                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1737                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1738                         }
1739                 }
1740
1741                 kfree(adev->dm.hpd_rx_offload_wq);
1742                 adev->dm.hpd_rx_offload_wq = NULL;
1743         }
1744
1745         /* DC Destroy TODO: Replace destroy DAL */
1746         if (adev->dm.dc)
1747                 dc_destroy(&adev->dm.dc);
1748         /*
1749          * TODO: pageflip, vblank interrupt
1750          *
1751          * amdgpu_dm_irq_fini(adev);
1752          */
1753
1754         if (adev->dm.cgs_device) {
1755                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1756                 adev->dm.cgs_device = NULL;
1757         }
1758         if (adev->dm.freesync_module) {
1759                 mod_freesync_destroy(adev->dm.freesync_module);
1760                 adev->dm.freesync_module = NULL;
1761         }
1762
1763         mutex_destroy(&adev->dm.audio_lock);
1764         mutex_destroy(&adev->dm.dc_lock);
1765
1766         return;
1767 }
1768
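/**
 * load_dmcu_fw - Request the DMCU firmware when the ASIC needs it
 * @adev: amdgpu_device pointer
 *
 * Pre-DCN ASICs and most DCN2+ ASICs either have no DMCU or implement the
 * equivalent functionality in DMUB, so they return early with success.
 * DMCU firmware is only loaded through PSP, and a missing firmware file is
 * tolerated rather than treated as an error.
 */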
1769 static int load_dmcu_fw(struct amdgpu_device *adev)
1770 {
1771         const char *fw_name_dmcu = NULL;
1772         int r;
1773         const struct dmcu_firmware_header_v1_0 *hdr;
1774
1775         switch (adev->asic_type) {
1776 #if defined(CONFIG_DRM_AMD_DC_SI)
1777         case CHIP_TAHITI:
1778         case CHIP_PITCAIRN:
1779         case CHIP_VERDE:
1780         case CHIP_OLAND:
1781 #endif
1782         case CHIP_BONAIRE:
1783         case CHIP_HAWAII:
1784         case CHIP_KAVERI:
1785         case CHIP_KABINI:
1786         case CHIP_MULLINS:
1787         case CHIP_TONGA:
1788         case CHIP_FIJI:
1789         case CHIP_CARRIZO:
1790         case CHIP_STONEY:
1791         case CHIP_POLARIS11:
1792         case CHIP_POLARIS10:
1793         case CHIP_POLARIS12:
1794         case CHIP_VEGAM:
1795         case CHIP_VEGA10:
1796         case CHIP_VEGA12:
1797         case CHIP_VEGA20:
1798                 return 0;
1799         case CHIP_NAVI12:
1800                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1801                 break;
1802         case CHIP_RAVEN:
1803                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1804                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1805                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1806                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1807                 else
1808                         return 0;
1809                 break;
1810         default:
1811                 switch (adev->ip_versions[DCE_HWIP][0]) {
1812                 case IP_VERSION(2, 0, 2):
1813                 case IP_VERSION(2, 0, 3):
1814                 case IP_VERSION(2, 0, 0):
1815                 case IP_VERSION(2, 1, 0):
1816                 case IP_VERSION(3, 0, 0):
1817                 case IP_VERSION(3, 0, 2):
1818                 case IP_VERSION(3, 0, 3):
1819                 case IP_VERSION(3, 0, 1):
1820                 case IP_VERSION(3, 1, 2):
1821                 case IP_VERSION(3, 1, 3):
1822                 case IP_VERSION(3, 1, 5):
1823                 case IP_VERSION(3, 1, 6):
1824                         return 0;
1825                 default:
1826                         break;
1827                 }
1828                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1829                 return -EINVAL;
1830         }
1831
1832         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1833                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1834                 return 0;
1835         }
1836
1837         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1838         if (r == -ENOENT) {
1839                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1840                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1841                 adev->dm.fw_dmcu = NULL;
1842                 return 0;
1843         }
1844         if (r) {
1845                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1846                         fw_name_dmcu);
1847                 return r;
1848         }
1849
1850         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1851         if (r) {
1852                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1853                         fw_name_dmcu);
1854                 release_firmware(adev->dm.fw_dmcu);
1855                 adev->dm.fw_dmcu = NULL;
1856                 return r;
1857         }
1858
1859         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1860         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1861         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1862         adev->firmware.fw_size +=
1863                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1864
1865         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1866         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1867         adev->firmware.fw_size +=
1868                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1869
1870         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1871
1872         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1873
1874         return 0;
1875 }
1876
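/* Register accessors handed to the DMUB service, routed through the DC context. */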
1877 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1878 {
1879         struct amdgpu_device *adev = ctx;
1880
1881         return dm_read_reg(adev->dm.dc->ctx, address);
1882 }
1883
1884 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1885                                      uint32_t value)
1886 {
1887         struct amdgpu_device *adev = ctx;
1888
1889         return dm_write_reg(adev->dm.dc->ctx, address, value);
1890 }
1891
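/**
 * dm_dmub_sw_init - Software initialization of the DMUB service
 * @adev: amdgpu_device pointer
 *
 * Selects and validates the DMUB firmware for the detected DCN IP version,
 * creates the DMUB service, and carves the firmware regions out of a VRAM
 * allocation. Firmware load failures are logged but reported as success so
 * the driver can still come up without DMUB; ASICs with no DMUB support
 * return 0 immediately.
 */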
1892 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1893 {
1894         struct dmub_srv_create_params create_params;
1895         struct dmub_srv_region_params region_params;
1896         struct dmub_srv_region_info region_info;
1897         struct dmub_srv_fb_params fb_params;
1898         struct dmub_srv_fb_info *fb_info;
1899         struct dmub_srv *dmub_srv;
1900         const struct dmcub_firmware_header_v1_0 *hdr;
1901         const char *fw_name_dmub;
1902         enum dmub_asic dmub_asic;
1903         enum dmub_status status;
1904         int r;
1905
1906         switch (adev->ip_versions[DCE_HWIP][0]) {
1907         case IP_VERSION(2, 1, 0):
1908                 dmub_asic = DMUB_ASIC_DCN21;
1909                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1910                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1911                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1912                 break;
1913         case IP_VERSION(3, 0, 0):
1914                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1915                         dmub_asic = DMUB_ASIC_DCN30;
1916                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1917                 } else {
1918                         dmub_asic = DMUB_ASIC_DCN30;
1919                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1920                 }
1921                 break;
1922         case IP_VERSION(3, 0, 1):
1923                 dmub_asic = DMUB_ASIC_DCN301;
1924                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1925                 break;
1926         case IP_VERSION(3, 0, 2):
1927                 dmub_asic = DMUB_ASIC_DCN302;
1928                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1929                 break;
1930         case IP_VERSION(3, 0, 3):
1931                 dmub_asic = DMUB_ASIC_DCN303;
1932                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1933                 break;
1934         case IP_VERSION(3, 1, 2):
1935         case IP_VERSION(3, 1, 3):
1936                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1937                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1938                 break;
1939         case IP_VERSION(3, 1, 5):
1940                 dmub_asic = DMUB_ASIC_DCN315;
1941                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1942                 break;
1943         case IP_VERSION(3, 1, 6):
1944                 dmub_asic = DMUB_ASIC_DCN316;
1945                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1946                 break;
1947         default:
1948                 /* ASIC doesn't support DMUB. */
1949                 return 0;
1950         }
1951
1952         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1953         if (r) {
1954                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1955                 return 0;
1956         }
1957
1958         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1959         if (r) {
1960                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1961                 return 0;
1962         }
1963
1964         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1965         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1966
1967         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1968                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1969                         AMDGPU_UCODE_ID_DMCUB;
1970                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1971                         adev->dm.dmub_fw;
1972                 adev->firmware.fw_size +=
1973                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1974
1975                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1976                          adev->dm.dmcub_fw_version);
1977         }
1978
1979
1980         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1981         dmub_srv = adev->dm.dmub_srv;
1982
1983         if (!dmub_srv) {
1984                 DRM_ERROR("Failed to allocate DMUB service!\n");
1985                 return -ENOMEM;
1986         }
1987
1988         memset(&create_params, 0, sizeof(create_params));
1989         create_params.user_ctx = adev;
1990         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1991         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1992         create_params.asic = dmub_asic;
1993
1994         /* Create the DMUB service. */
1995         status = dmub_srv_create(dmub_srv, &create_params);
1996         if (status != DMUB_STATUS_OK) {
1997                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1998                 return -EINVAL;
1999         }
2000
2001         /* Calculate the size of all the regions for the DMUB service. */
2002         memset(&region_params, 0, sizeof(region_params));
2003
2004         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2005                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2006         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2007         region_params.vbios_size = adev->bios_size;
2008         region_params.fw_bss_data = region_params.bss_data_size ?
2009                 adev->dm.dmub_fw->data +
2010                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2011                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2012         region_params.fw_inst_const =
2013                 adev->dm.dmub_fw->data +
2014                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2015                 PSP_HEADER_BYTES;
2016
2017         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2018                                            &region_info);
2019
2020         if (status != DMUB_STATUS_OK) {
2021                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2022                 return -EINVAL;
2023         }
2024
2025         /*
2026          * Allocate a framebuffer based on the total size of all the regions.
2027          * TODO: Move this into GART.
2028          */
2029         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2030                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2031                                     &adev->dm.dmub_bo_gpu_addr,
2032                                     &adev->dm.dmub_bo_cpu_addr);
2033         if (r)
2034                 return r;
2035
2036         /* Rebase the regions on the framebuffer address. */
2037         memset(&fb_params, 0, sizeof(fb_params));
2038         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2039         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2040         fb_params.region_info = &region_info;
2041
2042         adev->dm.dmub_fb_info =
2043                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2044         fb_info = adev->dm.dmub_fb_info;
2045
2046         if (!fb_info) {
2047                 DRM_ERROR(
2048                         "Failed to allocate framebuffer info for DMUB service!\n");
2049                 return -ENOMEM;
2050         }
2051
2052         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2053         if (status != DMUB_STATUS_OK) {
2054                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2055                 return -EINVAL;
2056         }
2057
2058         return 0;
2059 }
2060
2061 static int dm_sw_init(void *handle)
2062 {
2063         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2064         int r;
2065
2066         r = dm_dmub_sw_init(adev);
2067         if (r)
2068                 return r;
2069
2070         return load_dmcu_fw(adev);
2071 }
2072
2073 static int dm_sw_fini(void *handle)
2074 {
2075         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2076
2077         kfree(adev->dm.dmub_fb_info);
2078         adev->dm.dmub_fb_info = NULL;
2079
2080         if (adev->dm.dmub_srv) {
2081                 dmub_srv_destroy(adev->dm.dmub_srv);
2082                 adev->dm.dmub_srv = NULL;
2083         }
2084
2085         release_firmware(adev->dm.dmub_fw);
2086         adev->dm.dmub_fw = NULL;
2087
2088         release_firmware(adev->dm.fw_dmcu);
2089         adev->dm.fw_dmcu = NULL;
2090
2091         return 0;
2092 }
2093
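/*
 * Start MST topology management on every connector that is backed by an MST
 * branch device.
 */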
2094 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2095 {
2096         struct amdgpu_dm_connector *aconnector;
2097         struct drm_connector *connector;
2098         struct drm_connector_list_iter iter;
2099         int ret = 0;
2100
2101         drm_connector_list_iter_begin(dev, &iter);
2102         drm_for_each_connector_iter(connector, &iter) {
2103                 aconnector = to_amdgpu_dm_connector(connector);
2104                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2105                     aconnector->mst_mgr.aux) {
2106                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2107                                          aconnector,
2108                                          aconnector->base.base.id);
2109
2110                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2111                         if (ret < 0) {
2112                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2113                                 aconnector->dc_link->type =
2114                                         dc_connection_single;
2115                                 break;
2116                         }
2117                 }
2118         }
2119         drm_connector_list_iter_end(&iter);
2120
2121         return ret;
2122 }
2123
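/*
 * Program ABM with a linear backlight LUT and the ramping parameters below,
 * either into DMCU IRAM or, when ABM lives in DMCUB, per eDP link. Finishes
 * by starting MST link detection on all connectors.
 */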
2124 static int dm_late_init(void *handle)
2125 {
2126         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2127
2128         struct dmcu_iram_parameters params;
2129         unsigned int linear_lut[16];
2130         int i;
2131         struct dmcu *dmcu = NULL;
2132
2133         dmcu = adev->dm.dc->res_pool->dmcu;
2134
2135         for (i = 0; i < 16; i++)
2136                 linear_lut[i] = 0xFFFF * i / 15;
2137
2138         params.set = 0;
2139         params.backlight_ramping_override = false;
2140         params.backlight_ramping_start = 0xCCCC;
2141         params.backlight_ramping_reduction = 0xCCCCCCCC;
2142         params.backlight_lut_array_size = 16;
2143         params.backlight_lut_array = linear_lut;
2144
2145         /* Min backlight level after ABM reduction; don't allow below 1%:
2146          * 0xFFFF x 0.01 = 0x28F
2147          */
2148         params.min_abm_backlight = 0x28F;
2149         /* In the case where ABM is implemented on DMCUB,
2150          * the DMCU object will be NULL.
2151          * ABM 2.4 and up are implemented on DMCUB.
2152          */
2153         if (dmcu) {
2154                 if (!dmcu_load_iram(dmcu, params))
2155                         return -EINVAL;
2156         } else if (adev->dm.dc->ctx->dmub_srv) {
2157                 struct dc_link *edp_links[MAX_NUM_EDP];
2158                 int edp_num;
2159
2160                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2161                 for (i = 0; i < edp_num; i++) {
2162                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2163                                 return -EINVAL;
2164                 }
2165         }
2166
2167         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2168 }
2169
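/*
 * Suspend or resume the MST topology managers around S3. A manager that
 * fails to resume is torn down, and a hotplug event is generated so that
 * userspace re-probes the affected connectors.
 */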
2170 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2171 {
2172         struct amdgpu_dm_connector *aconnector;
2173         struct drm_connector *connector;
2174         struct drm_connector_list_iter iter;
2175         struct drm_dp_mst_topology_mgr *mgr;
2176         int ret;
2177         bool need_hotplug = false;
2178
2179         drm_connector_list_iter_begin(dev, &iter);
2180         drm_for_each_connector_iter(connector, &iter) {
2181                 aconnector = to_amdgpu_dm_connector(connector);
2182                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2183                     aconnector->mst_port)
2184                         continue;
2185
2186                 mgr = &aconnector->mst_mgr;
2187
2188                 if (suspend) {
2189                         drm_dp_mst_topology_mgr_suspend(mgr);
2190                 } else {
2191                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2192                         if (ret < 0) {
2193                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2194                                 need_hotplug = true;
2195                         }
2196                 }
2197         }
2198         drm_connector_list_iter_end(&iter);
2199
2200         if (need_hotplug)
2201                 drm_kms_helper_hotplug_event(dev);
2202 }
2203
2204 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2205 {
2206         int ret = 0;
2207
2208         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2209          * depends on the Windows driver dc implementation.
2210          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
2211          * settings should be passed to smu during boot up and on resume from s3.
2212          * Boot up: dc calculates the dcn watermark clock settings within
2213          * dc_create (dcn20_resource_construct)
2214          * and then calls the pplib functions below to pass the settings to smu:
2215          * smu_set_watermarks_for_clock_ranges
2216          * smu_set_watermarks_table
2217          * navi10_set_watermarks_table
2218          * smu_write_watermarks_table
2219          *
2220          * For Renoir, the clock settings of the dcn watermarks are also fixed
2221          * values. dc has implemented a different flow for the Windows driver:
2222          * dc_hardware_init / dc_set_power_state
2223          * dcn10_init_hw
2224          * notify_wm_ranges
2225          * set_wm_ranges
2226          * -- Linux
2227          * smu_set_watermarks_for_clock_ranges
2228          * renoir_set_watermarks_table
2229          * smu_write_watermarks_table
2230          *
2231          * For Linux,
2232          * dc_hardware_init -> amdgpu_dm_init
2233          * dc_set_power_state --> dm_resume
2234          *
2235          * Therefore, this function applies to navi10/12/14 but not to Renoir.
2236          */
2238         switch (adev->ip_versions[DCE_HWIP][0]) {
2239         case IP_VERSION(2, 0, 2):
2240         case IP_VERSION(2, 0, 0):
2241                 break;
2242         default:
2243                 return 0;
2244         }
2245
2246         ret = amdgpu_dpm_write_watermarks_table(adev);
2247         if (ret) {
2248                 DRM_ERROR("Failed to update WMTABLE!\n");
2249                 return ret;
2250         }
2251
2252         return 0;
2253 }
2254
2255 /**
2256  * dm_hw_init() - Initialize DC device
2257  * @handle: The base driver device containing the amdgpu_dm device.
2258  *
2259  * Initialize the &struct amdgpu_display_manager device. This involves calling
2260  * the initializers of each DM component, then populating the struct with them.
2261  *
2262  * Although the function implies hardware initialization, both hardware and
2263  * software are initialized here. Splitting them out to their relevant init
2264  * hooks is a future TODO item.
2265  *
2266  * Some notable things that are initialized here:
2267  *
2268  * - Display Core, both software and hardware
2269  * - DC modules that we need (freesync and color management)
2270  * - DRM software states
2271  * - Interrupt sources and handlers
2272  * - Vblank support
2273  * - Debug FS entries, if enabled
2274  */
2275 static int dm_hw_init(void *handle)
2276 {
2277         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2278         /* Create DAL display manager */
2279         amdgpu_dm_init(adev);
2280         amdgpu_dm_hpd_init(adev);
2281
2282         return 0;
2283 }
2284
2285 /**
2286  * dm_hw_fini() - Teardown DC device
2287  * @handle: The base driver device containing the amdgpu_dm device.
2288  *
2289  * Teardown components within &struct amdgpu_display_manager that require
2290  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2291  * were loaded. Also flush IRQ workqueues and disable them.
2292  */
2293 static int dm_hw_fini(void *handle)
2294 {
2295         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2296
2297         amdgpu_dm_hpd_fini(adev);
2298
2299         amdgpu_dm_irq_fini(adev);
2300         amdgpu_dm_fini(adev);
2301         return 0;
2302 }
2303
2304
2305 static int dm_enable_vblank(struct drm_crtc *crtc);
2306 static void dm_disable_vblank(struct drm_crtc *crtc);
2307
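/*
 * Enable or disable the pageflip and vblank interrupts of every stream that
 * has active planes; used to quiesce display interrupts around a GPU reset.
 */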
2308 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2309                                  struct dc_state *state, bool enable)
2310 {
2311         enum dc_irq_source irq_source;
2312         struct amdgpu_crtc *acrtc;
2313         int rc = -EBUSY;
2314         int i = 0;
2315
2316         for (i = 0; i < state->stream_count; i++) {
2317                 acrtc = get_crtc_by_otg_inst(
2318                                 adev, state->stream_status[i].primary_otg_inst);
2319
2320                 if (acrtc && state->stream_status[i].plane_count != 0) {
2321                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2322                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2323                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2324                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2325                         if (rc)
2326                                 DRM_WARN("Failed to %s pflip interrupts\n",
2327                                          enable ? "enable" : "disable");
2328
2329                         if (enable) {
2330                                 rc = dm_enable_vblank(&acrtc->base);
2331                                 if (rc)
2332                                         DRM_WARN("Failed to enable vblank interrupts\n");
2333                         } else {
2334                                 dm_disable_vblank(&acrtc->base);
2335                         }
2336
2337                 }
2338         }
2339
2340 }
2341
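/*
 * Commit a copy of the current DC state with every stream (and its planes)
 * removed, blanking all displays on the way into a GPU reset.
 */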
2342 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2343 {
2344         struct dc_state *context = NULL;
2345         enum dc_status res = DC_ERROR_UNEXPECTED;
2346         int i;
2347         struct dc_stream_state *del_streams[MAX_PIPES];
2348         int del_streams_count = 0;
2349
2350         memset(del_streams, 0, sizeof(del_streams));
2351
2352         context = dc_create_state(dc);
2353         if (context == NULL)
2354                 goto context_alloc_fail;
2355
2356         dc_resource_state_copy_construct_current(dc, context);
2357
2358         /* First remove from context all streams */
2359         for (i = 0; i < context->stream_count; i++) {
2360                 struct dc_stream_state *stream = context->streams[i];
2361
2362                 del_streams[del_streams_count++] = stream;
2363         }
2364
2365         /* Remove all planes for removed streams and then remove the streams */
2366         for (i = 0; i < del_streams_count; i++) {
2367                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2368                         res = DC_FAIL_DETACH_SURFACES;
2369                         goto fail;
2370                 }
2371
2372                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2373                 if (res != DC_OK)
2374                         goto fail;
2375         }
2376
2377         res = dc_commit_state(dc, context);
2378
2379 fail:
2380         dc_release_state(context);
2381
2382 context_alloc_fail:
2383         return res;
2384 }
2385
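/* Flush any HPD RX offload work still pending before entering suspend. */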
2386 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2387 {
2388         int i;
2389
2390         if (dm->hpd_rx_offload_wq) {
2391                 for (i = 0; i < dm->dc->caps.max_links; i++)
2392                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2393         }
2394 }
2395
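/*
 * dm_suspend() has two paths: during a GPU reset it caches the current DC
 * state and returns with dc_lock held until dm_resume() restores the state,
 * while the regular S3 path suspends the DRM atomic state and puts DC into
 * the D3 power state.
 */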
2396 static int dm_suspend(void *handle)
2397 {
2398         struct amdgpu_device *adev = handle;
2399         struct amdgpu_display_manager *dm = &adev->dm;
2400         int ret = 0;
2401
2402         if (amdgpu_in_reset(adev)) {
2403                 mutex_lock(&dm->dc_lock);
2404
2405 #if defined(CONFIG_DRM_AMD_DC_DCN)
2406                 dc_allow_idle_optimizations(adev->dm.dc, false);
2407 #endif
2408
2409                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2410
2411                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2412
2413                 amdgpu_dm_commit_zero_streams(dm->dc);
2414
2415                 amdgpu_dm_irq_suspend(adev);
2416
2417                 hpd_rx_irq_work_suspend(dm);
2418
2419                 return ret;
2420         }
2421
2422         WARN_ON(adev->dm.cached_state);
2423         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2424
2425         s3_handle_mst(adev_to_drm(adev), true);
2426
2427         amdgpu_dm_irq_suspend(adev);
2428
2429         hpd_rx_irq_work_suspend(dm);
2430
2431         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2432
2433         return 0;
2434 }
2435
2436 struct amdgpu_dm_connector *
2437 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2438                                              struct drm_crtc *crtc)
2439 {
2440         uint32_t i;
2441         struct drm_connector_state *new_con_state;
2442         struct drm_connector *connector;
2443         struct drm_crtc *crtc_from_state;
2444
2445         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2446                 crtc_from_state = new_con_state->crtc;
2447
2448                 if (crtc_from_state == crtc)
2449                         return to_amdgpu_dm_connector(connector);
2450         }
2451
2452         return NULL;
2453 }
2454
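/*
 * Emulate a hotplug on connectors that userspace has forced on while no
 * sink is physically detected: create a sink that matches the connector
 * signal and attempt to read the local EDID.
 */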
2455 static void emulated_link_detect(struct dc_link *link)
2456 {
2457         struct dc_sink_init_data sink_init_data = { 0 };
2458         struct display_sink_capability sink_caps = { 0 };
2459         enum dc_edid_status edid_status;
2460         struct dc_context *dc_ctx = link->ctx;
2461         struct dc_sink *sink = NULL;
2462         struct dc_sink *prev_sink = NULL;
2463
2464         link->type = dc_connection_none;
2465         prev_sink = link->local_sink;
2466
2467         if (prev_sink)
2468                 dc_sink_release(prev_sink);
2469
2470         switch (link->connector_signal) {
2471         case SIGNAL_TYPE_HDMI_TYPE_A: {
2472                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2473                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2474                 break;
2475         }
2476
2477         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2478                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2479                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2480                 break;
2481         }
2482
2483         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2484                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2485                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2486                 break;
2487         }
2488
2489         case SIGNAL_TYPE_LVDS: {
2490                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2491                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2492                 break;
2493         }
2494
2495         case SIGNAL_TYPE_EDP: {
2496                 sink_caps.transaction_type =
2497                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2498                 sink_caps.signal = SIGNAL_TYPE_EDP;
2499                 break;
2500         }
2501
2502         case SIGNAL_TYPE_DISPLAY_PORT: {
2503                 sink_caps.transaction_type =
2504                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2505                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2506                 break;
2507         }
2508
2509         default:
2510                 DC_ERROR("Invalid connector type! signal:%d\n",
2511                         link->connector_signal);
2512                 return;
2513         }
2514
2515         sink_init_data.link = link;
2516         sink_init_data.sink_signal = sink_caps.signal;
2517
2518         sink = dc_sink_create(&sink_init_data);
2519         if (!sink) {
2520                 DC_ERROR("Failed to create sink!\n");
2521                 return;
2522         }
2523
2524         /* dc_sink_create returns a new reference */
2525         link->local_sink = sink;
2526
2527         edid_status = dm_helpers_read_local_edid(
2528                         link->ctx,
2529                         link,
2530                         sink);
2531
2532         if (edid_status != EDID_OK)
2533                 DC_ERROR("Failed to read EDID\n");
2534
2535 }
2536
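/*
 * Replay the cached streams with a forced full update on every plane so the
 * hardware is completely reprogrammed after a GPU reset.
 */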
2537 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2538                                      struct amdgpu_display_manager *dm)
2539 {
2540         struct {
2541                 struct dc_surface_update surface_updates[MAX_SURFACES];
2542                 struct dc_plane_info plane_infos[MAX_SURFACES];
2543                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2544                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2545                 struct dc_stream_update stream_update;
2546         } *bundle;
2547         int k, m;
2548
2549         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2550
2551         if (!bundle) {
2552                 dm_error("Failed to allocate update bundle\n");
2553                 goto cleanup;
2554         }
2555
2556         for (k = 0; k < dc_state->stream_count; k++) {
2557                 bundle->stream_update.stream = dc_state->streams[k];
2558
2559                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2560                         bundle->surface_updates[m].surface =
2561                                 dc_state->stream_status->plane_states[m];
2562                         bundle->surface_updates[m].surface->force_full_update =
2563                                 true;
2564                 }
2565                 dc_commit_updates_for_stream(
2566                         dm->dc, bundle->surface_updates,
2567                         dc_state->stream_status->plane_count,
2568                         dc_state->streams[k], &bundle->stream_update, dc_state);
2569         }
2570
2571 cleanup:
2572         kfree(bundle);
2573
2574         return;
2575 }
2576
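/* Push a dpms_off update to the stream currently driven by the given link. */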
2577 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2578 {
2579         struct dc_stream_state *stream_state;
2580         struct amdgpu_dm_connector *aconnector = link->priv;
2581         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2582         struct dc_stream_update stream_update;
2583         bool dpms_off = true;
2584
2585         memset(&stream_update, 0, sizeof(stream_update));
2586         stream_update.dpms_off = &dpms_off;
2587
2588         mutex_lock(&adev->dm.dc_lock);
2589         stream_state = dc_stream_find_from_link(link);
2590
2591         if (stream_state == NULL) {
2592                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2593                 mutex_unlock(&adev->dm.dc_lock);
2594                 return;
2595         }
2596
2597         stream_update.stream = stream_state;
2598         acrtc_state->force_dpms_off = true;
2599         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2600                                      stream_state, &stream_update,
2601                                      stream_state->ctx->dc->current_state);
2602         mutex_unlock(&adev->dm.dc_lock);
2603 }
2604
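/*
 * dm_resume() mirrors dm_suspend(): the GPU-reset path replays the cached DC
 * state and finally drops dc_lock, while the S3 path rebuilds the DC state
 * from scratch, re-detects every connector, and resumes the cached DRM
 * atomic state.
 */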
2605 static int dm_resume(void *handle)
2606 {
2607         struct amdgpu_device *adev = handle;
2608         struct drm_device *ddev = adev_to_drm(adev);
2609         struct amdgpu_display_manager *dm = &adev->dm;
2610         struct amdgpu_dm_connector *aconnector;
2611         struct drm_connector *connector;
2612         struct drm_connector_list_iter iter;
2613         struct drm_crtc *crtc;
2614         struct drm_crtc_state *new_crtc_state;
2615         struct dm_crtc_state *dm_new_crtc_state;
2616         struct drm_plane *plane;
2617         struct drm_plane_state *new_plane_state;
2618         struct dm_plane_state *dm_new_plane_state;
2619         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2620         enum dc_connection_type new_connection_type = dc_connection_none;
2621         struct dc_state *dc_state;
2622         int i, r, j;
2623
2624         if (amdgpu_in_reset(adev)) {
2625                 dc_state = dm->cached_dc_state;
2626
2627                 /*
2628                  * The dc->current_state is backed up into dm->cached_dc_state
2629                  * before we commit 0 streams.
2630                  *
2631                  * DC will clear link encoder assignments on the real state
2632                  * but the changes won't propagate over to the copy we made
2633                  * before the 0 streams commit.
2634                  *
2635                  * DC expects that link encoder assignments are *not* valid
2636                  * when committing a state, so as a workaround we can copy
2637                  * off of the current state.
2638                  *
2639                  * We lose the previous assignments, but we had already
2640                  * committed 0 streams anyway.
2641                  */
2642                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2643
2644                 if (dc_enable_dmub_notifications(adev->dm.dc))
2645                         amdgpu_dm_outbox_init(adev);
2646
2647                 r = dm_dmub_hw_init(adev);
2648                 if (r)
2649                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2650
2651                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2652                 dc_resume(dm->dc);
2653
2654                 amdgpu_dm_irq_resume_early(adev);
2655
2656                 for (i = 0; i < dc_state->stream_count; i++) {
2657                         dc_state->streams[i]->mode_changed = true;
2658                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2659                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2660                                         = 0xffffffff;
2661                         }
2662                 }
2663
2664                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2665
2666                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2667
2668                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2669
2670                 dc_release_state(dm->cached_dc_state);
2671                 dm->cached_dc_state = NULL;
2672
2673                 amdgpu_dm_irq_resume_late(adev);
2674
2675                 mutex_unlock(&dm->dc_lock);
2676
2677                 return 0;
2678         }
2679         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2680         dc_release_state(dm_state->context);
2681         dm_state->context = dc_create_state(dm->dc);
2682         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2683         dc_resource_state_construct(dm->dc, dm_state->context);
2684
2685         /* Re-enable outbox interrupts for DPIA. */
2686         if (dc_enable_dmub_notifications(adev->dm.dc))
2687                 amdgpu_dm_outbox_init(adev);
2688
2689         /* Before powering on DC we need to re-initialize DMUB. */
2690         dm_dmub_hw_resume(adev);
2691
2692         /* power on hardware */
2693         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2694
2695         /* program HPD filter */
2696         dc_resume(dm->dc);
2697
2698         /*
2699          * Enable the HPD Rx IRQ early; this must be done before setting the
2700          * mode since short-pulse interrupts are used for MST.
2701          */
2702         amdgpu_dm_irq_resume_early(adev);
2703
2704         /* On resume we need to rewrite the MSTM control bits to enable MST */
2705         s3_handle_mst(ddev, false);
2706
2707         /* Do detection */
2708         drm_connector_list_iter_begin(ddev, &iter);
2709         drm_for_each_connector_iter(connector, &iter) {
2710                 aconnector = to_amdgpu_dm_connector(connector);
2711
2712                 /*
2713                  * This is the case when traversing through already created
2714                  * MST connectors; they should be skipped.
2715                  */
2716                 if (aconnector->mst_port)
2717                         continue;
2718
2719                 mutex_lock(&aconnector->hpd_lock);
2720                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2721                         DRM_ERROR("KMS: Failed to detect connector\n");
2722
2723                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2724                         emulated_link_detect(aconnector->dc_link);
2725                 else
2726                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2727
2728                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2729                         aconnector->fake_enable = false;
2730
2731                 if (aconnector->dc_sink)
2732                         dc_sink_release(aconnector->dc_sink);
2733                 aconnector->dc_sink = NULL;
2734                 amdgpu_dm_update_connector_after_detect(aconnector);
2735                 mutex_unlock(&aconnector->hpd_lock);
2736         }
2737         drm_connector_list_iter_end(&iter);
2738
2739         /* Force mode set in atomic commit */
2740         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2741                 new_crtc_state->active_changed = true;
2742
2743         /*
2744          * atomic_check is expected to create the dc states. We need to release
2745          * them here, since they were duplicated as part of the suspend
2746          * procedure.
2747          */
2748         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2749                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2750                 if (dm_new_crtc_state->stream) {
2751                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2752                         dc_stream_release(dm_new_crtc_state->stream);
2753                         dm_new_crtc_state->stream = NULL;
2754                 }
2755         }
2756
2757         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2758                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2759                 if (dm_new_plane_state->dc_state) {
2760                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2761                         dc_plane_state_release(dm_new_plane_state->dc_state);
2762                         dm_new_plane_state->dc_state = NULL;
2763                 }
2764         }
2765
2766         drm_atomic_helper_resume(ddev, dm->cached_state);
2767
2768         dm->cached_state = NULL;
2769
2770         amdgpu_dm_irq_resume_late(adev);
2771
2772         amdgpu_dm_smu_write_watermarks_table(adev);
2773
2774         return 0;
2775 }
2776
2777 /**
2778  * DOC: DM Lifecycle
2779  *
2780  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2781  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2782  * the base driver's device list to be initialized and torn down accordingly.
2783  *
2784  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2785  */
2786
2787 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2788         .name = "dm",
2789         .early_init = dm_early_init,
2790         .late_init = dm_late_init,
2791         .sw_init = dm_sw_init,
2792         .sw_fini = dm_sw_fini,
2793         .early_fini = amdgpu_dm_early_fini,
2794         .hw_init = dm_hw_init,
2795         .hw_fini = dm_hw_fini,
2796         .suspend = dm_suspend,
2797         .resume = dm_resume,
2798         .is_idle = dm_is_idle,
2799         .wait_for_idle = dm_wait_for_idle,
2800         .check_soft_reset = dm_check_soft_reset,
2801         .soft_reset = dm_soft_reset,
2802         .set_clockgating_state = dm_set_clockgating_state,
2803         .set_powergating_state = dm_set_powergating_state,
2804 };
2805
2806 const struct amdgpu_ip_block_version dm_ip_block = {
2808         .type = AMD_IP_BLOCK_TYPE_DCE,
2809         .major = 1,
2810         .minor = 0,
2811         .rev = 0,
2812         .funcs = &amdgpu_dm_funcs,
2813 };
2814
2816 /**
2817  * DOC: atomic
2818  *
2819  * *WIP*
2820  */
2821
2822 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2823         .fb_create = amdgpu_display_user_framebuffer_create,
2824         .get_format_info = amd_get_format_info,
2825         .output_poll_changed = drm_fb_helper_output_poll_changed,
2826         .atomic_check = amdgpu_dm_atomic_check,
2827         .atomic_commit = drm_atomic_helper_commit,
2828 };
2829
2830 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2831         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2832 };
2833
2834 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2835 {
2836         u32 max_cll, min_cll, max, min, q, r;
2837         struct amdgpu_dm_backlight_caps *caps;
2838         struct amdgpu_display_manager *dm;
2839         struct drm_connector *conn_base;
2840         struct amdgpu_device *adev;
2841         struct dc_link *link = NULL;
2842         static const u8 pre_computed_values[] = {
2843                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2844                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2845         int i;
2846
2847         if (!aconnector || !aconnector->dc_link)
2848                 return;
2849
2850         link = aconnector->dc_link;
2851         if (link->connector_signal != SIGNAL_TYPE_EDP)
2852                 return;
2853
2854         conn_base = &aconnector->base;
2855         adev = drm_to_adev(conn_base->dev);
2856         dm = &adev->dm;
2857         for (i = 0; i < dm->num_of_edps; i++) {
2858                 if (link == dm->backlight_link[i])
2859                         break;
2860         }
2861         if (i >= dm->num_of_edps)
2862                 return;
2863         caps = &dm->backlight_caps[i];
2864         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2865         caps->aux_support = false;
2866         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2867         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2868
2869         if (caps->ext_caps->bits.oled == 1 /*||
2870             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2871             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2872                 caps->aux_support = true;
2873
2874         if (amdgpu_backlight == 0)
2875                 caps->aux_support = false;
2876         else if (amdgpu_backlight == 1)
2877                 caps->aux_support = true;
2878
2879         /* From the specification (CTA-861-G), the maximum luminance is
2880          * calculated as:
2881          *      Luminance = 50*2**(CV/32)
2882          * where CV is a one-byte value.
2883          * Evaluating this expression would normally require floating-point
2884          * precision; to avoid that complexity, we take advantage of the fact
2885          * that CV is divided by a constant. By Euclid's division algorithm,
2886          * CV can be written as CV = 32*q + r. Substituting this into the
2887          * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2888          * to pre-compute 50*2**(r/32) for each possible r. The values were
2889          * generated with the following Ruby line:
2890          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2891          * The results can be verified against pre_computed_values.
2892          */
2894         q = max_cll >> 5;
2895         r = max_cll % 32;
2896         max = (1 << q) * pre_computed_values[r];
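	/*
	 * Worked example (illustrative only): max_cll = 70 gives q = 2 and
	 * r = 6, so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
	 * matching round(50 * 2**(70/32)) = 228 nits.
	 */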
2897
2898         /*
2899          * min luminance: maxLum * (CV/255)^2 / 100
2900          * Compute this in a single integer expression: rounding min_cll/255
2901          * first would truncate the squared term to zero for most CV values.
2902          * The intermediate product fits in u32 (max <= 12544, min_cll <= 255).
2903          */
2904         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2901
2902         caps->aux_max_input_signal = max;
2903         caps->aux_min_input_signal = min;
2904 }
2905
2906 void amdgpu_dm_update_connector_after_detect(
2907                 struct amdgpu_dm_connector *aconnector)
2908 {
2909         struct drm_connector *connector = &aconnector->base;
2910         struct drm_device *dev = connector->dev;
2911         struct dc_sink *sink;
2912
2913         /* MST handled by drm_mst framework */
2914         if (aconnector->mst_mgr.mst_state)
2915                 return;
2916
2917         sink = aconnector->dc_link->local_sink;
2918         if (sink)
2919                 dc_sink_retain(sink);
2920
2921         /*
2922          * An EDID-managed connector gets its first update only in the mode_valid
2923          * hook; the connector sink is then set to either the fake or the physical
2924          * sink, depending on the link status. Skip if already done during boot.
2925          */
2926         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2927                         && aconnector->dc_em_sink) {
2928
2929                 /*
2930                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2931                  * fake a stream, because connector->sink is set to NULL on resume.
2932                  */
2933                 mutex_lock(&dev->mode_config.mutex);
2934
2935                 if (sink) {
2936                         if (aconnector->dc_sink) {
2937                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2938                                 /*
2939                                  * The retain above and release below bump the sink
2940                                  * refcount: after disconnect the link no longer points to
2941                                  * it, so a crtc-to-connector reshuffle by a UMD would
2942                                  * otherwise trigger an unwanted dc_sink release.
2943                                  */
2944                                 dc_sink_release(aconnector->dc_sink);
2945                         }
2946                         aconnector->dc_sink = sink;
2947                         dc_sink_retain(aconnector->dc_sink);
2948                         amdgpu_dm_update_freesync_caps(connector,
2949                                         aconnector->edid);
2950                 } else {
2951                         amdgpu_dm_update_freesync_caps(connector, NULL);
2952                         if (!aconnector->dc_sink) {
2953                                 aconnector->dc_sink = aconnector->dc_em_sink;
2954                                 dc_sink_retain(aconnector->dc_sink);
2955                         }
2956                 }
2957
2958                 mutex_unlock(&dev->mode_config.mutex);
2959
2960                 if (sink)
2961                         dc_sink_release(sink);
2962                 return;
2963         }
2964
2965         /*
2966          * TODO: temporary guard until a proper fix is found;
2967          * if this sink is an MST sink, we should not do anything.
2968          */
2969         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2970                 dc_sink_release(sink);
2971                 return;
2972         }
2973
2974         if (aconnector->dc_sink == sink) {
2975                 /*
2976                  * We got a DP short pulse (link loss, DP CTS, etc.).
2977                  * Do nothing.
2978                  */
2979                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2980                                 aconnector->connector_id);
2981                 if (sink)
2982                         dc_sink_release(sink);
2983                 return;
2984         }
2985
2986         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2987                 aconnector->connector_id, aconnector->dc_sink, sink);
2988
2989         mutex_lock(&dev->mode_config.mutex);
2990
2991         /*
2992          * 1. Update status of the drm connector
2993          * 2. Send an event and let userspace tell us what to do
2994          */
2995         if (sink) {
2996                 /*
2997                  * TODO: check if we still need the S3 mode update workaround.
2998                  * If yes, put it here.
2999                  */
3000                 if (aconnector->dc_sink) {
3001                         amdgpu_dm_update_freesync_caps(connector, NULL);
3002                         dc_sink_release(aconnector->dc_sink);
3003                 }
3004
3005                 aconnector->dc_sink = sink;
3006                 dc_sink_retain(aconnector->dc_sink);
3007                 if (sink->dc_edid.length == 0) {
3008                         aconnector->edid = NULL;
3009                         if (aconnector->dc_link->aux_mode) {
3010                                 drm_dp_cec_unset_edid(
3011                                         &aconnector->dm_dp_aux.aux);
3012                         }
3013                 } else {
3014                         aconnector->edid =
3015                                 (struct edid *)sink->dc_edid.raw_edid;
3016
3017                         if (aconnector->dc_link->aux_mode)
3018                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3019                                                     aconnector->edid);
3020                 }
3021
3022                 drm_connector_update_edid_property(connector, aconnector->edid);
3023                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3024                 update_connector_ext_caps(aconnector);
3025         } else {
3026                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3027                 amdgpu_dm_update_freesync_caps(connector, NULL);
3028                 drm_connector_update_edid_property(connector, NULL);
3029                 aconnector->num_modes = 0;
3030                 dc_sink_release(aconnector->dc_sink);
3031                 aconnector->dc_sink = NULL;
3032                 aconnector->edid = NULL;
3033 #ifdef CONFIG_DRM_AMD_DC_HDCP
3034                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
3035                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3036                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3037 #endif
3038         }
3039
3040         mutex_unlock(&dev->mode_config.mutex);
3041
3042         update_subconnector_property(aconnector);
3043
3044         if (sink)
3045                 dc_sink_release(sink);
3046 }
3047
3048 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3049 {
3050         struct drm_connector *connector = &aconnector->base;
3051         struct drm_device *dev = connector->dev;
3052         enum dc_connection_type new_connection_type = dc_connection_none;
3053         struct amdgpu_device *adev = drm_to_adev(dev);
3054         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3055         struct dm_crtc_state *dm_crtc_state = NULL;
3056
3057         if (adev->dm.disable_hpd_irq)
3058                 return;
3059
3060         if (dm_con_state->base.state && dm_con_state->base.crtc)
3061                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3062                                         dm_con_state->base.state,
3063                                         dm_con_state->base.crtc));
3064         /*
3065          * On failure, or in the MST case, there is no need to update the connector
3066          * status or notify the OS, since MST handles this in its own context.
3067          */
3068         mutex_lock(&aconnector->hpd_lock);
3069
3070 #ifdef CONFIG_DRM_AMD_DC_HDCP
3071         if (adev->dm.hdcp_workqueue) {
3072                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3073                 dm_con_state->update_hdcp = true;
3074         }
3075 #endif
3076         if (aconnector->fake_enable)
3077                 aconnector->fake_enable = false;
3078
3079         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3080                 DRM_ERROR("KMS: Failed to detect connector\n");
3081
3082         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3083                 emulated_link_detect(aconnector->dc_link);
3084
3085                 drm_modeset_lock_all(dev);
3086                 dm_restore_drm_connector_state(dev, connector);
3087                 drm_modeset_unlock_all(dev);
3088
3089                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3090                         drm_kms_helper_connector_hotplug_event(connector);
3091
3092         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3093                 if (new_connection_type == dc_connection_none &&
3094                     aconnector->dc_link->type == dc_connection_none &&
3095                     dm_crtc_state)
3096                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3097
3098                 amdgpu_dm_update_connector_after_detect(aconnector);
3099
3100                 drm_modeset_lock_all(dev);
3101                 dm_restore_drm_connector_state(dev, connector);
3102                 drm_modeset_unlock_all(dev);
3103
3104                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3105                         drm_kms_helper_connector_hotplug_event(connector);
3106         }
3107         mutex_unlock(&aconnector->hpd_lock);
3109 }
3110
3111 static void handle_hpd_irq(void *param)
3112 {
3113         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3114
3115         handle_hpd_irq_helper(aconnector);
3117 }
3118
3119 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3120 {
3121         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3122         uint8_t dret;
3123         bool new_irq_handled = false;
3124         int dpcd_addr;
3125         int dpcd_bytes_to_read;
3126
3127         const int max_process_count = 30;
3128         int process_count = 0;
3129
3130         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3131
3132         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3133                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3134                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3135                 dpcd_addr = DP_SINK_COUNT;
3136         } else {
3137                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3138                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3139                 dpcd_addr = DP_SINK_COUNT_ESI;
3140         }
3141
3142         dret = drm_dp_dpcd_read(
3143                 &aconnector->dm_dp_aux.aux,
3144                 dpcd_addr,
3145                 esi,
3146                 dpcd_bytes_to_read);
3147
3148         while (dret == dpcd_bytes_to_read &&
3149                 process_count < max_process_count) {
3150                 uint8_t retry;
3151                 dret = 0;
3152
3153                 process_count++;
3154
3155                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3156                 /* handle HPD short pulse irq */
3157                 if (aconnector->mst_mgr.mst_state)
3158                         drm_dp_mst_hpd_irq(
3159                                 &aconnector->mst_mgr,
3160                                 esi,
3161                                 &new_irq_handled);
3162
3163                 if (new_irq_handled) {
3164                         /* ACK at DPCD to notify downstream */
3165                         const int ack_dpcd_bytes_to_write =
3166                                 dpcd_bytes_to_read - 1;
3167
3168                         for (retry = 0; retry < 3; retry++) {
3169                                 uint8_t wret;
3170
3171                                 wret = drm_dp_dpcd_write(
3172                                         &aconnector->dm_dp_aux.aux,
3173                                         dpcd_addr + 1,
3174                                         &esi[1],
3175                                         ack_dpcd_bytes_to_write);
3176                                 if (wret == ack_dpcd_bytes_to_write)
3177                                         break;
3178                         }
3179
3180                         /* check if there is new irq to be handled */
3181                         dret = drm_dp_dpcd_read(
3182                                 &aconnector->dm_dp_aux.aux,
3183                                 dpcd_addr,
3184                                 esi,
3185                                 dpcd_bytes_to_read);
3186
3187                         new_irq_handled = false;
3188                 } else {
3189                         break;
3190                 }
3191         }
3192
3193         if (process_count == max_process_count)
3194                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3195 }
3196
3197 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3198                                                         union hpd_irq_data hpd_irq_data)
3199 {
3200         struct hpd_rx_irq_offload_work *offload_work =
3201                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3202
3203         if (!offload_work) {
3204                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3205                 return;
3206         }
3207
3208         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3209         offload_work->data = hpd_irq_data;
3210         offload_work->offload_wq = offload_wq;
3211
3212         queue_work(offload_wq->wq, &offload_work->work);
3213         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3214 }
3215
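/*
 * Note: handle_hpd_rx_irq() below routes automated-test requests and link
 * loss through schedule_hpd_rx_offload_work(), so the heavier recovery work
 * runs on the per-connector offload workqueue rather than in the HPD Rx
 * interrupt handling path itself.
 */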
3216 static void handle_hpd_rx_irq(void *param)
3217 {
3218         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3219         struct drm_connector *connector = &aconnector->base;
3220         struct drm_device *dev = connector->dev;
3221         struct dc_link *dc_link = aconnector->dc_link;
3222         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3223         bool result = false;
3224         enum dc_connection_type new_connection_type = dc_connection_none;
3225         struct amdgpu_device *adev = drm_to_adev(dev);
3226         union hpd_irq_data hpd_irq_data;
3227         bool link_loss = false;
3228         bool has_left_work = false;
3229         int idx = aconnector->base.index;
3230         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3231
3232         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3233
3234         if (adev->dm.disable_hpd_irq)
3235                 return;
3236
3237         /*
3238          * TODO: Temporarily take a mutex so the HPD interrupt does not
3239          * conflict on the GPIO; once the i2c helper is implemented, this
3240          * mutex should be retired.
3241          */
3242         mutex_lock(&aconnector->hpd_lock);
3243
3244         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3245                                                 &link_loss, true, &has_left_work);
3246
3247         if (!has_left_work)
3248                 goto out;
3249
3250         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3251                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3252                 goto out;
3253         }
3254
3255         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3256                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3257                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3258                         dm_handle_mst_sideband_msg(aconnector);
3259                         goto out;
3260                 }
3261
3262                 if (link_loss) {
3263                         bool skip = false;
3264
3265                         spin_lock(&offload_wq->offload_lock);
3266                         skip = offload_wq->is_handling_link_loss;
3267
3268                         if (!skip)
3269                                 offload_wq->is_handling_link_loss = true;
3270
3271                         spin_unlock(&offload_wq->offload_lock);
3272
3273                         if (!skip)
3274                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3275
3276                         goto out;
3277                 }
3278         }
3279
3280 out:
3281         if (result && !is_mst_root_connector) {
3282                 /* Downstream Port status changed. */
3283                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3284                         DRM_ERROR("KMS: Failed to detect connector\n");
3285
3286                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3287                         emulated_link_detect(dc_link);
3288
3289                         if (aconnector->fake_enable)
3290                                 aconnector->fake_enable = false;
3291
3292                         amdgpu_dm_update_connector_after_detect(aconnector);
3293
3295                         drm_modeset_lock_all(dev);
3296                         dm_restore_drm_connector_state(dev, connector);
3297                         drm_modeset_unlock_all(dev);
3298
3299                         drm_kms_helper_connector_hotplug_event(connector);
3300                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3301
3302                         if (aconnector->fake_enable)
3303                                 aconnector->fake_enable = false;
3304
3305                         amdgpu_dm_update_connector_after_detect(aconnector);
3306
3308                         drm_modeset_lock_all(dev);
3309                         dm_restore_drm_connector_state(dev, connector);
3310                         drm_modeset_unlock_all(dev);
3311
3312                         drm_kms_helper_connector_hotplug_event(connector);
3313                 }
3314         }
3315 #ifdef CONFIG_DRM_AMD_DC_HDCP
3316         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3317                 if (adev->dm.hdcp_workqueue)
3318                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3319         }
3320 #endif
3321
3322         if (dc_link->type != dc_connection_mst_branch)
3323                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3324
3325         mutex_unlock(&aconnector->hpd_lock);
3326 }
3327
3328 static void register_hpd_handlers(struct amdgpu_device *adev)
3329 {
3330         struct drm_device *dev = adev_to_drm(adev);
3331         struct drm_connector *connector;
3332         struct amdgpu_dm_connector *aconnector;
3333         const struct dc_link *dc_link;
3334         struct dc_interrupt_params int_params = {0};
3335
3336         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3337         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3338
3339         list_for_each_entry(connector,
3340                         &dev->mode_config.connector_list, head) {
3341
3342                 aconnector = to_amdgpu_dm_connector(connector);
3343                 dc_link = aconnector->dc_link;
3344
3345                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3346                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3347                         int_params.irq_source = dc_link->irq_source_hpd;
3348
3349                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3350                                         handle_hpd_irq,
3351                                         (void *) aconnector);
3352                 }
3353
3354                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3355
3356                         /* Also register for DP short pulse (hpd_rx). */
3357                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3358                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3359
3360                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3361                                         handle_hpd_rx_irq,
3362                                         (void *) aconnector);
3363
3364                         if (adev->dm.hpd_rx_offload_wq)
3365                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3366                                         aconnector;
3367                 }
3368         }
3369 }
3370
3371 #if defined(CONFIG_DRM_AMD_DC_SI)
3372 /* Register IRQ sources and initialize IRQ callbacks */
3373 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3374 {
3375         struct dc *dc = adev->dm.dc;
3376         struct common_irq_params *c_irq_params;
3377         struct dc_interrupt_params int_params = {0};
3378         int r;
3379         int i;
3380         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3381
3382         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3383         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3384
3385         /*
3386          * Actions of amdgpu_irq_add_id():
3387          * 1. Register a set() function with base driver.
3388          *    Base driver will call set() function to enable/disable an
3389          *    interrupt in DC hardware.
3390          * 2. Register amdgpu_dm_irq_handler().
3391          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3392          *    coming from DC hardware.
3393          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3394          *    for acknowledging and handling.
3395          */
3395
3396         /* Use VBLANK interrupt */
3397         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3398                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3399                 if (r) {
3400                         DRM_ERROR("Failed to add crtc irq id!\n");
3401                         return r;
3402                 }
3403
3404                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3405                 int_params.irq_source =
3406                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3407
3408                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3409
3410                 c_irq_params->adev = adev;
3411                 c_irq_params->irq_src = int_params.irq_source;
3412
3413                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3414                                 dm_crtc_high_irq, c_irq_params);
3415         }
3416
3417         /* Use GRPH_PFLIP interrupt */
3418         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3419                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3420                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3421                 if (r) {
3422                         DRM_ERROR("Failed to add page flip irq id!\n");
3423                         return r;
3424                 }
3425
3426                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3427                 int_params.irq_source =
3428                         dc_interrupt_to_irq_source(dc, i, 0);
3429
3430                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3431
3432                 c_irq_params->adev = adev;
3433                 c_irq_params->irq_src = int_params.irq_source;
3434
3435                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436                                 dm_pflip_high_irq, c_irq_params);
3438         }
3439
3440         /* HPD */
3441         r = amdgpu_irq_add_id(adev, client_id,
3442                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3443         if (r) {
3444                 DRM_ERROR("Failed to add hpd irq id!\n");
3445                 return r;
3446         }
3447
3448         register_hpd_handlers(adev);
3449
3450         return 0;
3451 }
3452 #endif
3453
3454 /* Register IRQ sources and initialize IRQ callbacks */
3455 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3456 {
3457         struct dc *dc = adev->dm.dc;
3458         struct common_irq_params *c_irq_params;
3459         struct dc_interrupt_params int_params = {0};
3460         int r;
3461         int i;
3462         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3463
3464         if (adev->family >= AMDGPU_FAMILY_AI)
3465                 client_id = SOC15_IH_CLIENTID_DCE;
3466
3467         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3468         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3469
3470         /*
3471          * Actions of amdgpu_irq_add_id():
3472          * 1. Register a set() function with base driver.
3473          *    Base driver will call set() function to enable/disable an
3474          *    interrupt in DC hardware.
3475          * 2. Register amdgpu_dm_irq_handler().
3476          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3477          *    coming from DC hardware.
3478          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3479          *    for acknowledging and handling.
3480          */
3480
3481         /* Use VBLANK interrupt */
3482         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3483                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3484                 if (r) {
3485                         DRM_ERROR("Failed to add crtc irq id!\n");
3486                         return r;
3487                 }
3488
3489                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3490                 int_params.irq_source =
3491                         dc_interrupt_to_irq_source(dc, i, 0);
3492
3493                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3494
3495                 c_irq_params->adev = adev;
3496                 c_irq_params->irq_src = int_params.irq_source;
3497
3498                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3499                                 dm_crtc_high_irq, c_irq_params);
3500         }
3501
3502         /* Use VUPDATE interrupt */
3503         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3504                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3505                 if (r) {
3506                         DRM_ERROR("Failed to add vupdate irq id!\n");
3507                         return r;
3508                 }
3509
3510                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3511                 int_params.irq_source =
3512                         dc_interrupt_to_irq_source(dc, i, 0);
3513
3514                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3515
3516                 c_irq_params->adev = adev;
3517                 c_irq_params->irq_src = int_params.irq_source;
3518
3519                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3520                                 dm_vupdate_high_irq, c_irq_params);
3521         }
3522
3523         /* Use GRPH_PFLIP interrupt */
3524         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3525                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3526                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3527                 if (r) {
3528                         DRM_ERROR("Failed to add page flip irq id!\n");
3529                         return r;
3530                 }
3531
3532                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3533                 int_params.irq_source =
3534                         dc_interrupt_to_irq_source(dc, i, 0);
3535
3536                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3537
3538                 c_irq_params->adev = adev;
3539                 c_irq_params->irq_src = int_params.irq_source;
3540
3541                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3542                                 dm_pflip_high_irq, c_irq_params);
3544         }
3545
3546         /* HPD */
3547         r = amdgpu_irq_add_id(adev, client_id,
3548                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3549         if (r) {
3550                 DRM_ERROR("Failed to add hpd irq id!\n");
3551                 return r;
3552         }
3553
3554         register_hpd_handlers(adev);
3555
3556         return 0;
3557 }
3558
3559 #if defined(CONFIG_DRM_AMD_DC_DCN)
3560 /* Register IRQ sources and initialize IRQ callbacks */
3561 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3562 {
3563         struct dc *dc = adev->dm.dc;
3564         struct common_irq_params *c_irq_params;
3565         struct dc_interrupt_params int_params = {0};
3566         int r;
3567         int i;
3568 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3569         static const unsigned int vrtl_int_srcid[] = {
3570                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3571                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3572                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3573                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3574                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3575                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3576         };
3577 #endif
3578
3579         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3580         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3581
3582         /*
3583          * Actions of amdgpu_irq_add_id():
3584          * 1. Register a set() function with base driver.
3585          *    Base driver will call set() function to enable/disable an
3586          *    interrupt in DC hardware.
3587          * 2. Register amdgpu_dm_irq_handler().
3588          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3589          *    coming from DC hardware.
3590          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3591          *    for acknowledging and handling.
3592          */
3593
3594         /* Use VSTARTUP interrupt */
3595         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3596                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3597                         i++) {
3598                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3599
3600                 if (r) {
3601                         DRM_ERROR("Failed to add crtc irq id!\n");
3602                         return r;
3603                 }
3604
3605                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3606                 int_params.irq_source =
3607                         dc_interrupt_to_irq_source(dc, i, 0);
3608
3609                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3610
3611                 c_irq_params->adev = adev;
3612                 c_irq_params->irq_src = int_params.irq_source;
3613
3614                 amdgpu_dm_irq_register_interrupt(
3615                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3616         }
3617
3618         /* Use otg vertical line interrupt */
3619 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3620         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3621                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3622                                 vrtl_int_srcid[i], &adev->vline0_irq);
3623
3624                 if (r) {
3625                         DRM_ERROR("Failed to add vline0 irq id!\n");
3626                         return r;
3627                 }
3628
3629                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3630                 int_params.irq_source =
3631                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3632
3633                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3634                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3635                         break;
3636                 }
3637
3638                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3639                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3640
3641                 c_irq_params->adev = adev;
3642                 c_irq_params->irq_src = int_params.irq_source;
3643
3644                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3645                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3646         }
3647 #endif
3648
3649         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3650          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3651          * to trigger at end of each vblank, regardless of state of the lock,
3652          * matching DCE behaviour.
3653          */
3654         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3655              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3656              i++) {
3657                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3658
3659                 if (r) {
3660                         DRM_ERROR("Failed to add vupdate irq id!\n");
3661                         return r;
3662                 }
3663
3664                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3665                 int_params.irq_source =
3666                         dc_interrupt_to_irq_source(dc, i, 0);
3667
3668                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3669
3670                 c_irq_params->adev = adev;
3671                 c_irq_params->irq_src = int_params.irq_source;
3672
3673                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3674                                 dm_vupdate_high_irq, c_irq_params);
3675         }
3676
3677         /* Use GRPH_PFLIP interrupt */
3678         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3679                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3680                         i++) {
3681                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3682                 if (r) {
3683                         DRM_ERROR("Failed to add page flip irq id!\n");
3684                         return r;
3685                 }
3686
3687                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3688                 int_params.irq_source =
3689                         dc_interrupt_to_irq_source(dc, i, 0);
3690
3691                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3692
3693                 c_irq_params->adev = adev;
3694                 c_irq_params->irq_src = int_params.irq_source;
3695
3696                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3697                                 dm_pflip_high_irq, c_irq_params);
3699         }
3700
3701         /* HPD */
3702         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3703                         &adev->hpd_irq);
3704         if (r) {
3705                 DRM_ERROR("Failed to add hpd irq id!\n");
3706                 return r;
3707         }
3708
3709         register_hpd_handlers(adev);
3710
3711         return 0;
3712 }
3713 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3714 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3715 {
3716         struct dc *dc = adev->dm.dc;
3717         struct common_irq_params *c_irq_params;
3718         struct dc_interrupt_params int_params = {0};
3719         int r, i;
3720
3721         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3722         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3723
3724         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3725                         &adev->dmub_outbox_irq);
3726         if (r) {
3727                 DRM_ERROR("Failed to add outbox irq id!\n");
3728                 return r;
3729         }
3730
3731         if (dc->ctx->dmub_srv) {
3732                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3733                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3734                 int_params.irq_source =
3735                         dc_interrupt_to_irq_source(dc, i, 0);
3736
3737                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3738
3739                 c_irq_params->adev = adev;
3740                 c_irq_params->irq_src = int_params.irq_source;
3741
3742                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3743                                 dm_dmub_outbox1_low_irq, c_irq_params);
3744         }
3745
3746         return 0;
3747 }
3748 #endif
3749
3750 /*
3751  * Acquires the lock for the atomic state object and returns
3752  * the new atomic state.
3753  *
3754  * This should only be called during atomic check.
3755  */
3756 int dm_atomic_get_state(struct drm_atomic_state *state,
3757                         struct dm_atomic_state **dm_state)
3758 {
3759         struct drm_device *dev = state->dev;
3760         struct amdgpu_device *adev = drm_to_adev(dev);
3761         struct amdgpu_display_manager *dm = &adev->dm;
3762         struct drm_private_state *priv_state;
3763
3764         if (*dm_state)
3765                 return 0;
3766
3767         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3768         if (IS_ERR(priv_state))
3769                 return PTR_ERR(priv_state);
3770
3771         *dm_state = to_dm_atomic_state(priv_state);
3772
3773         return 0;
3774 }
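/*
 * Illustrative usage from an atomic_check path (a sketch, not an actual
 * call site in this file):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * After a successful return, dm_state->context is the DC state tracked by
 * this atomic transaction.
 */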
3775
3776 static struct dm_atomic_state *
3777 dm_atomic_get_new_state(struct drm_atomic_state *state)
3778 {
3779         struct drm_device *dev = state->dev;
3780         struct amdgpu_device *adev = drm_to_adev(dev);
3781         struct amdgpu_display_manager *dm = &adev->dm;
3782         struct drm_private_obj *obj;
3783         struct drm_private_state *new_obj_state;
3784         int i;
3785
3786         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3787                 if (obj->funcs == dm->atomic_obj.funcs)
3788                         return to_dm_atomic_state(new_obj_state);
3789         }
3790
3791         return NULL;
3792 }
3793
3794 static struct drm_private_state *
3795 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3796 {
3797         struct dm_atomic_state *old_state, *new_state;
3798
3799         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3800         if (!new_state)
3801                 return NULL;
3802
3803         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3804
3805         old_state = to_dm_atomic_state(obj->state);
3806
3807         if (old_state && old_state->context)
3808                 new_state->context = dc_copy_state(old_state->context);
3809
3810         if (!new_state->context) {
3811                 kfree(new_state);
3812                 return NULL;
3813         }
3814
3815         return &new_state->base;
3816 }
3817
3818 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3819                                     struct drm_private_state *state)
3820 {
3821         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3822
3823         if (dm_state && dm_state->context)
3824                 dc_release_state(dm_state->context);
3825
3826         kfree(dm_state);
3827 }
3828
3829 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3830         .atomic_duplicate_state = dm_atomic_duplicate_state,
3831         .atomic_destroy_state = dm_atomic_destroy_state,
3832 };
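/*
 * Through these hooks the DRM atomic core duplicates and destroys the DM
 * private object alongside each atomic state, so every check/commit cycle
 * gets its own copy of the dc_state context (see dc_copy_state() and
 * dc_release_state() above).
 */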
3833
3834 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3835 {
3836         struct dm_atomic_state *state;
3837         int r;
3838
3839         adev->mode_info.mode_config_initialized = true;
3840
3841         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3842         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3843
3844         adev_to_drm(adev)->mode_config.max_width = 16384;
3845         adev_to_drm(adev)->mode_config.max_height = 16384;
3846
3847         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3848         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3849         /* indicates support for immediate flip */
3850         adev_to_drm(adev)->mode_config.async_page_flip = true;
3851
3852         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3853
3854         state = kzalloc(sizeof(*state), GFP_KERNEL);
3855         if (!state)
3856                 return -ENOMEM;
3857
3858         state->context = dc_create_state(adev->dm.dc);
3859         if (!state->context) {
3860                 kfree(state);
3861                 return -ENOMEM;
3862         }
3863
3864         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3865
3866         drm_atomic_private_obj_init(adev_to_drm(adev),
3867                                     &adev->dm.atomic_obj,
3868                                     &state->base,
3869                                     &dm_atomic_state_funcs);
3870
3871         r = amdgpu_display_modeset_create_props(adev);
3872         if (r) {
3873                 dc_release_state(state->context);
3874                 kfree(state);
3875                 return r;
3876         }
3877
3878         r = amdgpu_dm_audio_init(adev);
3879         if (r) {
3880                 dc_release_state(state->context);
3881                 kfree(state);
3882                 return r;
3883         }
3884
3885         return 0;
3886 }
3887
3888 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3889 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3890 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3891
3892 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3893         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3894
3895 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3896                                             int bl_idx)
3897 {
3898 #if defined(CONFIG_ACPI)
3899         struct amdgpu_dm_backlight_caps caps;
3900
3901         memset(&caps, 0, sizeof(caps));
3902
3903         if (dm->backlight_caps[bl_idx].caps_valid)
3904                 return;
3905
3906         amdgpu_acpi_get_backlight_caps(&caps);
3907         if (caps.caps_valid) {
3908                 dm->backlight_caps[bl_idx].caps_valid = true;
3909                 if (caps.aux_support)
3910                         return;
3911                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3912                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3913         } else {
3914                 dm->backlight_caps[bl_idx].min_input_signal =
3915                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3916                 dm->backlight_caps[bl_idx].max_input_signal =
3917                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3918         }
3919 #else
3920         if (dm->backlight_caps[bl_idx].aux_support)
3921                 return;
3922
3923         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3924         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3925 #endif
3926 }
3927
3928 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3929                                 unsigned *min, unsigned *max)
3930 {
3931         if (!caps)
3932                 return 0;
3933
3934         if (caps->aux_support) {
3935                 // Firmware limits are in nits, DC API wants millinits.
3936                 *max = 1000 * caps->aux_max_input_signal;
3937                 *min = 1000 * caps->aux_min_input_signal;
3938         } else {
3939                 // Firmware limits are 8-bit, PWM control is 16-bit.
3940                 *max = 0x101 * caps->max_input_signal;
3941                 *min = 0x101 * caps->min_input_signal;
3942         }
3943         return 1;
3944 }
3945
3946 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3947                                         uint32_t brightness)
3948 {
3949         unsigned min, max;
3950
3951         if (!get_brightness_range(caps, &min, &max))
3952                 return brightness;
3953
3954         // Rescale 0..255 to min..max
3955         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3956                                        AMDGPU_MAX_BL_LEVEL);
3957 }
3958
3959 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3960                                       uint32_t brightness)
3961 {
3962         unsigned min, max;
3963
3964         if (!get_brightness_range(caps, &min, &max))
3965                 return brightness;
3966
3967         if (brightness < min)
3968                 return 0;
3969         // Rescale min..max to 0..255
3970         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3971                                  max - min);
3972 }
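/*
 * Worked example (illustrative, PWM path, assuming AMDGPU_MAX_BL_LEVEL is
 * 255): with min_input_signal = 12 and max_input_signal = 255, the range
 * becomes min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user
 * brightness of 128 then maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255)
 * = 34432, and converting 34432 back yields 128 again.
 */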
3973
3974 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3975                                          int bl_idx,
3976                                          u32 user_brightness)
3977 {
3978         struct amdgpu_dm_backlight_caps caps;
3979         struct dc_link *link;
3980         u32 brightness;
3981         bool rc;
3982
3983         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3984         caps = dm->backlight_caps[bl_idx];
3985
3986         dm->brightness[bl_idx] = user_brightness;
3987         /* update scratch register */
3988         if (bl_idx == 0)
3989                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3990         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3991         link = (struct dc_link *)dm->backlight_link[bl_idx];
3992
3993         /* Change brightness based on AUX property */
3994         if (caps.aux_support) {
3995                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3996                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3997                 if (!rc)
3998                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3999         } else {
4000                 rc = dc_link_set_backlight_level(link, brightness, 0);
4001                 if (!rc)
4002                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4003         }
4004
4005         return rc ? 0 : 1;
4006 }
4007
4008 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4009 {
4010         struct amdgpu_display_manager *dm = bl_get_data(bd);
4011         int i;
4012
4013         for (i = 0; i < dm->num_of_edps; i++) {
4014                 if (bd == dm->backlight_dev[i])
4015                         break;
4016         }
4017         if (i >= dm->num_of_edps)
4018                 i = 0;
4019         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4020
4021         return 0;
4022 }
4023
4024 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4025                                          int bl_idx)
4026 {
4027         struct amdgpu_dm_backlight_caps caps;
4028         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4029
4030         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4031         caps = dm->backlight_caps[bl_idx];
4032
4033         if (caps.aux_support) {
4034                 u32 avg, peak;
4035                 bool rc;
4036
4037                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4038                 if (!rc)
4039                         return dm->brightness[bl_idx];
4040                 return convert_brightness_to_user(&caps, avg);
4041         } else {
4042                 int ret = dc_link_get_backlight_level(link);
4043
4044                 if (ret == DC_ERROR_UNEXPECTED)
4045                         return dm->brightness[bl_idx];
4046                 return convert_brightness_to_user(&caps, ret);
4047         }
4048 }
4049
4050 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4051 {
4052         struct amdgpu_display_manager *dm = bl_get_data(bd);
4053         int i;
4054
4055         for (i = 0; i < dm->num_of_edps; i++) {
4056                 if (bd == dm->backlight_dev[i])
4057                         break;
4058         }
4059         if (i >= dm->num_of_edps)
4060                 i = 0;
4061         return amdgpu_dm_backlight_get_level(dm, i);
4062 }
4063
4064 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4065         .options = BL_CORE_SUSPENDRESUME,
4066         .get_brightness = amdgpu_dm_backlight_get_brightness,
4067         .update_status  = amdgpu_dm_backlight_update_status,
4068 };
4069
4070 static void
4071 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4072 {
4073         char bl_name[16];
4074         struct backlight_properties props = { 0 };
4075
4076         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4077         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4078
4079         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4080         props.brightness = AMDGPU_MAX_BL_LEVEL;
4081         props.type = BACKLIGHT_RAW;
4082
4083         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4084                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4085
4086         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4087                                                                        adev_to_drm(dm->adev)->dev,
4088                                                                        dm,
4089                                                                        &amdgpu_dm_backlight_ops,
4090                                                                        &props);
4091
4092         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4093                 DRM_ERROR("DM: Backlight registration failed!\n");
4094         else
4095                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4096 }
4097 #endif
4098
static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * all CRTCs on planes that won't be used as a primary plane
	 * for a CRTC - i.e. overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

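/*
 * Hook up backlight control for an eDP/LVDS link that has a sink attached,
 * recording the link so the brightness callbacks can find it later.
 */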
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		if (!dm->backlight_dev[dm->num_of_edps])
			amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev[dm->num_of_edps]) {
			dm->backlight_link[dm->num_of_edps] = link;
			dm->num_of_edps++;
		}
	}
#endif
}

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The CRTC and connector instead hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actually used number of CRTCs */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Use Outbox interrupt */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 5):
	case IP_VERSION(3, 1, 6):
	case IP_VERSION(2, 1, 0):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      adev->ip_versions[DCE_HWIP][0]);
	}

	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			psr_feature_enabled = true;
			break;
		default:
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}
#endif

	/* Disable vblank IRQs aggressively for power-saving. */
	adev_to_drm(adev)->vblank_disable_immediate = true;

	/* Loop over all the links on the board, creating a connector and encoder for each. */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (dm->num_of_edps)
				update_connector_ext_caps(aconnector);
			if (psr_feature_enabled)
				amdgpu_dm_set_psr_caps(link);

			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
			 * PSR is also supported.
			 */
			if (link->psr_settings.psr_feature_enabled)
				adev_to_drm(adev)->vblank_disable_immediate = false;
		}
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
					adev->ip_versions[DCE_HWIP][0]);
			goto fail;
		}
#endif
		break;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};

#if defined(CONFIG_DEBUG_KERNEL_DC)

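/*
 * Debug-only sysfs hook to force the DM through its suspend/resume paths:
 * writing a non-zero value resumes the DM and sends a hotplug event, writing
 * zero suspends it.
 */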
static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct drm_device *drm_dev = dev_get_drvdata(device);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif

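/*
 * Early init: set the per-ASIC CRTC/HPD/DIG counts, install the IRQ and
 * display function tables, and (on debug kernels) create the s3_debug sysfs
 * file.
 */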
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			adev->mode_info.num_crtc = 4;
			adev->mode_info.num_hpd = 4;
			adev->mode_info.num_dig = 4;
			break;
		default:
			DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
					adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
#endif
		break;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

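/*
 * Helpers describing whether an atomic CRTC state requires a full stream
 * modeset (active and needs_modeset) or a stream teardown (inactive and
 * needs_modeset).
 */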
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

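/*
 * Look up the per-format scaling limits from the DC plane caps. Limits are in
 * units of 1/1000th of the source size (1000 == 1.0), and a raw cap value of
 * 1 means scaling is not allowed for that format.
 */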
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
					 struct drm_framebuffer *fb,
					 int *min_downscale, int *max_upscale)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];

	switch (fb->format->format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		*max_upscale = plane_cap->max_upscale_factor.nv12;
		*min_downscale = plane_cap->max_downscale_factor.nv12;
		break;

	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		*max_upscale = plane_cap->max_upscale_factor.fp16;
		*min_downscale = plane_cap->max_downscale_factor.fp16;
		break;

	default:
		*max_upscale = plane_cap->max_upscale_factor.argb8888;
		*min_downscale = plane_cap->max_downscale_factor.argb8888;
		break;
	}

	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use
	 * a scaling factor of 1.0 == 1000 units.
	 */
	if (*max_upscale == 1)
		*max_upscale = 1000;

	if (*min_downscale == 1)
		*min_downscale = 1000;
}

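/*
 * Translate DRM plane state (src/dst rectangles in 16.16 fixed point and CRTC
 * coordinates) into a dc_scaling_info, rejecting degenerate rectangles and
 * scaling factors outside the per-format limits.
 */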
static int fill_dc_scaling_info(struct amdgpu_device *adev,
				const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h, min_downscale, max_upscale;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore the fractional part for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	/*
	 * For reasons we don't (yet) fully understand a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious)
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use-case to reproduce a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
		return -EINVAL;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* Validate scaling per-format with DC plane caps */
	if (state->plane && state->plane->dev && state->fb) {
		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
					     &min_downscale, &max_upscale);
	} else {
		min_downscale = 250;
		max_upscale = 16000;
	}

	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < min_downscale || scale_w > max_upscale)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < min_downscale || scale_h > max_upscale)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

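/*
 * Decode the pre-GFX9 tiling parameters (bank width/height, tile split,
 * macro-tile aspect, pipe config) from the BO tiling flags.
 */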
static void
fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
				 uint64_t tiling_flags)
{
	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}

static void
fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
				  union dc_tiling_info *tiling_info)
{
	tiling_info->gfx9.num_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;
	tiling_info->gfx9.num_banks =
		adev->gfx.config.gb_addr_config_fields.num_banks;
	tiling_info->gfx9.pipe_interleave =
		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
	tiling_info->gfx9.num_shader_engines =
		adev->gfx.config.gb_addr_config_fields.num_se;
	tiling_info->gfx9.max_compressed_frags =
		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
	tiling_info->gfx9.num_rb_per_se =
		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
	tiling_info->gfx9.shaderEnable = 1;
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

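/*
 * Ask DC whether the surface can actually be displayed with the requested
 * DCC parameters; returns -EINVAL if DCC is enabled but unsupported for this
 * format/size/swizzle combination.
 */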
static int
validate_dcc(struct amdgpu_device *adev,
	     const enum surface_pixel_format format,
	     const enum dc_rotation_angle rotation,
	     const union dc_tiling_info *tiling_info,
	     const struct dc_plane_dcc_param *dcc,
	     const struct dc_plane_address *address,
	     const struct plane_size *plane_size)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (!dcc->enable)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
	    !dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (dcc->independent_64b_blks == 0 &&
	    output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	return 0;
}

static bool
modifier_has_dcc(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}

static unsigned
modifier_gfx9_swizzle_mode(uint64_t modifier)
{
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return 0;

	return AMD_FMT_MOD_GET(TILE, modifier);
}

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}

static void
fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
				    union dc_tiling_info *tiling_info,
				    uint64_t modifier)
{
	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);

	fill_gfx9_tiling_info_from_device(adev, tiling_info);

	if (!IS_AMD_FMT_MOD(modifier))
		return;

	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);

	if (adev->family >= AMDGPU_FAMILY_NV) {
		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
	} else {
		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;

		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
	}
}

enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};

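/*
 * Filter the format/modifier combinations exposed to userspace: LINEAR and
 * INVALID are always allowed, everything else must be on the plane's modifier
 * list and satisfy the per-family swizzle and DCC restrictions checked below.
 */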
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
					  uint32_t format,
					  uint64_t modifier)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	const struct drm_format_info *info = drm_format_info(format);
	int i;

	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

	if (!info)
		return false;

	/*
	 * We always have to allow these modifiers:
	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR ||
	    modifier == DRM_FORMAT_MOD_INVALID) {
		return true;
	}

	/* Check that the modifier is on the list of the plane's supported modifiers. */
	for (i = 0; i < plane->modifier_count; i++) {
		if (modifier == plane->modifiers[i])
			break;
	}
	if (i == plane->modifier_count)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
	    adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
	}

	return true;
}

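/*
 * Append a modifier to a dynamically grown array, doubling the capacity when
 * full. On allocation failure the array is freed and *mods is set to NULL, so
 * subsequent calls become no-ops and callers can detect the error at the end.
 */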
static void
add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}

static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}

static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}

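/*
 * Build the modifier list advertised for a plane based on the GPU family;
 * cursor planes are restricted to LINEAR. The list is terminated with
 * INVALID.
 */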
static int
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
	uint64_t size = 0, capacity = 128;
	*mods = NULL;

	/* We have not hooked up any pre-GFX9 modifiers. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return 0;

	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);

	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
		return *mods ? 0 : -ENOMEM;
	}

	switch (adev->family) {
	case AMDGPU_FAMILY_AI:
	case AMDGPU_FAMILY_RV:
		add_gfx9_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
	case AMDGPU_FAMILY_GC_10_3_6:
	case AMDGPU_FAMILY_GC_10_3_7:
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else
			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	}

	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);

	/* INVALID marks the end of the list. */
	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);

	if (!*mods)
		return -ENOMEM;

	return 0;
}

static int
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
					  const struct amdgpu_framebuffer *afb,
					  const enum surface_pixel_format format,
					  const enum dc_rotation_angle rotation,
					  const struct plane_size *plane_size,
					  union dc_tiling_info *tiling_info,
					  struct dc_plane_dcc_param *dcc,
					  struct dc_plane_address *address,
					  const bool force_disable_dcc)
{
	const uint64_t modifier = afb->base.modifier;
	int ret = 0;

	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);

	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
		uint64_t dcc_address = afb->address + afb->base.offsets[1];
		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);

		dcc->enable = 1;
		dcc->meta_pitch = afb->base.pitches[1];
		dcc->independent_64b_blks = independent_64b_blks;
		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
			if (independent_64b_blks && independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
			else if (independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_128b;
			else if (independent_64b_blks && !independent_128b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		} else {
			if (independent_64b_blks)
				dcc->dcc_ind_blk = hubp_ind_block_64b;
			else
				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
		}

		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
	}

	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
	if (ret)
		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);

	return ret;
}

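/*
 * Fill the DC surface description (size, pitch, addresses, tiling and DCC)
 * for a framebuffer: single-plane graphics formats use a GRAPHICS address,
 * video formats get separate luma/chroma addresses, and GFX9+ tiling is
 * derived from the framebuffer modifier.
 */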
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

5409 static void
5410 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5411                                bool *per_pixel_alpha, bool *global_alpha,
5412                                int *global_alpha_value)
5413 {
5414         *per_pixel_alpha = false;
5415         *global_alpha = false;
5416         *global_alpha_value = 0xff;
5417
5418         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5419                 return;
5420
5421         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5422                 static const uint32_t alpha_formats[] = {
5423                         DRM_FORMAT_ARGB8888,
5424                         DRM_FORMAT_RGBA8888,
5425                         DRM_FORMAT_ABGR8888,
5426                 };
5427                 uint32_t format = plane_state->fb->format->format;
5428                 unsigned int i;
5429
5430                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5431                         if (format == alpha_formats[i]) {
5432                                 *per_pixel_alpha = true;
5433                                 break;
5434                         }
5435                 }
5436         }
5437
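             /*
              * DRM plane alpha is 16 bit (0x0-0xffff) while DC expects an
              * 8-bit global alpha, so drop the low byte.
              */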
5438         if (plane_state->alpha < 0xffff) {
5439                 *global_alpha = true;
5440                 *global_alpha_value = plane_state->alpha >> 8;
5441         }
5442 }
5443
5444 static int
5445 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5446                             const enum surface_pixel_format format,
5447                             enum dc_color_space *color_space)
5448 {
5449         bool full_range;
5450
5451         *color_space = COLOR_SPACE_SRGB;
5452
5453         /* DRM color properties only affect non-RGB formats. */
5454         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5455                 return 0;
5456
5457         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5458
5459         switch (plane_state->color_encoding) {
5460         case DRM_COLOR_YCBCR_BT601:
5461                 if (full_range)
5462                         *color_space = COLOR_SPACE_YCBCR601;
5463                 else
5464                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5465                 break;
5466
5467         case DRM_COLOR_YCBCR_BT709:
5468                 if (full_range)
5469                         *color_space = COLOR_SPACE_YCBCR709;
5470                 else
5471                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5472                 break;
5473
5474         case DRM_COLOR_YCBCR_BT2020:
5475                 if (full_range)
5476                         *color_space = COLOR_SPACE_2020_YCBCR;
5477                 else
5478                         return -EINVAL;
5479                 break;
5480
5481         default:
5482                 return -EINVAL;
5483         }
5484
5485         return 0;
5486 }
5487
5488 static int
5489 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5490                             const struct drm_plane_state *plane_state,
5491                             const uint64_t tiling_flags,
5492                             struct dc_plane_info *plane_info,
5493                             struct dc_plane_address *address,
5494                             bool tmz_surface,
5495                             bool force_disable_dcc)
5496 {
5497         const struct drm_framebuffer *fb = plane_state->fb;
5498         const struct amdgpu_framebuffer *afb =
5499                 to_amdgpu_framebuffer(plane_state->fb);
5500         int ret;
5501
5502         memset(plane_info, 0, sizeof(*plane_info));
5503
5504         switch (fb->format->format) {
5505         case DRM_FORMAT_C8:
5506                 plane_info->format =
5507                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5508                 break;
5509         case DRM_FORMAT_RGB565:
5510                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5511                 break;
5512         case DRM_FORMAT_XRGB8888:
5513         case DRM_FORMAT_ARGB8888:
5514                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5515                 break;
5516         case DRM_FORMAT_XRGB2101010:
5517         case DRM_FORMAT_ARGB2101010:
5518                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5519                 break;
5520         case DRM_FORMAT_XBGR2101010:
5521         case DRM_FORMAT_ABGR2101010:
5522                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5523                 break;
5524         case DRM_FORMAT_XBGR8888:
5525         case DRM_FORMAT_ABGR8888:
5526                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5527                 break;
5528         case DRM_FORMAT_NV21:
5529                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5530                 break;
5531         case DRM_FORMAT_NV12:
5532                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5533                 break;
5534         case DRM_FORMAT_P010:
5535                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5536                 break;
5537         case DRM_FORMAT_XRGB16161616F:
5538         case DRM_FORMAT_ARGB16161616F:
5539                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5540                 break;
5541         case DRM_FORMAT_XBGR16161616F:
5542         case DRM_FORMAT_ABGR16161616F:
5543                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5544                 break;
5545         case DRM_FORMAT_XRGB16161616:
5546         case DRM_FORMAT_ARGB16161616:
5547                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5548                 break;
5549         case DRM_FORMAT_XBGR16161616:
5550         case DRM_FORMAT_ABGR16161616:
5551                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5552                 break;
5553         default:
5554                 DRM_ERROR(
5555                         "Unsupported screen format %p4cc\n",
5556                         &fb->format->format);
5557                 return -EINVAL;
5558         }
5559
5560         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5561         case DRM_MODE_ROTATE_0:
5562                 plane_info->rotation = ROTATION_ANGLE_0;
5563                 break;
5564         case DRM_MODE_ROTATE_90:
5565                 plane_info->rotation = ROTATION_ANGLE_90;
5566                 break;
5567         case DRM_MODE_ROTATE_180:
5568                 plane_info->rotation = ROTATION_ANGLE_180;
5569                 break;
5570         case DRM_MODE_ROTATE_270:
5571                 plane_info->rotation = ROTATION_ANGLE_270;
5572                 break;
5573         default:
5574                 plane_info->rotation = ROTATION_ANGLE_0;
5575                 break;
5576         }
5577
5578         plane_info->visible = true;
5579         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5580
5581         plane_info->layer_index = 0;
5582
5583         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5584                                           &plane_info->color_space);
5585         if (ret)
5586                 return ret;
5587
5588         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5589                                            plane_info->rotation, tiling_flags,
5590                                            &plane_info->tiling_info,
5591                                            &plane_info->plane_size,
5592                                            &plane_info->dcc, address, tmz_surface,
5593                                            force_disable_dcc);
5594         if (ret)
5595                 return ret;
5596
5597         fill_blending_from_plane_state(
5598                 plane_state, &plane_info->per_pixel_alpha,
5599                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5600
5601         return 0;
5602 }
5603
5604 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5605                                     struct dc_plane_state *dc_plane_state,
5606                                     struct drm_plane_state *plane_state,
5607                                     struct drm_crtc_state *crtc_state)
5608 {
5609         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5610         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5611         struct dc_scaling_info scaling_info;
5612         struct dc_plane_info plane_info;
5613         int ret;
5614         bool force_disable_dcc = false;
5615
5616         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5617         if (ret)
5618                 return ret;
5619
5620         dc_plane_state->src_rect = scaling_info.src_rect;
5621         dc_plane_state->dst_rect = scaling_info.dst_rect;
5622         dc_plane_state->clip_rect = scaling_info.clip_rect;
5623         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5624
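             /* Raven: keep DCC disabled while the device is suspending. */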
5625         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5626         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5627                                           afb->tiling_flags,
5628                                           &plane_info,
5629                                           &dc_plane_state->address,
5630                                           afb->tmz_surface,
5631                                           force_disable_dcc);
5632         if (ret)
5633                 return ret;
5634
5635         dc_plane_state->format = plane_info.format;
5636         dc_plane_state->color_space = plane_info.color_space;
5638         dc_plane_state->plane_size = plane_info.plane_size;
5639         dc_plane_state->rotation = plane_info.rotation;
5640         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5641         dc_plane_state->stereo_format = plane_info.stereo_format;
5642         dc_plane_state->tiling_info = plane_info.tiling_info;
5643         dc_plane_state->visible = plane_info.visible;
5644         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5645         dc_plane_state->global_alpha = plane_info.global_alpha;
5646         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5647         dc_plane_state->dcc = plane_info.dcc;
5648         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 */
5649         dc_plane_state->flip_int_enabled = true;
5650
5651         /*
5652          * Always set input transfer function, since plane state is refreshed
5653          * every time.
5654          */
5655         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5656         if (ret)
5657                 return ret;
5658
5659         return 0;
5660 }
5661
5662 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5663                                            const struct dm_connector_state *dm_state,
5664                                            struct dc_stream_state *stream)
5665 {
5666         enum amdgpu_rmx_type rmx_type;
5667
5668         struct rect src = { 0 }; /* viewport in composition space*/
5669         struct rect dst = { 0 }; /* stream addressable area */
5670
5671         /* no mode. nothing to be done */
5672         if (!mode)
5673                 return;
5674
5675         /* Full screen scaling by default */
5676         src.width = mode->hdisplay;
5677         src.height = mode->vdisplay;
5678         dst.width = stream->timing.h_addressable;
5679         dst.height = stream->timing.v_addressable;
5680
5681         if (dm_state) {
5682                 rmx_type = dm_state->scaling;
5683                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5684                         if (src.width * dst.height <
5685                                         src.height * dst.width) {
5686                                 /* height needs less upscaling/more downscaling */
5687                                 dst.width = src.width *
5688                                                 dst.height / src.height;
5689                         } else {
5690                                 /* width needs less upscaling/more downscaling */
5691                                 dst.height = src.height *
5692                                                 dst.width / src.width;
5693                         }
5694                 } else if (rmx_type == RMX_CENTER) {
5695                         dst = src;
5696                 }
5697
5698                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5699                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5700
5701                 if (dm_state->underscan_enable) {
5702                         dst.x += dm_state->underscan_hborder / 2;
5703                         dst.y += dm_state->underscan_vborder / 2;
5704                         dst.width -= dm_state->underscan_hborder;
5705                         dst.height -= dm_state->underscan_vborder;
5706                 }
5707         }
5708
5709         stream->src = src;
5710         stream->dst = dst;
5711
5712         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5713                       dst.x, dst.y, dst.width, dst.height);
5715 }
5716
5717 static enum dc_color_depth
5718 convert_color_depth_from_display_info(const struct drm_connector *connector,
5719                                       bool is_y420, int requested_bpc)
5720 {
5721         uint8_t bpc;
5722
5723         if (is_y420) {
5724                 bpc = 8;
5725
5726                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5727                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5728                         bpc = 16;
5729                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5730                         bpc = 12;
5731                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5732                         bpc = 10;
5733         } else {
5734                 bpc = (uint8_t)connector->display_info.bpc;
5735                 /* Assume 8 bpc by default if no bpc is specified. */
5736                 bpc = bpc ? bpc : 8;
5737         }
5738
5739         if (requested_bpc > 0) {
5740                 /*
5741                  * Cap display bpc based on the user requested value.
5742                  *
5743                  * The value for state->max_bpc may not be correctly updated
5744                  * depending on when the connector gets added to the state
5745                  * or if this was called outside of atomic check, so it
5746                  * can't be used directly.
5747                  */
5748                 bpc = min_t(u8, bpc, requested_bpc);
5749
5750                 /* Round down to the nearest even number. */
5751                 bpc = bpc - (bpc & 1);
5752         }
5753
5754         switch (bpc) {
5755         case 0:
5756                 /*
5757                  * Temporary Work around, DRM doesn't parse color depth for
5758                  * EDID revision before 1.4
5759                  * TODO: Fix edid parsing
5760                  */
5761                 return COLOR_DEPTH_888;
5762         case 6:
5763                 return COLOR_DEPTH_666;
5764         case 8:
5765                 return COLOR_DEPTH_888;
5766         case 10:
5767                 return COLOR_DEPTH_101010;
5768         case 12:
5769                 return COLOR_DEPTH_121212;
5770         case 14:
5771                 return COLOR_DEPTH_141414;
5772         case 16:
5773                 return COLOR_DEPTH_161616;
5774         default:
5775                 return COLOR_DEPTH_UNDEFINED;
5776         }
5777 }
5778
5779 static enum dc_aspect_ratio
5780 get_aspect_ratio(const struct drm_display_mode *mode_in)
5781 {
5782         /* 1-1 mapping, since both enums follow the HDMI spec. */
5783         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5784 }
5785
5786 static enum dc_color_space
5787 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5788 {
5789         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5790
5791         switch (dc_crtc_timing->pixel_encoding) {
5792         case PIXEL_ENCODING_YCBCR422:
5793         case PIXEL_ENCODING_YCBCR444:
5794         case PIXEL_ENCODING_YCBCR420:
5795         {
5796                 /*
5797                  * A pixel clock of 27.03 MHz (27030 kHz) separates HDTV from
5798                  * SDTV per the HDMI spec: use YCbCr709 above it and YCbCr601
5799                  * below.
5800                  */
5801                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5802                         if (dc_crtc_timing->flags.Y_ONLY)
5803                                 color_space =
5804                                         COLOR_SPACE_YCBCR709_LIMITED;
5805                         else
5806                                 color_space = COLOR_SPACE_YCBCR709;
5807                 } else {
5808                         if (dc_crtc_timing->flags.Y_ONLY)
5809                                 color_space =
5810                                         COLOR_SPACE_YCBCR601_LIMITED;
5811                         else
5812                                 color_space = COLOR_SPACE_YCBCR601;
5813                 }
5814
5815         }
5816         break;
5817         case PIXEL_ENCODING_RGB:
5818                 color_space = COLOR_SPACE_SRGB;
5819                 break;
5820
5821         default:
5822                 WARN_ON(1);
5823                 break;
5824         }
5825
5826         return color_space;
5827 }
5828
5829 static bool adjust_colour_depth_from_display_info(
5830         struct dc_crtc_timing *timing_out,
5831         const struct drm_display_info *info)
5832 {
5833         enum dc_color_depth depth = timing_out->display_color_depth;
5834         int normalized_clk;
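             /*
              * Step the colour depth down until the pixel clock, normalized
              * for pixel encoding and colour depth, fits max_tmds_clock.
              */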
5835         do {
5836                 normalized_clk = timing_out->pix_clk_100hz / 10;
5837                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5838                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5839                         normalized_clk /= 2;
5840                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5841                 switch (depth) {
5842                 case COLOR_DEPTH_888:
5843                         break;
5844                 case COLOR_DEPTH_101010:
5845                         normalized_clk = (normalized_clk * 30) / 24;
5846                         break;
5847                 case COLOR_DEPTH_121212:
5848                         normalized_clk = (normalized_clk * 36) / 24;
5849                         break;
5850                 case COLOR_DEPTH_161616:
5851                         normalized_clk = (normalized_clk * 48) / 24;
5852                         break;
5853                 default:
5854                         /* The above depths are the only ones valid for HDMI. */
5855                         return false;
5856                 }
5857                 if (normalized_clk <= info->max_tmds_clock) {
5858                         timing_out->display_color_depth = depth;
5859                         return true;
5860                 }
5861         } while (--depth > COLOR_DEPTH_666);
5862         return false;
5863 }
5864
5865 static void fill_stream_properties_from_drm_display_mode(
5866         struct dc_stream_state *stream,
5867         const struct drm_display_mode *mode_in,
5868         const struct drm_connector *connector,
5869         const struct drm_connector_state *connector_state,
5870         const struct dc_stream_state *old_stream,
5871         int requested_bpc)
5872 {
5873         struct dc_crtc_timing *timing_out = &stream->timing;
5874         const struct drm_display_info *info = &connector->display_info;
5875         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5876         struct hdmi_vendor_infoframe hv_frame;
5877         struct hdmi_avi_infoframe avi_frame;
5878
5879         memset(&hv_frame, 0, sizeof(hv_frame));
5880         memset(&avi_frame, 0, sizeof(avi_frame));
5881
5882         timing_out->h_border_left = 0;
5883         timing_out->h_border_right = 0;
5884         timing_out->v_border_top = 0;
5885         timing_out->v_border_bottom = 0;
5886         /* TODO: un-hardcode */
5887         if (drm_mode_is_420_only(info, mode_in)
5888                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5889                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5890         else if (drm_mode_is_420_also(info, mode_in)
5891                         && aconnector->force_yuv420_output)
5892                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5893         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5894                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5895                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5896         else
5897                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5898
5899         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5900         timing_out->display_color_depth = convert_color_depth_from_display_info(
5901                 connector,
5902                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5903                 requested_bpc);
5904         timing_out->scan_type = SCANNING_TYPE_NODATA;
5905         timing_out->hdmi_vic = 0;
5906
5907         if (old_stream) {
5908                 timing_out->vic = old_stream->timing.vic;
5909                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5910                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5911         } else {
5912                 timing_out->vic = drm_match_cea_mode(mode_in);
5913                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5914                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5915                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5916                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5917         }
5918
5919         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5920                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5921                 timing_out->vic = avi_frame.video_code;
5922                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5923                 timing_out->hdmi_vic = hv_frame.vic;
5924         }
5925
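             /*
              * FreeSync video modes are programmed with the raw mode timings
              * so the front porch can be stretched at runtime; all other
              * modes use the adjusted crtc_* timings.
              */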
5926         if (is_freesync_video_mode(mode_in, aconnector)) {
5927                 timing_out->h_addressable = mode_in->hdisplay;
5928                 timing_out->h_total = mode_in->htotal;
5929                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5930                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5931                 timing_out->v_total = mode_in->vtotal;
5932                 timing_out->v_addressable = mode_in->vdisplay;
5933                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5934                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5935                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5936         } else {
5937                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5938                 timing_out->h_total = mode_in->crtc_htotal;
5939                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5940                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5941                 timing_out->v_total = mode_in->crtc_vtotal;
5942                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5943                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5944                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5945                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5946         }
5947
5948         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5949
5950         stream->output_color_space = get_output_color_space(timing_out);
5951
5952         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5953         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5954         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5955                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5956                     drm_mode_is_420_also(info, mode_in) &&
5957                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5958                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5959                         adjust_colour_depth_from_display_info(timing_out, info);
5960                 }
5961         }
5962 }
5963
5964 static void fill_audio_info(struct audio_info *audio_info,
5965                             const struct drm_connector *drm_connector,
5966                             const struct dc_sink *dc_sink)
5967 {
5968         int i = 0;
5969         int cea_revision = 0;
5970         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5971
5972         audio_info->manufacture_id = edid_caps->manufacturer_id;
5973         audio_info->product_id = edid_caps->product_id;
5974
5975         cea_revision = drm_connector->display_info.cea_rev;
5976
5977         strscpy(audio_info->display_name,
5978                 edid_caps->display_name,
5979                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5980
5981         if (cea_revision >= 3) {
5982                 audio_info->mode_count = edid_caps->audio_mode_count;
5983
5984                 for (i = 0; i < audio_info->mode_count; ++i) {
5985                         audio_info->modes[i].format_code =
5986                                         (enum audio_format_code)
5987                                         (edid_caps->audio_modes[i].format_code);
5988                         audio_info->modes[i].channel_count =
5989                                         edid_caps->audio_modes[i].channel_count;
5990                         audio_info->modes[i].sample_rates.all =
5991                                         edid_caps->audio_modes[i].sample_rate;
5992                         audio_info->modes[i].sample_size =
5993                                         edid_caps->audio_modes[i].sample_size;
5994                 }
5995         }
5996
5997         audio_info->flags.all = edid_caps->speaker_flags;
5998
5999         /* TODO: We only check for the progressive mode, check for interlace mode too */
6000         if (drm_connector->latency_present[0]) {
6001                 audio_info->video_latency = drm_connector->video_latency[0];
6002                 audio_info->audio_latency = drm_connector->audio_latency[0];
6003         }
6004
6005         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6007 }
6008
6009 static void
6010 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6011                                       struct drm_display_mode *dst_mode)
6012 {
6013         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6014         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6015         dst_mode->crtc_clock = src_mode->crtc_clock;
6016         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6017         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6018         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6019         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6020         dst_mode->crtc_htotal = src_mode->crtc_htotal;
6021         dst_mode->crtc_hskew = src_mode->crtc_hskew;
6022         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6023         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6024         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6025         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6026         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6027 }
6028
6029 static void
6030 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6031                                         const struct drm_display_mode *native_mode,
6032                                         bool scale_enabled)
6033 {
6034         if (scale_enabled) {
6035                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6036         } else if (native_mode->clock == drm_mode->clock &&
6037                         native_mode->htotal == drm_mode->htotal &&
6038                         native_mode->vtotal == drm_mode->vtotal) {
6039                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6040         } else {
6041                 /* no scaling and no amdgpu-inserted mode, no need to patch */
6042         }
6043 }
6044
6045 static struct dc_sink *
6046 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6047 {
6048         struct dc_sink_init_data sink_init_data = { 0 };
6049         struct dc_sink *sink = NULL;
6050         sink_init_data.link = aconnector->dc_link;
6051         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6052
6053         sink = dc_sink_create(&sink_init_data);
6054         if (!sink) {
6055                 DRM_ERROR("Failed to create sink!\n");
6056                 return NULL;
6057         }
6058         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6059
6060         return sink;
6061 }
6062
6063 static void set_multisync_trigger_params(
6064                 struct dc_stream_state *stream)
6065 {
6066         struct dc_stream_state *master = NULL;
6067
6068         if (stream->triggered_crtc_reset.enabled) {
6069                 master = stream->triggered_crtc_reset.event_source;
6070                 stream->triggered_crtc_reset.event =
6071                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6072                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6073                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6074         }
6075 }
6076
6077 static void set_master_stream(struct dc_stream_state *stream_set[],
6078                               int stream_count)
6079 {
6080         int j, highest_rfr = 0, master_stream = 0;
6081
6082         for (j = 0;  j < stream_count; j++) {
6083                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6084                         int refresh_rate = 0;
6085
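                             /*
                              * Refresh rate in Hz: pix_clk_100hz is in units
                              * of 100 Hz, so scale by 100 and divide by the
                              * total pixel count per frame.
                              */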
6086                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6087                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6088                         if (refresh_rate > highest_rfr) {
6089                                 highest_rfr = refresh_rate;
6090                                 master_stream = j;
6091                         }
6092                 }
6093         }
6094         for (j = 0;  j < stream_count; j++) {
6095                 if (stream_set[j])
6096                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6097         }
6098 }
6099
6100 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6101 {
6102         int i = 0;
6103         struct dc_stream_state *stream;
6104
6105         if (context->stream_count < 2)
6106                 return;
6107         for (i = 0; i < context->stream_count ; i++) {
6108                 if (!context->streams[i])
6109                         continue;
6110                 /*
6111                  * TODO: add a function to read AMD VSDB bits and set
6112                  * crtc_sync_master.multi_sync_enabled flag
6113                  * For now it's set to false
6114                  */
6115         }
6116
6117         set_master_stream(context->streams, context->stream_count);
6118
6119         for (i = 0; i < context->stream_count ; i++) {
6120                 stream = context->streams[i];
6121
6122                 if (!stream)
6123                         continue;
6124
6125                 set_multisync_trigger_params(stream);
6126         }
6127 }
6128
6129 #if defined(CONFIG_DRM_AMD_DC_DCN)
6130 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6131                             struct dc_sink *sink, struct dc_stream_state *stream,
6132                             struct dsc_dec_dpcd_caps *dsc_caps)
6133 {
6134         stream->timing.flags.DSC = 0;
6135         dsc_caps->is_dsc_supported = false;
6136
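             /*
              * DSC caps are parsed only for DP/eDP sinks that are attached
              * directly or through a DP-to-HDMI converter.
              */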
6137         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6138                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6139                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6140                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6141                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6142                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6143                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6144                                 dsc_caps);
6145         }
6146 }
6147
6148 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6149                                     struct dc_sink *sink, struct dc_stream_state *stream,
6150                                     struct dsc_dec_dpcd_caps *dsc_caps,
6151                                     uint32_t max_dsc_target_bpp_limit_override)
6152 {
6153         const struct dc_link_settings *verified_link_cap = NULL;
6154         uint32_t link_bw_in_kbps;
6155         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6156         struct dc *dc = sink->ctx->dc;
6157         struct dc_dsc_bw_range bw_range = {0};
6158         struct dc_dsc_config dsc_cfg = {0};
6159
6160         verified_link_cap = dc_link_get_link_cap(stream->link);
6161         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6162         edp_min_bpp_x16 = 8 * 16;
6163         edp_max_bpp_x16 = 8 * 16;
6164
6165         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6166                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6167
6168         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6169                 edp_min_bpp_x16 = edp_max_bpp_x16;
6170
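             /*
              * If even the maximum-bpp DSC stream fits within the link
              * bandwidth, program DSC at the maximum bpp; otherwise let
              * dc_dsc_compute_config() pick a config bounded by the link
              * bandwidth below.
              */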
6171         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6172                                 dc->debug.dsc_min_slice_height_override,
6173                                 edp_min_bpp_x16, edp_max_bpp_x16,
6174                                 dsc_caps,
6175                                 &stream->timing,
6176                                 &bw_range)) {
6177
6178                 if (bw_range.max_kbps < link_bw_in_kbps) {
6179                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6180                                         dsc_caps,
6181                                         dc->debug.dsc_min_slice_height_override,
6182                                         max_dsc_target_bpp_limit_override,
6183                                         0,
6184                                         &stream->timing,
6185                                         &dsc_cfg)) {
6186                                 stream->timing.dsc_cfg = dsc_cfg;
6187                                 stream->timing.flags.DSC = 1;
6188                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6189                         }
6190                         return;
6191                 }
6192         }
6193
6194         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6195                                 dsc_caps,
6196                                 dc->debug.dsc_min_slice_height_override,
6197                                 max_dsc_target_bpp_limit_override,
6198                                 link_bw_in_kbps,
6199                                 &stream->timing,
6200                                 &dsc_cfg)) {
6201                 stream->timing.dsc_cfg = dsc_cfg;
6202                 stream->timing.flags.DSC = 1;
6203         }
6204 }
6205
6206 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6207                                         struct dc_sink *sink, struct dc_stream_state *stream,
6208                                         struct dsc_dec_dpcd_caps *dsc_caps)
6209 {
6210         struct drm_connector *drm_connector = &aconnector->base;
6211         uint32_t link_bandwidth_kbps;
6212         uint32_t max_dsc_target_bpp_limit_override = 0;
6213         struct dc *dc = sink->ctx->dc;
6214         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6215         uint32_t dsc_max_supported_bw_in_kbps;
6216
6217         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6218                                                         dc_link_get_link_cap(aconnector->dc_link));
6219
6220         if (stream->link && stream->link->local_sink)
6221                 max_dsc_target_bpp_limit_override =
6222                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6223
6224         /* Set DSC policy according to dsc_clock_en */
6225         dc_dsc_policy_set_enable_dsc_when_not_needed(
6226                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6227
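             /*
              * eDP panels get the dedicated eDP policy; DP SST sinks and
              * DP-to-HDMI PCONs are sized against the link bandwidth below.
              */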
6228         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6229             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6230
6231                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6232
6233         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6234                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6235                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6236                                                 dsc_caps,
6237                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6238                                                 max_dsc_target_bpp_limit_override,
6239                                                 link_bandwidth_kbps,
6240                                                 &stream->timing,
6241                                                 &stream->timing.dsc_cfg)) {
6242                                 stream->timing.flags.DSC = 1;
6243                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6244                                                                  __func__, drm_connector->name);
6245                         }
6246                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6247                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6248                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6249                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6250
6251                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6252                                         max_supported_bw_in_kbps > 0 &&
6253                                         dsc_max_supported_bw_in_kbps > 0)
6254                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6255                                                 dsc_caps,
6256                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6257                                                 max_dsc_target_bpp_limit_override,
6258                                                 dsc_max_supported_bw_in_kbps,
6259                                                 &stream->timing,
6260                                                 &stream->timing.dsc_cfg)) {
6261                                         stream->timing.flags.DSC = 1;
6262                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6263                                                                          __func__, drm_connector->name);
6264                                 }
6265                 }
6266         }
6267
6268         /* Overwrite the stream flag if DSC is enabled through debugfs */
6269         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6270                 stream->timing.flags.DSC = 1;
6271
6272         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6273                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6274
6275         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6276                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6277
6278         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6279                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6280 }
6281 #endif /* CONFIG_DRM_AMD_DC_DCN */
6282
6283 /**
6284  * DOC: FreeSync Video
6285  *
6286  * When a userspace application wants to play a video, the content follows a
6287  * standard format definition that usually specifies the FPS for that format.
6288  * The below list illustrates some video format and the expected FPS,
6289  * respectively:
6290  *
6291  * - TV/NTSC (23.976 FPS)
6292  * - Cinema (24 FPS)
6293  * - TV/PAL (25 FPS)
6294  * - TV/NTSC (29.97 FPS)
6295  * - TV/NTSC (30 FPS)
6296  * - Cinema HFR (48 FPS)
6297  * - TV/PAL (50 FPS)
6298  * - Commonly used (60 FPS)
6299  * - Multiples of 24 (48,72,96,120 FPS)
6300  *
6301  * The list of standard video formats is not huge and can be added to the
6302  * connector's modeset list beforehand. With that, userspace can leverage
6303  * FreeSync to extend the front porch in order to attain the target refresh
6304  * rate. Such a switch will happen seamlessly, without screen blanking or
6305  * reprogramming of the output in any other way. If the userspace requests a
6306  * modesetting change compatible with FreeSync modes that only differ in the
6307  * refresh rate, DC will skip the full update and avoid blink during the
6308  * transition. For example, the video player can change the modesetting from
6309  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6310  * causing any display blink. This same concept can be applied to a mode
6311  * setting change.
6312  */
6313 static struct drm_display_mode *
6314 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6315                           bool use_probed_modes)
6316 {
6317         struct drm_display_mode *m, *m_pref = NULL;
6318         u16 current_refresh, highest_refresh;
6319         struct list_head *list_head = use_probed_modes ?
6320                                                     &aconnector->base.probed_modes :
6321                                                     &aconnector->base.modes;
6322
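             /* Reuse the cached base mode if it was already determined. */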
6323         if (aconnector->freesync_vid_base.clock != 0)
6324                 return &aconnector->freesync_vid_base;
6325
6326         /* Find the preferred mode */
6327         list_for_each_entry(m, list_head, head) {
6328                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6329                         m_pref = m;
6330                         break;
6331                 }
6332         }
6333
6334         if (!m_pref) {
6335                 /* Probably an EDID with no preferred mode. Fallback to first entry */
6336                 m_pref = list_first_entry_or_null(
6337                         &aconnector->base.modes, struct drm_display_mode, head);
6338                 if (!m_pref) {
6339                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6340                         return NULL;
6341                 }
6342         }
6343
6344         highest_refresh = drm_mode_vrefresh(m_pref);
6345
6346         /*
6347          * Find the mode with highest refresh rate with same resolution.
6348          * For some monitors, preferred mode is not the mode with highest
6349          * supported refresh rate.
6350          */
6351         list_for_each_entry(m, list_head, head) {
6352                 current_refresh  = drm_mode_vrefresh(m);
6353
6354                 if (m->hdisplay == m_pref->hdisplay &&
6355                     m->vdisplay == m_pref->vdisplay &&
6356                     highest_refresh < current_refresh) {
6357                         highest_refresh = current_refresh;
6358                         m_pref = m;
6359                 }
6360         }
6361
6362         aconnector->freesync_vid_base = *m_pref;
6363         return m_pref;
6364 }
6365
6366 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6367                                    struct amdgpu_dm_connector *aconnector)
6368 {
6369         struct drm_display_mode *high_mode;
6370         int timing_diff;
6371
6372         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6373         if (!high_mode || !mode)
6374                 return false;
6375
6376         timing_diff = high_mode->vtotal - mode->vtotal;
6377
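             /*
              * A FreeSync video mode must match the base mode in everything
              * except v_total: the extra lines go into the vertical front
              * porch, so vsync_start and vsync_end shift by exactly
              * timing_diff.
              */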
6378         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6379             high_mode->hdisplay != mode->hdisplay ||
6380             high_mode->vdisplay != mode->vdisplay ||
6381             high_mode->hsync_start != mode->hsync_start ||
6382             high_mode->hsync_end != mode->hsync_end ||
6383             high_mode->htotal != mode->htotal ||
6384             high_mode->hskew != mode->hskew ||
6385             high_mode->vscan != mode->vscan ||
6386             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6387             high_mode->vsync_end - mode->vsync_end != timing_diff)
6388                 return false;
6389         else
6390                 return true;
6391 }
6392
6393 static struct dc_stream_state *
6394 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6395                        const struct drm_display_mode *drm_mode,
6396                        const struct dm_connector_state *dm_state,
6397                        const struct dc_stream_state *old_stream,
6398                        int requested_bpc)
6399 {
6400         struct drm_display_mode *preferred_mode = NULL;
6401         struct drm_connector *drm_connector;
6402         const struct drm_connector_state *con_state =
6403                 dm_state ? &dm_state->base : NULL;
6404         struct dc_stream_state *stream = NULL;
6405         struct drm_display_mode mode = *drm_mode;
6406         struct drm_display_mode saved_mode;
6407         struct drm_display_mode *freesync_mode = NULL;
6408         bool native_mode_found = false;
6409         bool recalculate_timing = false;
6410         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6411         int mode_refresh;
6412         int preferred_refresh = 0;
6413 #if defined(CONFIG_DRM_AMD_DC_DCN)
6414         struct dsc_dec_dpcd_caps dsc_caps;
6415 #endif
6416         struct dc_sink *sink = NULL;
6417
6418         memset(&saved_mode, 0, sizeof(saved_mode));
6419
6420         if (aconnector == NULL) {
6421                 DRM_ERROR("aconnector is NULL!\n");
6422                 return stream;
6423         }
6424
6425         drm_connector = &aconnector->base;
6426
6427         if (!aconnector->dc_sink) {
6428                 sink = create_fake_sink(aconnector);
6429                 if (!sink)
6430                         return stream;
6431         } else {
6432                 sink = aconnector->dc_sink;
6433                 dc_sink_retain(sink);
6434         }
6435
6436         stream = dc_create_stream_for_sink(sink);
6437
6438         if (stream == NULL) {
6439                 DRM_ERROR("Failed to create stream for sink!\n");
6440                 goto finish;
6441         }
6442
6443         stream->dm_stream_context = aconnector;
6444
6445         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6446                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6447
6448         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6449                 /* Search for preferred mode */
6450                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6451                         native_mode_found = true;
6452                         break;
6453                 }
6454         }
6455         if (!native_mode_found)
6456                 preferred_mode = list_first_entry_or_null(
6457                                 &aconnector->base.modes,
6458                                 struct drm_display_mode,
6459                                 head);
6460
6461         mode_refresh = drm_mode_vrefresh(&mode);
6462
6463         if (preferred_mode == NULL) {
6464                 /*
6465                  * This may not be an error: the use case is when we have no
6466                  * usermode calls to reset and set the mode upon hotplug. In this
6467                  * case, we set the mode ourselves to restore the previous mode,
6468                  * and the mode list may not be filled in yet.
6469                  */
6470                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6471         } else {
6472                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6473                 if (recalculate_timing) {
6474                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6475                         saved_mode = mode;
6476                         mode = *freesync_mode;
6477                 } else {
6478                         decide_crtc_timing_for_drm_display_mode(
6479                                 &mode, preferred_mode, scale);
6480
6481                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6482                 }
6483         }
6484
6485         if (recalculate_timing)
6486                 drm_mode_set_crtcinfo(&saved_mode, 0);
6487         else if (!dm_state)
6488                 drm_mode_set_crtcinfo(&mode, 0);
6489
6490         /*
6491          * If scaling is enabled and the refresh rate didn't change,
6492          * copy the VIC and sync polarities of the old timings.
6493          */
6494         if (!scale || mode_refresh != preferred_refresh)
6495                 fill_stream_properties_from_drm_display_mode(
6496                         stream, &mode, &aconnector->base, con_state, NULL,
6497                         requested_bpc);
6498         else
6499                 fill_stream_properties_from_drm_display_mode(
6500                         stream, &mode, &aconnector->base, con_state, old_stream,
6501                         requested_bpc);
6502
6503 #if defined(CONFIG_DRM_AMD_DC_DCN)
6504         /* SST DSC determination policy */
6505         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6506         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6507                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6508 #endif
6509
6510         update_stream_scaling_settings(&mode, dm_state, stream);
6511
6512         fill_audio_info(
6513                 &stream->audio_info,
6514                 drm_connector,
6515                 sink);
6516
6517         update_stream_signal(stream, sink);
6518
6519         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6520                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6521
6522         if (stream->link->psr_settings.psr_feature_enabled) {
6523                 /*
6524                  * Decide whether the stream supports VSC SDP colorimetry
6525                  * before building the VSC info packet.
6526                  */
6527                 stream->use_vsc_sdp_for_colorimetry = false;
6528                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6529                         stream->use_vsc_sdp_for_colorimetry =
6530                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6531                 } else {
6532                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6533                                 stream->use_vsc_sdp_for_colorimetry = true;
6534                 }
6535                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6536                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6537
6538         }
6539 finish:
6540         dc_sink_release(sink);
6541
6542         return stream;
6543 }
6544
6545 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6546 {
6547         drm_crtc_cleanup(crtc);
6548         kfree(crtc);
6549 }
6550
6551 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6552                                   struct drm_crtc_state *state)
6553 {
6554         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6555
6556         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6557         if (cur->stream)
6558                 dc_stream_release(cur->stream);
6559
6561         __drm_atomic_helper_crtc_destroy_state(state);
6562
6564         kfree(state);
6565 }
6566
6567 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6568 {
6569         struct dm_crtc_state *state;
6570
6571         if (crtc->state)
6572                 dm_crtc_destroy_state(crtc, crtc->state);
6573
6574         state = kzalloc(sizeof(*state), GFP_KERNEL);
6575         if (WARN_ON(!state))
6576                 return;
6577
6578         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6579 }
6580
6581 static struct drm_crtc_state *
6582 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6583 {
6584         struct dm_crtc_state *state, *cur;
6585
6586         if (WARN_ON(!crtc->state))
6587                 return NULL;
6588
6589         cur = to_dm_crtc_state(crtc->state);
6590
6591         state = kzalloc(sizeof(*state), GFP_KERNEL);
6592         if (!state)
6593                 return NULL;
6594
6595         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6596
6597         if (cur->stream) {
6598                 state->stream = cur->stream;
6599                 dc_stream_retain(state->stream);
6600         }
6601
6602         state->active_planes = cur->active_planes;
6603         state->vrr_infopacket = cur->vrr_infopacket;
6604         state->abm_level = cur->abm_level;
6605         state->vrr_supported = cur->vrr_supported;
6606         state->freesync_config = cur->freesync_config;
6607         state->cm_has_degamma = cur->cm_has_degamma;
6608         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6609         state->force_dpms_off = cur->force_dpms_off;
6610         /* TODO: Duplicate the dc_stream once the stream object is flattened */
6611
6612         return &state->base;
6613 }
6614
6615 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6616 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6617 {
6618         crtc_debugfs_init(crtc);
6619
6620         return 0;
6621 }
6622 #endif
6623
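/*
 * Enable or disable the VUPDATE interrupt for this CRTC. The DC interrupt
 * source is derived by offsetting the VUPDATE base with the CRTC's OTG
 * instance.
 */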
6624 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6625 {
6626         enum dc_irq_source irq_source;
6627         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6628         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6629         int rc;
6630
6631         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6632
6633         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6634
6635         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6636                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6637         return rc;
6638 }
6639
6640 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6641 {
6642         enum dc_irq_source irq_source;
6643         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6644         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6645         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6646 #if defined(CONFIG_DRM_AMD_DC_DCN)
6647         struct amdgpu_display_manager *dm = &adev->dm;
6648         struct vblank_control_work *work;
6649 #endif
6650         int rc = 0;
6651
6652         if (enable) {
6653                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6654                 if (amdgpu_dm_vrr_active(acrtc_state))
6655                         rc = dm_set_vupdate_irq(crtc, true);
6656         } else {
6657                 /* vblank irq off -> vupdate irq off */
6658                 rc = dm_set_vupdate_irq(crtc, false);
6659         }
6660
6661         if (rc)
6662                 return rc;
6663
6664         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6665
6666         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6667                 return -EBUSY;
6668
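        /* Don't queue the vblank control work while a GPU reset is in progress. */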
6669         if (amdgpu_in_reset(adev))
6670                 return 0;
6671
6672 #if defined(CONFIG_DRM_AMD_DC_DCN)
6673         if (dm->vblank_control_workqueue) {
6674                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6675                 if (!work)
6676                         return -ENOMEM;
6677
6678                 INIT_WORK(&work->work, vblank_control_worker);
6679                 work->dm = dm;
6680                 work->acrtc = acrtc;
6681                 work->enable = enable;
6682
6683                 if (acrtc_state->stream) {
6684                         dc_stream_retain(acrtc_state->stream);
6685                         work->stream = acrtc_state->stream;
6686                 }
6687
6688                 queue_work(dm->vblank_control_workqueue, &work->work);
6689         }
6690 #endif
6691
6692         return 0;
6693 }
6694
6695 static int dm_enable_vblank(struct drm_crtc *crtc)
6696 {
6697         return dm_set_vblank(crtc, true);
6698 }
6699
6700 static void dm_disable_vblank(struct drm_crtc *crtc)
6701 {
6702         dm_set_vblank(crtc, false);
6703 }
6704
6705 /* Only the options currently available to the driver are implemented */
6706 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6707         .reset = dm_crtc_reset_state,
6708         .destroy = amdgpu_dm_crtc_destroy,
6709         .set_config = drm_atomic_helper_set_config,
6710         .page_flip = drm_atomic_helper_page_flip,
6711         .atomic_duplicate_state = dm_crtc_duplicate_state,
6712         .atomic_destroy_state = dm_crtc_destroy_state,
6713         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6714         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6715         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6716         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6717         .enable_vblank = dm_enable_vblank,
6718         .disable_vblank = dm_disable_vblank,
6719         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6720 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6721         .late_register = amdgpu_dm_crtc_late_register,
6722 #endif
6723 };
6724
6725 static enum drm_connector_status
6726 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6727 {
6728         bool connected;
6729         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6730
6731         /*
6732          * Notes:
6733          * 1. This interface is NOT called in context of HPD irq.
6734          * 2. This interface *is called* in the context of a user-mode ioctl,
6735          * which makes it a bad place for *any* MST-related activity.
6736          */
6737
6738         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6739             !aconnector->fake_enable)
6740                 connected = (aconnector->dc_sink != NULL);
6741         else
6742                 connected = (aconnector->base.force == DRM_FORCE_ON);
6743
6744         update_subconnector_property(aconnector);
6745
6746         return (connected ? connector_status_connected :
6747                         connector_status_disconnected);
6748 }
6749
6750 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6751                                             struct drm_connector_state *connector_state,
6752                                             struct drm_property *property,
6753                                             uint64_t val)
6754 {
6755         struct drm_device *dev = connector->dev;
6756         struct amdgpu_device *adev = drm_to_adev(dev);
6757         struct dm_connector_state *dm_old_state =
6758                 to_dm_connector_state(connector->state);
6759         struct dm_connector_state *dm_new_state =
6760                 to_dm_connector_state(connector_state);
6761
6762         int ret = -EINVAL;
6763
6764         if (property == dev->mode_config.scaling_mode_property) {
6765                 enum amdgpu_rmx_type rmx_type;
6766
6767                 switch (val) {
6768                 case DRM_MODE_SCALE_CENTER:
6769                         rmx_type = RMX_CENTER;
6770                         break;
6771                 case DRM_MODE_SCALE_ASPECT:
6772                         rmx_type = RMX_ASPECT;
6773                         break;
6774                 case DRM_MODE_SCALE_FULLSCREEN:
6775                         rmx_type = RMX_FULL;
6776                         break;
6777                 case DRM_MODE_SCALE_NONE:
6778                 default:
6779                         rmx_type = RMX_OFF;
6780                         break;
6781                 }
6782
6783                 if (dm_old_state->scaling == rmx_type)
6784                         return 0;
6785
6786                 dm_new_state->scaling = rmx_type;
6787                 ret = 0;
6788         } else if (property == adev->mode_info.underscan_hborder_property) {
6789                 dm_new_state->underscan_hborder = val;
6790                 ret = 0;
6791         } else if (property == adev->mode_info.underscan_vborder_property) {
6792                 dm_new_state->underscan_vborder = val;
6793                 ret = 0;
6794         } else if (property == adev->mode_info.underscan_property) {
6795                 dm_new_state->underscan_enable = val;
6796                 ret = 0;
6797         } else if (property == adev->mode_info.abm_level_property) {
6798                 dm_new_state->abm_level = val;
6799                 ret = 0;
6800         }
6801
6802         return ret;
6803 }
6804
6805 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6806                                             const struct drm_connector_state *state,
6807                                             struct drm_property *property,
6808                                             uint64_t *val)
6809 {
6810         struct drm_device *dev = connector->dev;
6811         struct amdgpu_device *adev = drm_to_adev(dev);
6812         struct dm_connector_state *dm_state =
6813                 to_dm_connector_state(state);
6814         int ret = -EINVAL;
6815
6816         if (property == dev->mode_config.scaling_mode_property) {
6817                 switch (dm_state->scaling) {
6818                 case RMX_CENTER:
6819                         *val = DRM_MODE_SCALE_CENTER;
6820                         break;
6821                 case RMX_ASPECT:
6822                         *val = DRM_MODE_SCALE_ASPECT;
6823                         break;
6824                 case RMX_FULL:
6825                         *val = DRM_MODE_SCALE_FULLSCREEN;
6826                         break;
6827                 case RMX_OFF:
6828                 default:
6829                         *val = DRM_MODE_SCALE_NONE;
6830                         break;
6831                 }
6832                 ret = 0;
6833         } else if (property == adev->mode_info.underscan_hborder_property) {
6834                 *val = dm_state->underscan_hborder;
6835                 ret = 0;
6836         } else if (property == adev->mode_info.underscan_vborder_property) {
6837                 *val = dm_state->underscan_vborder;
6838                 ret = 0;
6839         } else if (property == adev->mode_info.underscan_property) {
6840                 *val = dm_state->underscan_enable;
6841                 ret = 0;
6842         } else if (property == adev->mode_info.abm_level_property) {
6843                 *val = dm_state->abm_level;
6844                 ret = 0;
6845         }
6846
6847         return ret;
6848 }
6849
6850 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6851 {
6852         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6853
6854         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6855 }
6856
6857 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6858 {
6859         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6860         const struct dc_link *link = aconnector->dc_link;
6861         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6862         struct amdgpu_display_manager *dm = &adev->dm;
6863         int i;
6864
6865         /*
6866          * Call only if mst_mgr was initialized before, since it's not done
6867          * for all connector types.
6868          */
6869         if (aconnector->mst_mgr.dev)
6870                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6871
6872 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6873         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6874         for (i = 0; i < dm->num_of_edps; i++) {
6875                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6876                         backlight_device_unregister(dm->backlight_dev[i]);
6877                         dm->backlight_dev[i] = NULL;
6878                 }
6879         }
6880 #endif
6881
6882         if (aconnector->dc_em_sink)
6883                 dc_sink_release(aconnector->dc_em_sink);
6884         aconnector->dc_em_sink = NULL;
6885         if (aconnector->dc_sink)
6886                 dc_sink_release(aconnector->dc_sink);
6887         aconnector->dc_sink = NULL;
6888
6889         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6890         drm_connector_unregister(connector);
6891         drm_connector_cleanup(connector);
6892         if (aconnector->i2c) {
6893                 i2c_del_adapter(&aconnector->i2c->base);
6894                 kfree(aconnector->i2c);
6895         }
6896         kfree(aconnector->dm_dp_aux.aux.name);
6897
6898         kfree(connector);
6899 }
6900
6901 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6902 {
6903         struct dm_connector_state *state =
6904                 to_dm_connector_state(connector->state);
6905
6906         if (connector->state)
6907                 __drm_atomic_helper_connector_destroy_state(connector->state);
6908
6909         kfree(state);
6910
6911         state = kzalloc(sizeof(*state), GFP_KERNEL);
6912
6913         if (state) {
6914                 state->scaling = RMX_OFF;
6915                 state->underscan_enable = false;
6916                 state->underscan_hborder = 0;
6917                 state->underscan_vborder = 0;
6918                 state->base.max_requested_bpc = 8;
6919                 state->vcpi_slots = 0;
6920                 state->pbn = 0;
6921                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6922                         state->abm_level = amdgpu_dm_abm_level;
6923
6924                 __drm_atomic_helper_connector_reset(connector, &state->base);
6925         }
6926 }
6927
6928 struct drm_connector_state *
6929 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6930 {
6931         struct dm_connector_state *state =
6932                 to_dm_connector_state(connector->state);
6933
6934         struct dm_connector_state *new_state =
6935                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6936
6937         if (!new_state)
6938                 return NULL;
6939
6940         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6941
6942         new_state->freesync_capable = state->freesync_capable;
6943         new_state->abm_level = state->abm_level;
6944         new_state->scaling = state->scaling;
6945         new_state->underscan_enable = state->underscan_enable;
6946         new_state->underscan_hborder = state->underscan_hborder;
6947         new_state->underscan_vborder = state->underscan_vborder;
6948         new_state->vcpi_slots = state->vcpi_slots;
6949         new_state->pbn = state->pbn;
6950         return &new_state->base;
6951 }
6952
6953 static int
6954 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6955 {
6956         struct amdgpu_dm_connector *amdgpu_dm_connector =
6957                 to_amdgpu_dm_connector(connector);
6958         int r;
6959
6960         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6961             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6962                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6963                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6964                 if (r)
6965                         return r;
6966         }
6967
6968 #if defined(CONFIG_DEBUG_FS)
6969         connector_debugfs_init(amdgpu_dm_connector);
6970 #endif
6971
6972         return 0;
6973 }
6974
6975 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6976         .reset = amdgpu_dm_connector_funcs_reset,
6977         .detect = amdgpu_dm_connector_detect,
6978         .fill_modes = drm_helper_probe_single_connector_modes,
6979         .destroy = amdgpu_dm_connector_destroy,
6980         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6981         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6982         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6983         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6984         .late_register = amdgpu_dm_connector_late_register,
6985         .early_unregister = amdgpu_dm_connector_unregister
6986 };
6987
6988 static int get_modes(struct drm_connector *connector)
6989 {
6990         return amdgpu_dm_connector_get_modes(connector);
6991 }
6992
6993 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6994 {
6995         struct dc_sink_init_data init_params = {
6996                         .link = aconnector->dc_link,
6997                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6998         };
6999         struct edid *edid;
7000
7001         if (!aconnector->base.edid_blob_ptr) {
7002                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7003                                 aconnector->base.name);
7004
7005                 aconnector->base.force = DRM_FORCE_OFF;
7006                 aconnector->base.override_edid = false;
7007                 return;
7008         }
7009
7010         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7011
7012         aconnector->edid = edid;
7013
7014         aconnector->dc_em_sink = dc_link_add_remote_sink(
7015                 aconnector->dc_link,
7016                 (uint8_t *)edid,
7017                 (edid->extensions + 1) * EDID_LENGTH,
7018                 &init_params);
7019
7020         if (aconnector->base.force == DRM_FORCE_ON) {
7021                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7022                                 aconnector->dc_link->local_sink :
7023                                 aconnector->dc_em_sink;
7024                 dc_sink_retain(aconnector->dc_sink);
7025         }
7026 }
7027
7028 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7029 {
7030         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7031
7032         /*
7033          * In case of a headless boot with force-on for a DP managed connector,
7034          * these settings have to be != 0 to get an initial modeset.
7035          */
7036         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7037                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7038                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7039         }
7040
7042         aconnector->base.override_edid = true;
7043         create_eml_sink(aconnector);
7044 }
7045
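/*
 * Create a stream for the sink and validate it with DC, retrying at
 * progressively lower bit depths (down to 6 bpc) and, if encoder
 * validation still fails, once more with forced YCbCr 4:2:0 output.
 */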
7046 struct dc_stream_state *
7047 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7048                                 const struct drm_display_mode *drm_mode,
7049                                 const struct dm_connector_state *dm_state,
7050                                 const struct dc_stream_state *old_stream)
7051 {
7052         struct drm_connector *connector = &aconnector->base;
7053         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7054         struct dc_stream_state *stream;
7055         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7056         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7057         enum dc_status dc_result = DC_OK;
7058
7059         do {
7060                 stream = create_stream_for_sink(aconnector, drm_mode,
7061                                                 dm_state, old_stream,
7062                                                 requested_bpc);
7063                 if (stream == NULL) {
7064                         DRM_ERROR("Failed to create stream for sink!\n");
7065                         break;
7066                 }
7067
7068                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7069
7070                 if (dc_result != DC_OK) {
7071                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7072                                       drm_mode->hdisplay,
7073                                       drm_mode->vdisplay,
7074                                       drm_mode->clock,
7075                                       dc_result,
7076                                       dc_status_to_str(dc_result));
7077
7078                         dc_stream_release(stream);
7079                         stream = NULL;
7080                         requested_bpc -= 2; /* lower bpc to retry validation */
7081                 }
7082
7083         } while (stream == NULL && requested_bpc >= 6);
7084
7085         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7086                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7087
7088                 aconnector->force_yuv420_output = true;
7089                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7090                                                 dm_state, old_stream);
7091                 aconnector->force_yuv420_output = false;
7092         }
7093
7094         return stream;
7095 }
7096
7097 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7098                                    struct drm_display_mode *mode)
7099 {
7100         int result = MODE_ERROR;
7101         struct dc_sink *dc_sink;
7102         /* TODO: Unhardcode stream count */
7103         struct dc_stream_state *stream;
7104         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7105
7106         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7107                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7108                 return result;
7109
7110         /*
7111          * Only run this the first time mode_valid is called to initialize
7112          * EDID mgmt
7113          */
7114         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7115                 !aconnector->dc_em_sink)
7116                 handle_edid_mgmt(aconnector);
7117
7118         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7119
7120         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7121                                 aconnector->base.force != DRM_FORCE_ON) {
7122                 DRM_ERROR("dc_sink is NULL!\n");
7123                 goto fail;
7124         }
7125
7126         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7127         if (stream) {
7128                 dc_stream_release(stream);
7129                 result = MODE_OK;
7130         }
7131
7132 fail:
7133         /* TODO: error handling */
7134         return result;
7135 }
7136
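/*
 * Pack the connector's HDR static metadata into a DC info packet: a
 * fixed 26-byte Dynamic Range and Mastering payload plus a 4-byte
 * header, wrapped as an HDMI infoframe or a DP/eDP SDP depending on
 * the connector type.
 */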
7137 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7138                                 struct dc_info_packet *out)
7139 {
7140         struct hdmi_drm_infoframe frame;
7141         unsigned char buf[30]; /* 26 + 4 */
7142         ssize_t len;
7143         int ret, i;
7144
7145         memset(out, 0, sizeof(*out));
7146
7147         if (!state->hdr_output_metadata)
7148                 return 0;
7149
7150         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7151         if (ret)
7152                 return ret;
7153
7154         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7155         if (len < 0)
7156                 return (int)len;
7157
7158         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7159         if (len != 30)
7160                 return -EINVAL;
7161
7162         /* Prepare the infopacket for DC. */
7163         switch (state->connector->connector_type) {
7164         case DRM_MODE_CONNECTOR_HDMIA:
7165                 out->hb0 = 0x87; /* type */
7166                 out->hb1 = 0x01; /* version */
7167                 out->hb2 = 0x1A; /* length */
7168                 out->sb[0] = buf[3]; /* checksum */
7169                 i = 1;
7170                 break;
7171
7172         case DRM_MODE_CONNECTOR_DisplayPort:
7173         case DRM_MODE_CONNECTOR_eDP:
7174                 out->hb0 = 0x00; /* sdp id, zero */
7175                 out->hb1 = 0x87; /* type */
7176                 out->hb2 = 0x1D; /* payload len - 1 */
7177                 out->hb3 = (0x13 << 2); /* sdp version */
7178                 out->sb[0] = 0x01; /* version */
7179                 out->sb[1] = 0x1A; /* length */
7180                 i = 2;
7181                 break;
7182
7183         default:
7184                 return -EINVAL;
7185         }
7186
7187         memcpy(&out->sb[i], &buf[4], 26);
7188         out->valid = true;
7189
7190         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7191                        sizeof(out->sb), false);
7192
7193         return 0;
7194 }
7195
7196 static int
7197 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7198                                  struct drm_atomic_state *state)
7199 {
7200         struct drm_connector_state *new_con_state =
7201                 drm_atomic_get_new_connector_state(state, conn);
7202         struct drm_connector_state *old_con_state =
7203                 drm_atomic_get_old_connector_state(state, conn);
7204         struct drm_crtc *crtc = new_con_state->crtc;
7205         struct drm_crtc_state *new_crtc_state;
7206         int ret;
7207
7208         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7209
7210         if (!crtc)
7211                 return 0;
7212
7213         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7214                 struct dc_info_packet hdr_infopacket;
7215
7216                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7217                 if (ret)
7218                         return ret;
7219
7220                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7221                 if (IS_ERR(new_crtc_state))
7222                         return PTR_ERR(new_crtc_state);
7223
7224                 /*
7225                  * DC considers the stream backends changed if the
7226                  * static metadata changes. Forcing the modeset also
7227                  * gives a simple way for userspace to switch from
7228                  * 8bpc to 10bpc when setting the metadata to enter
7229                  * or exit HDR.
7230                  *
7231                  * Changing the static metadata after it's been
7232                  * set is permissible, however. So only force a
7233                  * modeset if we're entering or exiting HDR.
7234                  */
7235                 new_crtc_state->mode_changed =
7236                         !old_con_state->hdr_output_metadata ||
7237                         !new_con_state->hdr_output_metadata;
7238         }
7239
7240         return 0;
7241 }
7242
7243 static const struct drm_connector_helper_funcs
7244 amdgpu_dm_connector_helper_funcs = {
7245         /*
7246          * When hotplugging a second, larger display in fbcon mode, higher-resolution
7247          * modes are filtered out by drm_mode_validate_size() and end up missing
7248          * after the user starts lightdm, so we need to rebuild the mode list in
7249          * the get_modes callback instead of just returning the mode count.
7250          */
7251         .get_modes = get_modes,
7252         .mode_valid = amdgpu_dm_connector_mode_valid,
7253         .atomic_check = amdgpu_dm_connector_atomic_check,
7254 };
7255
7256 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7257 {
7258 }
7259
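/* Count the planes (excluding the cursor) that will be enabled on the CRTC. */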
7260 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7261 {
7262         struct drm_atomic_state *state = new_crtc_state->state;
7263         struct drm_plane *plane;
7264         int num_active = 0;
7265
7266         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7267                 struct drm_plane_state *new_plane_state;
7268
7269                 /* Cursor planes are "fake". */
7270                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7271                         continue;
7272
7273                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7274
7275                 if (!new_plane_state) {
7276                         /*
7277                          * The plane is enabled on the CRTC and hasn't changed
7278                          * state. This means that it previously passed
7279                          * validation and is therefore enabled.
7280                          */
7281                         num_active += 1;
7282                         continue;
7283                 }
7284
7285                 /* We need a framebuffer to be considered enabled. */
7286                 num_active += (new_plane_state->fb != NULL);
7287         }
7288
7289         return num_active;
7290 }
7291
7292 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7293                                          struct drm_crtc_state *new_crtc_state)
7294 {
7295         struct dm_crtc_state *dm_new_crtc_state =
7296                 to_dm_crtc_state(new_crtc_state);
7297
7298         dm_new_crtc_state->active_planes = 0;
7299
7300         if (!dm_new_crtc_state->stream)
7301                 return;
7302
7303         dm_new_crtc_state->active_planes =
7304                 count_crtc_active_planes(new_crtc_state);
7305 }
7306
7307 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7308                                        struct drm_atomic_state *state)
7309 {
7310         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7311                                                                           crtc);
7312         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7313         struct dc *dc = adev->dm.dc;
7314         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7315         int ret = -EINVAL;
7316
7317         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7318
7319         dm_update_crtc_active_planes(crtc, crtc_state);
7320
7321         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7322                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7323                 return ret;
7324         }
7325
7326         /*
7327          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7328          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7329          * planes are disabled, which is not supported by the hardware. And there is legacy
7330          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7331          */
7332         if (crtc_state->enable &&
7333             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7334                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7335                 return -EINVAL;
7336         }
7337
7338         /* In some use cases, like reset, no stream is attached */
7339         if (!dm_crtc_state->stream)
7340                 return 0;
7341
7342         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7343                 return 0;
7344
7345         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7346         return ret;
7347 }
7348
7349 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7350                                       const struct drm_display_mode *mode,
7351                                       struct drm_display_mode *adjusted_mode)
7352 {
7353         return true;
7354 }
7355
7356 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7357         .disable = dm_crtc_helper_disable,
7358         .atomic_check = dm_crtc_helper_atomic_check,
7359         .mode_fixup = dm_crtc_helper_mode_fixup,
7360         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7361 };
7362
7363 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7364 {
7366 }
7367
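/* Translate a DC color depth into bits per color component. */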
7368 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7369 {
7370         switch (display_color_depth) {
7371         case COLOR_DEPTH_666:
7372                 return 6;
7373         case COLOR_DEPTH_888:
7374                 return 8;
7375         case COLOR_DEPTH_101010:
7376                 return 10;
7377         case COLOR_DEPTH_121212:
7378                 return 12;
7379         case COLOR_DEPTH_141414:
7380                 return 14;
7381         case COLOR_DEPTH_161616:
7382                 return 16;
7383         default:
7384                 break;
7385         }
7386         return 0;
7387 }
7388
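/*
 * For MST connectors, derive the payload bandwidth number (PBN) from the
 * adjusted mode's pixel clock and color depth, then reserve the matching
 * number of VCPI time slots in the atomic state.
 */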
7389 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7390                                           struct drm_crtc_state *crtc_state,
7391                                           struct drm_connector_state *conn_state)
7392 {
7393         struct drm_atomic_state *state = crtc_state->state;
7394         struct drm_connector *connector = conn_state->connector;
7395         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7396         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7397         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7398         struct drm_dp_mst_topology_mgr *mst_mgr;
7399         struct drm_dp_mst_port *mst_port;
7400         enum dc_color_depth color_depth;
7401         int clock, bpp = 0;
7402         bool is_y420 = false;
7403
7404         if (!aconnector->port || !aconnector->dc_sink)
7405                 return 0;
7406
7407         mst_port = aconnector->port;
7408         mst_mgr = &aconnector->mst_port->mst_mgr;
7409
7410         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7411                 return 0;
7412
7413         if (!state->duplicated) {
7414                 int max_bpc = conn_state->max_requested_bpc;
7415                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7416                                 aconnector->force_yuv420_output;
7417                 color_depth = convert_color_depth_from_display_info(connector,
7418                                                                     is_y420,
7419                                                                     max_bpc);
7420                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7421                 clock = adjusted_mode->clock;
7422                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7423         }
7424         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7425                                                                            mst_mgr,
7426                                                                            mst_port,
7427                                                                            dm_new_connector_state->pbn,
7428                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7429         if (dm_new_connector_state->vcpi_slots < 0) {
7430                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7431                 return dm_new_connector_state->vcpi_slots;
7432         }
7433         return 0;
7434 }
7435
7436 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7437         .disable = dm_encoder_helper_disable,
7438         .atomic_check = dm_encoder_helper_atomic_check
7439 };
7440
7441 #if defined(CONFIG_DRM_AMD_DC_DCN)
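/*
 * Translate the PBN values computed by the MST DSC fairness algorithm
 * into VCPI slot counts for each stream, enabling DSC on the MST port
 * where the stream's timing requires it.
 */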
7442 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7443                                             struct dc_state *dc_state,
7444                                             struct dsc_mst_fairness_vars *vars)
7445 {
7446         struct dc_stream_state *stream = NULL;
7447         struct drm_connector *connector;
7448         struct drm_connector_state *new_con_state;
7449         struct amdgpu_dm_connector *aconnector;
7450         struct dm_connector_state *dm_conn_state;
7451         int i, j;
7452         int vcpi, pbn_div, pbn, slot_num = 0;
7453
7454         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7455
7456                 aconnector = to_amdgpu_dm_connector(connector);
7457
7458                 if (!aconnector->port)
7459                         continue;
7460
7461                 if (!new_con_state || !new_con_state->crtc)
7462                         continue;
7463
7464                 dm_conn_state = to_dm_connector_state(new_con_state);
7465
7466                 for (j = 0; j < dc_state->stream_count; j++) {
7467                         stream = dc_state->streams[j];
7468                         if (!stream)
7469                                 continue;
7470
7471                         if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7472                                 break;
7473
7474                         stream = NULL;
7475                 }
7476
7477                 if (!stream)
7478                         continue;
7479
7480                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7481                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7482                 for (j = 0; j < dc_state->stream_count; j++) {
7483                         if (vars[j].aconnector == aconnector) {
7484                                 pbn = vars[j].pbn;
7485                                 break;
7486                         }
7487                 }
7488
7489                 if (j == dc_state->stream_count)
7490                         continue;
7491
7492                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7493
7494                 if (stream->timing.flags.DSC != 1) {
7495                         dm_conn_state->pbn = pbn;
7496                         dm_conn_state->vcpi_slots = slot_num;
7497
7498                         drm_dp_mst_atomic_enable_dsc(state,
7499                                                      aconnector->port,
7500                                                      dm_conn_state->pbn,
7501                                                      0,
7502                                                      false);
7503                         continue;
7504                 }
7505
7506                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7507                                                     aconnector->port,
7508                                                     pbn, pbn_div,
7509                                                     true);
7510                 if (vcpi < 0)
7511                         return vcpi;
7512
7513                 dm_conn_state->pbn = pbn;
7514                 dm_conn_state->vcpi_slots = vcpi;
7515         }
7516         return 0;
7517 }
7518 #endif
7519
7520 static void dm_drm_plane_reset(struct drm_plane *plane)
7521 {
7522         struct dm_plane_state *amdgpu_state = NULL;
7523
7524         if (plane->state)
7525                 plane->funcs->atomic_destroy_state(plane, plane->state);
7526
7527         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7528         WARN_ON(amdgpu_state == NULL);
7529
7530         if (amdgpu_state)
7531                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7532 }
7533
7534 static struct drm_plane_state *
7535 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7536 {
7537         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7538
7539         old_dm_plane_state = to_dm_plane_state(plane->state);
7540         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7541         if (!dm_plane_state)
7542                 return NULL;
7543
7544         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7545
7546         if (old_dm_plane_state->dc_state) {
7547                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7548                 dc_plane_state_retain(dm_plane_state->dc_state);
7549         }
7550
7551         return &dm_plane_state->base;
7552 }
7553
7554 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7555                                 struct drm_plane_state *state)
7556 {
7557         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7558
7559         if (dm_plane_state->dc_state)
7560                 dc_plane_state_release(dm_plane_state->dc_state);
7561
7562         drm_atomic_helper_plane_destroy_state(plane, state);
7563 }
7564
7565 static const struct drm_plane_funcs dm_plane_funcs = {
7566         .update_plane   = drm_atomic_helper_update_plane,
7567         .disable_plane  = drm_atomic_helper_disable_plane,
7568         .destroy        = drm_primary_helper_destroy,
7569         .reset = dm_drm_plane_reset,
7570         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7571         .atomic_destroy_state = dm_drm_plane_destroy_state,
7572         .format_mod_supported = dm_plane_format_mod_supported,
7573 };
7574
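/*
 * Pin the framebuffer's buffer object in a scanout-capable domain, map it
 * into GART and record the resulting GPU address, then fill the DC plane
 * buffer attributes for newly created planes.
 */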
7575 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7576                                       struct drm_plane_state *new_state)
7577 {
7578         struct amdgpu_framebuffer *afb;
7579         struct drm_gem_object *obj;
7580         struct amdgpu_device *adev;
7581         struct amdgpu_bo *rbo;
7582         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7583         struct list_head list;
7584         struct ttm_validate_buffer tv;
7585         struct ww_acquire_ctx ticket;
7586         uint32_t domain;
7587         int r;
7588
7589         if (!new_state->fb) {
7590                 DRM_DEBUG_KMS("No FB bound\n");
7591                 return 0;
7592         }
7593
7594         afb = to_amdgpu_framebuffer(new_state->fb);
7595         obj = new_state->fb->obj[0];
7596         rbo = gem_to_amdgpu_bo(obj);
7597         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7598         INIT_LIST_HEAD(&list);
7599
7600         tv.bo = &rbo->tbo;
7601         tv.num_shared = 1;
7602         list_add(&tv.head, &list);
7603
7604         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7605         if (r) {
7606                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7607                 return r;
7608         }
7609
7610         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7611                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7612         else
7613                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7614
7615         r = amdgpu_bo_pin(rbo, domain);
7616         if (unlikely(r != 0)) {
7617                 if (r != -ERESTARTSYS)
7618                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7619                 ttm_eu_backoff_reservation(&ticket, &list);
7620                 return r;
7621         }
7622
7623         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7624         if (unlikely(r != 0)) {
7625                 amdgpu_bo_unpin(rbo);
7626                 ttm_eu_backoff_reservation(&ticket, &list);
7627                 DRM_ERROR("%p bind failed\n", rbo);
7628                 return r;
7629         }
7630
7631         ttm_eu_backoff_reservation(&ticket, &list);
7632
7633         afb->address = amdgpu_bo_gpu_offset(rbo);
7634
7635         amdgpu_bo_ref(rbo);
7636
7637         /*
7638          * We don't do surface updates on planes that have been newly created,
7639          * but we also don't have the afb->address during atomic check.
7640          *
7641          * Fill in buffer attributes depending on the address here, but only on
7642          * newly created planes since they're not being used by DC yet and this
7643          * won't modify global state.
7644          */
7645         dm_plane_state_old = to_dm_plane_state(plane->state);
7646         dm_plane_state_new = to_dm_plane_state(new_state);
7647
7648         if (dm_plane_state_new->dc_state &&
7649             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7650                 struct dc_plane_state *plane_state =
7651                         dm_plane_state_new->dc_state;
7652                 bool force_disable_dcc = !plane_state->dcc.enable;
7653
7654                 fill_plane_buffer_attributes(
7655                         adev, afb, plane_state->format, plane_state->rotation,
7656                         afb->tiling_flags,
7657                         &plane_state->tiling_info, &plane_state->plane_size,
7658                         &plane_state->dcc, &plane_state->address,
7659                         afb->tmz_surface, force_disable_dcc);
7660         }
7661
7662         return 0;
7663 }
7664
7665 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7666                                        struct drm_plane_state *old_state)
7667 {
7668         struct amdgpu_bo *rbo;
7669         int r;
7670
7671         if (!old_state->fb)
7672                 return;
7673
7674         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7675         r = amdgpu_bo_reserve(rbo, false);
7676         if (unlikely(r)) {
7677                 DRM_ERROR("failed to reserve rbo before unpin\n");
7678                 return;
7679         }
7680
7681         amdgpu_bo_unpin(rbo);
7682         amdgpu_bo_unreserve(rbo);
7683         amdgpu_bo_unref(&rbo);
7684 }
7685
7686 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7687                                        struct drm_crtc_state *new_crtc_state)
7688 {
7689         struct drm_framebuffer *fb = state->fb;
7690         int min_downscale, max_upscale;
7691         int min_scale = 0;
7692         int max_scale = INT_MAX;
7693
7694         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7695         if (fb && state->crtc) {
7696                 /* Validate viewport to cover the case when only the position changes */
7697                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7698                         int viewport_width = state->crtc_w;
7699                         int viewport_height = state->crtc_h;
7700
7701                         if (state->crtc_x < 0)
7702                                 viewport_width += state->crtc_x;
7703                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7704                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7705
7706                         if (state->crtc_y < 0)
7707                                 viewport_height += state->crtc_y;
7708                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7709                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7710
7711                         if (viewport_width < 0 || viewport_height < 0) {
7712                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7713                                 return -EINVAL;
7714                         } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* 2x width because of pipe split */
7715                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7716                                 return -EINVAL;
7717                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7718                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7719                                 return -EINVAL;
7720                         }
7721
7722                 }
7723
7724                 /* Get min/max allowed scaling factors from plane caps. */
7725                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7726                                              &min_downscale, &max_upscale);
7727                 /*
7728                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7729                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7730                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7731                  */
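                /*
                 * For example, a DC max_upscale of 16000 (16.0x) yields
                 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in
                 * 16.16 fixed point.
                 */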
7732                 min_scale = (1000 << 16) / max_upscale;
7733                 max_scale = (1000 << 16) / min_downscale;
7734         }
7735
7736         return drm_atomic_helper_check_plane_state(
7737                 state, new_crtc_state, min_scale, max_scale, true, true);
7738 }
7739
7740 static int dm_plane_atomic_check(struct drm_plane *plane,
7741                                  struct drm_atomic_state *state)
7742 {
7743         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7744                                                                                  plane);
7745         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7746         struct dc *dc = adev->dm.dc;
7747         struct dm_plane_state *dm_plane_state;
7748         struct dc_scaling_info scaling_info;
7749         struct drm_crtc_state *new_crtc_state;
7750         int ret;
7751
7752         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7753
7754         dm_plane_state = to_dm_plane_state(new_plane_state);
7755
7756         if (!dm_plane_state->dc_state)
7757                 return 0;
7758
7759         new_crtc_state =
7760                 drm_atomic_get_new_crtc_state(state,
7761                                               new_plane_state->crtc);
7762         if (!new_crtc_state)
7763                 return -EINVAL;
7764
7765         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7766         if (ret)
7767                 return ret;
7768
7769         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7770         if (ret)
7771                 return ret;
7772
7773         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7774                 return 0;
7775
7776         return -EINVAL;
7777 }
7778
7779 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7780                                        struct drm_atomic_state *state)
7781 {
7782         /* Only support async updates on cursor planes. */
7783         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7784                 return -EINVAL;
7785
7786         return 0;
7787 }
7788
7789 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7790                                          struct drm_atomic_state *state)
7791 {
7792         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7793                                                                            plane);
7794         struct drm_plane_state *old_state =
7795                 drm_atomic_get_old_plane_state(state, plane);
7796
7797         trace_amdgpu_dm_atomic_update_cursor(new_state);
7798
7799         swap(plane->state->fb, new_state->fb);
7800
7801         plane->state->src_x = new_state->src_x;
7802         plane->state->src_y = new_state->src_y;
7803         plane->state->src_w = new_state->src_w;
7804         plane->state->src_h = new_state->src_h;
7805         plane->state->crtc_x = new_state->crtc_x;
7806         plane->state->crtc_y = new_state->crtc_y;
7807         plane->state->crtc_w = new_state->crtc_w;
7808         plane->state->crtc_h = new_state->crtc_h;
7809
7810         handle_cursor_update(plane, old_state);
7811 }
7812
7813 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7814         .prepare_fb = dm_plane_helper_prepare_fb,
7815         .cleanup_fb = dm_plane_helper_cleanup_fb,
7816         .atomic_check = dm_plane_atomic_check,
7817         .atomic_async_check = dm_plane_atomic_async_check,
7818         .atomic_async_update = dm_plane_atomic_async_update
7819 };
7820
7821 /*
7822  * TODO: these are currently initialized to RGB formats only.
7823  * For future use cases we should either initialize them dynamically based on
7824  * plane capabilities, or initialize this array to all formats, so the internal
7825  * DRM check will succeed, and let DC implement the proper check.
7826  */
7827 static const uint32_t rgb_formats[] = {
7828         DRM_FORMAT_XRGB8888,
7829         DRM_FORMAT_ARGB8888,
7830         DRM_FORMAT_RGBA8888,
7831         DRM_FORMAT_XRGB2101010,
7832         DRM_FORMAT_XBGR2101010,
7833         DRM_FORMAT_ARGB2101010,
7834         DRM_FORMAT_ABGR2101010,
7835         DRM_FORMAT_XRGB16161616,
7836         DRM_FORMAT_XBGR16161616,
7837         DRM_FORMAT_ARGB16161616,
7838         DRM_FORMAT_ABGR16161616,
7839         DRM_FORMAT_XBGR8888,
7840         DRM_FORMAT_ABGR8888,
7841         DRM_FORMAT_RGB565,
7842 };
7843
7844 static const uint32_t overlay_formats[] = {
7845         DRM_FORMAT_XRGB8888,
7846         DRM_FORMAT_ARGB8888,
7847         DRM_FORMAT_RGBA8888,
7848         DRM_FORMAT_XBGR8888,
7849         DRM_FORMAT_ABGR8888,
7850         DRM_FORMAT_RGB565
7851 };
7852
7853 static const u32 cursor_formats[] = {
7854         DRM_FORMAT_ARGB8888
7855 };
7856
7857 static int get_plane_formats(const struct drm_plane *plane,
7858                              const struct dc_plane_cap *plane_cap,
7859                              uint32_t *formats, int max_formats)
7860 {
7861         int i, num_formats = 0;
7862
7863         /*
7864          * TODO: Query support for each group of formats directly from
7865          * DC plane caps. This will require adding more formats to the
7866          * caps list.
7867          */
7868
7869         switch (plane->type) {
7870         case DRM_PLANE_TYPE_PRIMARY:
7871                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7872                         if (num_formats >= max_formats)
7873                                 break;
7874
7875                         formats[num_formats++] = rgb_formats[i];
7876                 }
7877
7878                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7879                         formats[num_formats++] = DRM_FORMAT_NV12;
7880                 if (plane_cap && plane_cap->pixel_format_support.p010)
7881                         formats[num_formats++] = DRM_FORMAT_P010;
7882                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7883                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7884                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7885                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7886                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7887                 }
7888                 break;
7889
7890         case DRM_PLANE_TYPE_OVERLAY:
7891                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7892                         if (num_formats >= max_formats)
7893                                 break;
7894
7895                         formats[num_formats++] = overlay_formats[i];
7896                 }
7897                 break;
7898
7899         case DRM_PLANE_TYPE_CURSOR:
7900                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7901                         if (num_formats >= max_formats)
7902                                 break;
7903
7904                         formats[num_formats++] = cursor_formats[i];
7905                 }
7906                 break;
7907         }
7908
7909         return num_formats;
7910 }
7911
7912 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7913                                 struct drm_plane *plane,
7914                                 unsigned long possible_crtcs,
7915                                 const struct dc_plane_cap *plane_cap)
7916 {
7917         uint32_t formats[32];
7918         int num_formats;
7919         int res = -EPERM;
7920         unsigned int supported_rotations;
7921         uint64_t *modifiers = NULL;
7922
7923         num_formats = get_plane_formats(plane, plane_cap, formats,
7924                                         ARRAY_SIZE(formats));
7925
7926         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7927         if (res)
7928                 return res;
7929
7930         if (modifiers == NULL)
7931                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7932
7933         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7934                                        &dm_plane_funcs, formats, num_formats,
7935                                        modifiers, plane->type, NULL);
7936         kfree(modifiers);
7937         if (res)
7938                 return res;
7939
7940         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7941             plane_cap && plane_cap->per_pixel_alpha) {
7942                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7943                                           BIT(DRM_MODE_BLEND_PREMULTI);
7944
7945                 drm_plane_create_alpha_property(plane);
7946                 drm_plane_create_blend_mode_property(plane, blend_caps);
7947         }
7948
7949         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7950             plane_cap &&
7951             (plane_cap->pixel_format_support.nv12 ||
7952              plane_cap->pixel_format_support.p010)) {
7953                 /* This only affects YUV formats. */
7954                 drm_plane_create_color_properties(
7955                         plane,
7956                         BIT(DRM_COLOR_YCBCR_BT601) |
7957                         BIT(DRM_COLOR_YCBCR_BT709) |
7958                         BIT(DRM_COLOR_YCBCR_BT2020),
7959                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7960                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7961                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7962         }
7963
7964         supported_rotations =
7965                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7966                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7967
7968         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7969             plane->type != DRM_PLANE_TYPE_CURSOR)
7970                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7971                                                    supported_rotations);
7972
7973         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7974
7975         /* Create (reset) the plane state */
7976         if (plane->funcs->reset)
7977                 plane->funcs->reset(plane);
7978
7979         return 0;
7980 }
7981
7982 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7983                                struct drm_plane *plane,
7984                                uint32_t crtc_index)
7985 {
7986         struct amdgpu_crtc *acrtc = NULL;
7987         struct drm_plane *cursor_plane;
7988
7989         int res = -ENOMEM;
7990
7991         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7992         if (!cursor_plane)
7993                 goto fail;
7994
7995         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7996         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

7998         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7999         if (!acrtc)
8000                 goto fail;
8001
8002         res = drm_crtc_init_with_planes(
8003                         dm->ddev,
8004                         &acrtc->base,
8005                         plane,
8006                         cursor_plane,
8007                         &amdgpu_dm_crtc_funcs, NULL);
8008
8009         if (res)
8010                 goto fail;
8011
8012         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8013
8014         /* Create (reset) the CRTC state */
8015         if (acrtc->base.funcs->reset)
8016                 acrtc->base.funcs->reset(&acrtc->base);
8017
8018         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8019         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8020
8021         acrtc->crtc_id = crtc_index;
8022         acrtc->base.enabled = false;
8023         acrtc->otg_inst = -1;
8024
8025         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8026         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8027                                    true, MAX_COLOR_LUT_ENTRIES);
8028         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8029
8030         return 0;
8031
8032 fail:
8033         kfree(acrtc);
8034         kfree(cursor_plane);
8035         return res;
8036 }
8037
8038
8039 static int to_drm_connector_type(enum signal_type st)
8040 {
8041         switch (st) {
8042         case SIGNAL_TYPE_HDMI_TYPE_A:
8043                 return DRM_MODE_CONNECTOR_HDMIA;
8044         case SIGNAL_TYPE_EDP:
8045                 return DRM_MODE_CONNECTOR_eDP;
8046         case SIGNAL_TYPE_LVDS:
8047                 return DRM_MODE_CONNECTOR_LVDS;
8048         case SIGNAL_TYPE_RGB:
8049                 return DRM_MODE_CONNECTOR_VGA;
8050         case SIGNAL_TYPE_DISPLAY_PORT:
8051         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8052                 return DRM_MODE_CONNECTOR_DisplayPort;
8053         case SIGNAL_TYPE_DVI_DUAL_LINK:
8054         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8055                 return DRM_MODE_CONNECTOR_DVID;
8056         case SIGNAL_TYPE_VIRTUAL:
8057                 return DRM_MODE_CONNECTOR_VIRTUAL;
8058
8059         default:
8060                 return DRM_MODE_CONNECTOR_Unknown;
8061         }
8062 }
8063
8064 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8065 {
8066         struct drm_encoder *encoder;
8067
8068         /* There is only one encoder per connector */
8069         drm_connector_for_each_possible_encoder(connector, encoder)
8070                 return encoder;
8071
8072         return NULL;
8073 }
8074
8075 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8076 {
8077         struct drm_encoder *encoder;
8078         struct amdgpu_encoder *amdgpu_encoder;
8079
8080         encoder = amdgpu_dm_connector_to_encoder(connector);
8081
8082         if (encoder == NULL)
8083                 return;
8084
8085         amdgpu_encoder = to_amdgpu_encoder(encoder);
8086
8087         amdgpu_encoder->native_mode.clock = 0;
8088
8089         if (!list_empty(&connector->probed_modes)) {
8090                 struct drm_display_mode *preferred_mode = NULL;
8091
8092                 list_for_each_entry(preferred_mode,
8093                                     &connector->probed_modes,
8094                                     head) {
8095                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8096                                 amdgpu_encoder->native_mode = *preferred_mode;
8097
8098                         break;
8099                 }
8100
8101         }
8102 }
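/*
 * Note on the loop above: it breaks after inspecting only the first
 * probed mode. The caller sorts probed_modes with drm_mode_sort() first,
 * so that entry is the highest-priority (preferred) mode, and it alone
 * is considered as the native mode.
 */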
8103
8104 static struct drm_display_mode *
8105 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8106                              char *name,
8107                              int hdisplay, int vdisplay)
8108 {
8109         struct drm_device *dev = encoder->dev;
8110         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8111         struct drm_display_mode *mode = NULL;
8112         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8113
8114         mode = drm_mode_duplicate(dev, native_mode);
8115
8116         if (mode == NULL)
8117                 return NULL;
8118
8119         mode->hdisplay = hdisplay;
8120         mode->vdisplay = vdisplay;
8121         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8122         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8123
8124         return mode;
8125
8126 }
8127
8128 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8129                                                  struct drm_connector *connector)
8130 {
8131         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8132         struct drm_display_mode *mode = NULL;
8133         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8134         struct amdgpu_dm_connector *amdgpu_dm_connector =
8135                                 to_amdgpu_dm_connector(connector);
8136         int i;
8137         int n;
8138         struct mode_size {
8139                 char name[DRM_DISPLAY_MODE_LEN];
8140                 int w;
8141                 int h;
8142         } common_modes[] = {
8143                 {  "640x480",  640,  480},
8144                 {  "800x600",  800,  600},
8145                 { "1024x768", 1024,  768},
8146                 { "1280x720", 1280,  720},
8147                 { "1280x800", 1280,  800},
8148                 {"1280x1024", 1280, 1024},
8149                 { "1440x900", 1440,  900},
8150                 {"1680x1050", 1680, 1050},
8151                 {"1600x1200", 1600, 1200},
8152                 {"1920x1080", 1920, 1080},
8153                 {"1920x1200", 1920, 1200}
8154         };
8155
8156         n = ARRAY_SIZE(common_modes);
8157
8158         for (i = 0; i < n; i++) {
8159                 struct drm_display_mode *curmode = NULL;
8160                 bool mode_existed = false;
8161
8162                 if (common_modes[i].w > native_mode->hdisplay ||
8163                     common_modes[i].h > native_mode->vdisplay ||
8164                    (common_modes[i].w == native_mode->hdisplay &&
8165                     common_modes[i].h == native_mode->vdisplay))
8166                         continue;
8167
8168                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8169                         if (common_modes[i].w == curmode->hdisplay &&
8170                             common_modes[i].h == curmode->vdisplay) {
8171                                 mode_existed = true;
8172                                 break;
8173                         }
8174                 }
8175
8176                 if (mode_existed)
8177                         continue;
8178
8179                 mode = amdgpu_dm_create_common_mode(encoder,
8180                                 common_modes[i].name, common_modes[i].w,
8181                                 common_modes[i].h);
8182                 if (!mode)
8183                         continue;
8184
8185                 drm_mode_probed_add(connector, mode);
8186                 amdgpu_dm_connector->num_modes++;
8187         }
8188 }
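/*
 * Worked example (illustrative): for a native 1920x1200 panel, the loop
 * above adds each common_modes[] entry that fits within 1920x1200 and is
 * not already in probed_modes, and skips only the exact 1920x1200 match
 * for the native resolution itself.
 */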
8189
8190 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8191 {
8192         struct drm_encoder *encoder;
8193         struct amdgpu_encoder *amdgpu_encoder;
8194         const struct drm_display_mode *native_mode;
8195
8196         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8197             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8198                 return;
8199
8200         encoder = amdgpu_dm_connector_to_encoder(connector);
8201         if (!encoder)
8202                 return;
8203
8204         amdgpu_encoder = to_amdgpu_encoder(encoder);
8205
8206         native_mode = &amdgpu_encoder->native_mode;
8207         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8208                 return;
8209
8210         drm_connector_set_panel_orientation_with_quirk(connector,
8211                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8212                                                        native_mode->hdisplay,
8213                                                        native_mode->vdisplay);
8214 }
8215
8216 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8217                                               struct edid *edid)
8218 {
8219         struct amdgpu_dm_connector *amdgpu_dm_connector =
8220                         to_amdgpu_dm_connector(connector);
8221
8222         if (edid) {
8223                 /* empty probed_modes */
8224                 INIT_LIST_HEAD(&connector->probed_modes);
8225                 amdgpu_dm_connector->num_modes =
8226                                 drm_add_edid_modes(connector, edid);
8227
8228                 /* Sort the probed modes before calling
8229                  * amdgpu_dm_get_native_mode(), since an EDID can
8230                  * contain more than one preferred mode. Modes later in
8231                  * the probed list may have a higher, preferred
8232                  * resolution: for example, 3840x2160 in the base EDID
8233                  * preferred timing and 4096x2160 in a later DisplayID
8234                  * (DID) extension block.
8235                  */
8236                 drm_mode_sort(&connector->probed_modes);
8237                 amdgpu_dm_get_native_mode(connector);
8238
8239                 /* Freesync capabilities are reset by calling
8240                  * drm_add_edid_modes() and need to be
8241                  * restored here.
8242                  */
8243                 amdgpu_dm_update_freesync_caps(connector, edid);
8244
8245                 amdgpu_set_panel_orientation(connector);
8246         } else {
8247                 amdgpu_dm_connector->num_modes = 0;
8248         }
8249 }
8250
8251 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8252                               struct drm_display_mode *mode)
8253 {
8254         struct drm_display_mode *m;
8255
8256         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8257                 if (drm_mode_equal(m, mode))
8258                         return true;
8259         }
8260
8261         return false;
8262 }
8263
8264 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8265 {
8266         const struct drm_display_mode *m;
8267         struct drm_display_mode *new_mode;
8268         uint i;
8269         uint32_t new_modes_count = 0;
8270
8271         /* Standard FPS values
8272          *
8273          * 23.976       - TV/NTSC
8274          * 24           - Cinema
8275          * 25           - TV/PAL
8276          * 29.97        - TV/NTSC
8277          * 30           - TV/NTSC
8278          * 48           - Cinema HFR
8279          * 50           - TV/PAL
8280          * 60           - Commonly used
8281          * 48,72,96,120 - Multiples of 24
8282          */
8283         static const uint32_t common_rates[] = {
8284                 23976, 24000, 25000, 29970, 30000,
8285                 48000, 50000, 60000, 72000, 96000, 120000
8286         };
8287
8288         /*
8289          * Find mode with highest refresh rate with the same resolution
8290          * as the preferred mode. Some monitors report a preferred mode
8291          * with lower resolution than the highest refresh rate supported.
8292          */
8293
8294         m = get_highest_refresh_rate_mode(aconnector, true);
8295         if (!m)
8296                 return 0;
8297
8298         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8299                 uint64_t target_vtotal, target_vtotal_diff;
8300                 uint64_t num, den;
8301
8302                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8303                         continue;
8304
8305                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8306                     common_rates[i] > aconnector->max_vfreq * 1000)
8307                         continue;
8308
8309                 num = (unsigned long long)m->clock * 1000 * 1000;
8310                 den = common_rates[i] * (unsigned long long)m->htotal;
8311                 target_vtotal = div_u64(num, den);
8312                 target_vtotal_diff = target_vtotal - m->vtotal;
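                /*
                 * Worked example (illustrative numbers): for a
                 * 1920x1080@60 mode with clock 148500 kHz, htotal 2200
                 * and vtotal 1125, a 48 Hz target gives
                 *   target_vtotal = 148500 * 10^6 / (48000 * 2200) = 1406
                 * so target_vtotal_diff = 281 extra blanking lines
                 * stretch the frame to ~48 Hz while the pixel clock and
                 * horizontal timing stay unchanged.
                 */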
8313
8314                 /* Check for illegal modes */
8315                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8316                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8317                     m->vtotal + target_vtotal_diff < m->vsync_end)
8318                         continue;
8319
8320                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8321                 if (!new_mode)
8322                         goto out;
8323
8324                 new_mode->vtotal += (u16)target_vtotal_diff;
8325                 new_mode->vsync_start += (u16)target_vtotal_diff;
8326                 new_mode->vsync_end += (u16)target_vtotal_diff;
8327                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8328                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8329
8330                 if (!is_duplicate_mode(aconnector, new_mode)) {
8331                         drm_mode_probed_add(&aconnector->base, new_mode);
8332                         new_modes_count += 1;
8333                 } else
8334                         drm_mode_destroy(aconnector->base.dev, new_mode);
8335         }
8336  out:
8337         return new_modes_count;
8338 }
8339
8340 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8341                                                    struct edid *edid)
8342 {
8343         struct amdgpu_dm_connector *amdgpu_dm_connector =
8344                 to_amdgpu_dm_connector(connector);
8345
8346         if (!edid)
8347                 return;
8348
8349         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8350                 amdgpu_dm_connector->num_modes +=
8351                         add_fs_modes(amdgpu_dm_connector);
8352 }
8353
8354 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8355 {
8356         struct amdgpu_dm_connector *amdgpu_dm_connector =
8357                         to_amdgpu_dm_connector(connector);
8358         struct drm_encoder *encoder;
8359         struct edid *edid = amdgpu_dm_connector->edid;
8360
8361         encoder = amdgpu_dm_connector_to_encoder(connector);
8362
8363         if (!drm_edid_is_valid(edid)) {
8364                 amdgpu_dm_connector->num_modes =
8365                                 drm_add_modes_noedid(connector, 640, 480);
8366         } else {
8367                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8368                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8369                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8370         }
8371         amdgpu_dm_fbc_init(connector);
8372
8373         return amdgpu_dm_connector->num_modes;
8374 }
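/*
 * Illustrative flow (summary, not driver code): without a valid EDID the
 * connector falls back to drm_add_modes_noedid() with a 640x480 ceiling;
 * with one, the list is built from the EDID, padded with the common
 * modes, and extended with synthetic FreeSync modes.
 */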
8375
8376 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8377                                      struct amdgpu_dm_connector *aconnector,
8378                                      int connector_type,
8379                                      struct dc_link *link,
8380                                      int link_index)
8381 {
8382         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8383
8384         /*
8385          * Some of the properties below require access to state, like bpc.
8386          * Allocate some default initial connector state with our reset helper.
8387          */
8388         if (aconnector->base.funcs->reset)
8389                 aconnector->base.funcs->reset(&aconnector->base);
8390
8391         aconnector->connector_id = link_index;
8392         aconnector->dc_link = link;
8393         aconnector->base.interlace_allowed = false;
8394         aconnector->base.doublescan_allowed = false;
8395         aconnector->base.stereo_allowed = false;
8396         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8397         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8398         aconnector->audio_inst = -1;
8399         mutex_init(&aconnector->hpd_lock);
8400
8401         /*
8402          * Configure HPD hot-plug support. connector->polled defaults to 0,
8403          * which means HPD hot plug is not supported.
8404          */
8405         switch (connector_type) {
8406         case DRM_MODE_CONNECTOR_HDMIA:
8407                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8408                 aconnector->base.ycbcr_420_allowed =
8409                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8410                 break;
8411         case DRM_MODE_CONNECTOR_DisplayPort:
8412                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8413                 link->link_enc = link_enc_cfg_get_link_enc(link);
8414                 ASSERT(link->link_enc);
8415                 if (link->link_enc)
8416                         aconnector->base.ycbcr_420_allowed =
8417                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
8418                 break;
8419         case DRM_MODE_CONNECTOR_DVID:
8420                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8421                 break;
8422         default:
8423                 break;
8424         }
8425
8426         drm_object_attach_property(&aconnector->base.base,
8427                                 dm->ddev->mode_config.scaling_mode_property,
8428                                 DRM_MODE_SCALE_NONE);
8429
8430         drm_object_attach_property(&aconnector->base.base,
8431                                 adev->mode_info.underscan_property,
8432                                 UNDERSCAN_OFF);
8433         drm_object_attach_property(&aconnector->base.base,
8434                                 adev->mode_info.underscan_hborder_property,
8435                                 0);
8436         drm_object_attach_property(&aconnector->base.base,
8437                                 adev->mode_info.underscan_vborder_property,
8438                                 0);
8439
8440         if (!aconnector->mst_port)
8441                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8442
8443         /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8444         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8445         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8446
8447         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8448             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8449                 drm_object_attach_property(&aconnector->base.base,
8450                                 adev->mode_info.abm_level_property, 0);
8451         }
8452
8453         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8454             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8455             connector_type == DRM_MODE_CONNECTOR_eDP) {
8456                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8457
8458                 if (!aconnector->mst_port)
8459                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8460
8461 #ifdef CONFIG_DRM_AMD_DC_HDCP
8462                 if (adev->dm.hdcp_workqueue)
8463                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8464 #endif
8465         }
8466 }
8467
8468 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8469                               struct i2c_msg *msgs, int num)
8470 {
8471         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8472         struct ddc_service *ddc_service = i2c->ddc_service;
8473         struct i2c_command cmd;
8474         int i;
8475         int result = -EIO;
8476
8477         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8478
8479         if (!cmd.payloads)
8480                 return result;
8481
8482         cmd.number_of_payloads = num;
8483         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8484         cmd.speed = 100;
8485
8486         for (i = 0; i < num; i++) {
8487                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8488                 cmd.payloads[i].address = msgs[i].addr;
8489                 cmd.payloads[i].length = msgs[i].len;
8490                 cmd.payloads[i].data = msgs[i].buf;
8491         }
8492
8493         if (dc_submit_i2c(
8494                         ddc_service->ctx->dc,
8495                         ddc_service->ddc_pin->hw_info.ddc_channel,
8496                         &cmd))
8497                 result = num;
8498
8499         kfree(cmd.payloads);
8500         return result;
8501 }
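/*
 * Illustrative example (assumed transfer, not driver code): a typical
 * EDID read arrives here as two i2c_msg entries for address 0x50 - a
 * one-byte write that sets the EDID offset, then a 128-byte read. Each
 * msg becomes one i2c_payload above, with .write derived from the
 * I2C_M_RD flag, and the whole transaction is submitted to DC as a
 * single i2c_command.
 */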
8502
8503 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8504 {
8505         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8506 }
8507
8508 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8509         .master_xfer = amdgpu_dm_i2c_xfer,
8510         .functionality = amdgpu_dm_i2c_func,
8511 };
8512
8513 static struct amdgpu_i2c_adapter *
8514 create_i2c(struct ddc_service *ddc_service,
8515            int link_index,
8516            int *res)
8517 {
8518         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8519         struct amdgpu_i2c_adapter *i2c;
8520
8521         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8522         if (!i2c)
8523                 return NULL;
8524         i2c->base.owner = THIS_MODULE;
8525         i2c->base.class = I2C_CLASS_DDC;
8526         i2c->base.dev.parent = &adev->pdev->dev;
8527         i2c->base.algo = &amdgpu_dm_i2c_algo;
8528         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8529         i2c_set_adapdata(&i2c->base, i2c);
8530         i2c->ddc_service = ddc_service;
8531         if (i2c->ddc_service->ddc_pin)
8532                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8533
8534         return i2c;
8535 }
8536
8537
8538 /*
8539  * Note: this function assumes that dc_link_detect() was called for the
8540  * dc_link which will be represented by this aconnector.
8541  */
8542 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8543                                     struct amdgpu_dm_connector *aconnector,
8544                                     uint32_t link_index,
8545                                     struct amdgpu_encoder *aencoder)
8546 {
8547         int res = 0;
8548         int connector_type;
8549         struct dc *dc = dm->dc;
8550         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8551         struct amdgpu_i2c_adapter *i2c;
8552
8553         link->priv = aconnector;
8554
8555         DRM_DEBUG_DRIVER("%s()\n", __func__);
8556
8557         i2c = create_i2c(link->ddc, link->link_index, &res);
8558         if (!i2c) {
8559                 DRM_ERROR("Failed to create i2c adapter data\n");
8560                 return -ENOMEM;
8561         }
8562
8563         aconnector->i2c = i2c;
8564         res = i2c_add_adapter(&i2c->base);
8565
8566         if (res) {
8567                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8568                 goto out_free;
8569         }
8570
8571         connector_type = to_drm_connector_type(link->connector_signal);
8572
8573         res = drm_connector_init_with_ddc(
8574                         dm->ddev,
8575                         &aconnector->base,
8576                         &amdgpu_dm_connector_funcs,
8577                         connector_type,
8578                         &i2c->base);
8579
8580         if (res) {
8581                 DRM_ERROR("connector_init failed\n");
8582                 aconnector->connector_id = -1;
8583                 goto out_free;
8584         }
8585
8586         drm_connector_helper_add(
8587                         &aconnector->base,
8588                         &amdgpu_dm_connector_helper_funcs);
8589
8590         amdgpu_dm_connector_init_helper(
8591                 dm,
8592                 aconnector,
8593                 connector_type,
8594                 link,
8595                 link_index);
8596
8597         drm_connector_attach_encoder(
8598                 &aconnector->base, &aencoder->base);
8599
8600         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8601                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8602                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8603
8604 out_free:
8605         if (res) {
8606                 kfree(i2c);
8607                 aconnector->i2c = NULL;
8608         }
8609         return res;
8610 }
8611
8612 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8613 {
8614         switch (adev->mode_info.num_crtc) {
8615         case 1:
8616                 return 0x1;
8617         case 2:
8618                 return 0x3;
8619         case 3:
8620                 return 0x7;
8621         case 4:
8622                 return 0xf;
8623         case 5:
8624                 return 0x1f;
8625         case 6:
8626         default:
8627                 return 0x3f;
8628         }
8629 }
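/*
 * Note: the switch above is equivalent to a bitmask with one bit per
 * CRTC, clamped to six; e.g. num_crtc == 4 yields 0xf, meaning the
 * encoder may be routed to any of CRTCs 0-3.
 */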
8630
8631 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8632                                   struct amdgpu_encoder *aencoder,
8633                                   uint32_t link_index)
8634 {
8635         struct amdgpu_device *adev = drm_to_adev(dev);
8636
8637         int res = drm_encoder_init(dev,
8638                                    &aencoder->base,
8639                                    &amdgpu_dm_encoder_funcs,
8640                                    DRM_MODE_ENCODER_TMDS,
8641                                    NULL);
8642
8643         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8644
8645         if (!res)
8646                 aencoder->encoder_id = link_index;
8647         else
8648                 aencoder->encoder_id = -1;
8649
8650         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8651
8652         return res;
8653 }
8654
8655 static void manage_dm_interrupts(struct amdgpu_device *adev,
8656                                  struct amdgpu_crtc *acrtc,
8657                                  bool enable)
8658 {
8659         /*
8660          * We have no guarantee that the frontend index maps to the same
8661          * backend index - some even map to more than one.
8662          *
8663          * TODO: Use a different interrupt or check DC itself for the mapping.
8664          */
8665         int irq_type =
8666                 amdgpu_display_crtc_idx_to_irq_type(
8667                         adev,
8668                         acrtc->crtc_id);
8669
8670         if (enable) {
8671                 drm_crtc_vblank_on(&acrtc->base);
8672                 amdgpu_irq_get(
8673                         adev,
8674                         &adev->pageflip_irq,
8675                         irq_type);
8676 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8677                 amdgpu_irq_get(
8678                         adev,
8679                         &adev->vline0_irq,
8680                         irq_type);
8681 #endif
8682         } else {
8683 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8684                 amdgpu_irq_put(
8685                         adev,
8686                         &adev->vline0_irq,
8687                         irq_type);
8688 #endif
8689                 amdgpu_irq_put(
8690                         adev,
8691                         &adev->pageflip_irq,
8692                         irq_type);
8693                 drm_crtc_vblank_off(&acrtc->base);
8694         }
8695 }
8696
8697 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8698                                       struct amdgpu_crtc *acrtc)
8699 {
8700         int irq_type =
8701                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8702
8703         /*
8704          * Read the current state for the IRQ and forcibly reapply
8705          * the setting to hardware.
8706          */
8707         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8708 }
8709
8710 static bool
8711 is_scaling_state_different(const struct dm_connector_state *dm_state,
8712                            const struct dm_connector_state *old_dm_state)
8713 {
8714         if (dm_state->scaling != old_dm_state->scaling)
8715                 return true;
8716         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8717                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8718                         return true;
8719         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8720                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8721                         return true;
8722         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8723                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8724                 return true;
8725         return false;
8726 }
8727
8728 #ifdef CONFIG_DRM_AMD_DC_HDCP
8729 static bool is_content_protection_different(struct drm_connector_state *state,
8730                                             const struct drm_connector_state *old_state,
8731                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8732 {
8733         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8734         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8735
8736         /* Handle: Type0/1 change */
8737         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8738             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8739                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8740                 return true;
8741         }
8742
8743         /* Content protection is being re-enabled; ignore this.
8744          *
8745          * Handles:     ENABLED -> DESIRED
8746          */
8747         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8748             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8749                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8750                 return false;
8751         }
8752
8753         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8754          *
8755          * Handles:     UNDESIRED -> ENABLED
8756          */
8757         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8758             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8759                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8760
8761         /* Stream removed and re-enabled
8762          *
8763          * Can sometimes overlap with the HPD case,
8764          * thus set update_hdcp to false to avoid
8765          * setting HDCP multiple times.
8766          *
8767          * Handles:     DESIRED -> DESIRED (Special case)
8768          */
8769         if (!(old_state->crtc && old_state->crtc->enabled) &&
8770                 state->crtc && state->crtc->enabled &&
8771                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8772                 dm_con_state->update_hdcp = false;
8773                 return true;
8774         }
8775
8776         /* Hot-plug, headless s3, dpms
8777          *
8778          * Only start HDCP if the display is connected/enabled.
8779          * update_hdcp flag will be set to false until the next
8780          * HPD comes in.
8781          *
8782          * Handles:     DESIRED -> DESIRED (Special case)
8783          */
8784         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8785             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8786                 dm_con_state->update_hdcp = false;
8787                 return true;
8788         }
8789
8790         /*
8791          * Handles:     UNDESIRED -> UNDESIRED
8792          *              DESIRED -> DESIRED
8793          *              ENABLED -> ENABLED
8794          */
8795         if (old_state->content_protection == state->content_protection)
8796                 return false;
8797
8798         /*
8799          * Handles:     UNDESIRED -> DESIRED
8800          *              DESIRED -> UNDESIRED
8801          *              ENABLED -> UNDESIRED
8802          */
8803         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8804                 return true;
8805
8806         /*
8807          * Handles:     DESIRED -> ENABLED
8808          */
8809         return false;
8810 }
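/*
 * Summary of the transitions handled above (derived from the checks):
 * a Type0/1 change re-arms HDCP via DESIRED; ENABLED -> DESIRED is
 * treated as still enabled; UNDESIRED -> ENABLED (S3 resume) is demoted
 * to DESIRED and re-enabled; two DESIRED -> DESIRED special cases
 * re-trigger HDCP; unchanged states and DESIRED -> ENABLED need no
 * action.
 */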
8811
8812 #endif
8813 static void remove_stream(struct amdgpu_device *adev,
8814                           struct amdgpu_crtc *acrtc,
8815                           struct dc_stream_state *stream)
8816 {
8817         /* Update-mode case: the stream was removed, so reset the CRTC's tracking state. */
8818
8819         acrtc->otg_inst = -1;
8820         acrtc->enabled = false;
8821 }
8822
8823 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8824                                struct dc_cursor_position *position)
8825 {
8826         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8827         int x, y;
8828         int xorigin = 0, yorigin = 0;
8829
8830         if (!crtc || !plane->state->fb)
8831                 return 0;
8832
8833         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8834             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8835                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8836                           __func__,
8837                           plane->state->crtc_w,
8838                           plane->state->crtc_h);
8839                 return -EINVAL;
8840         }
8841
8842         x = plane->state->crtc_x;
8843         y = plane->state->crtc_y;
8844
8845         if (x <= -amdgpu_crtc->max_cursor_width ||
8846             y <= -amdgpu_crtc->max_cursor_height)
8847                 return 0;
8848
8849         if (x < 0) {
8850                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8851                 x = 0;
8852         }
8853         if (y < 0) {
8854                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8855                 y = 0;
8856         }
8857         position->enable = true;
8858         position->translate_by_source = true;
8859         position->x = x;
8860         position->y = y;
8861         position->x_hotspot = xorigin;
8862         position->y_hotspot = yorigin;
8863
8864         return 0;
8865 }
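/*
 * Worked example (illustrative): with a 128x128 cursor limit, a cursor
 * placed at crtc_x = -10 yields x = 0 with x_hotspot = 10, so DC scans
 * the cursor image out shifted left by 10 pixels instead of rejecting
 * the partially off-screen position.
 */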
8866
8867 static void handle_cursor_update(struct drm_plane *plane,
8868                                  struct drm_plane_state *old_plane_state)
8869 {
8870         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8871         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8872         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8873         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8874         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8875         uint64_t address = afb ? afb->address : 0;
8876         struct dc_cursor_position position = {0};
8877         struct dc_cursor_attributes attributes;
8878         int ret;
8879
8880         if (!plane->state->fb && !old_plane_state->fb)
8881                 return;
8882
8883         DC_LOG_CURSOR("%s: crtc_id=%d with size %d x %d\n",
8884                       __func__,
8885                       amdgpu_crtc->crtc_id,
8886                       plane->state->crtc_w,
8887                       plane->state->crtc_h);
8888
8889         ret = get_cursor_position(plane, crtc, &position);
8890         if (ret)
8891                 return;
8892
8893         if (!position.enable) {
8894                 /* turn off cursor */
8895                 if (crtc_state && crtc_state->stream) {
8896                         mutex_lock(&adev->dm.dc_lock);
8897                         dc_stream_set_cursor_position(crtc_state->stream,
8898                                                       &position);
8899                         mutex_unlock(&adev->dm.dc_lock);
8900                 }
8901                 return;
8902         }
8903
8904         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8905         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8906
8907         memset(&attributes, 0, sizeof(attributes));
8908         attributes.address.high_part = upper_32_bits(address);
8909         attributes.address.low_part  = lower_32_bits(address);
8910         attributes.width             = plane->state->crtc_w;
8911         attributes.height            = plane->state->crtc_h;
8912         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8913         attributes.rotation_angle    = 0;
8914         attributes.attribute_flags.value = 0;
8915
8916         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8917
8918         if (crtc_state->stream) {
8919                 mutex_lock(&adev->dm.dc_lock);
8920                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8921                                                          &attributes))
8922                         DRM_ERROR("DC failed to set cursor attributes\n");
8923
8924                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8925                                                    &position))
8926                         DRM_ERROR("DC failed to set cursor position\n");
8927                 mutex_unlock(&adev->dm.dc_lock);
8928         }
8929 }
8930
8931 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8932 {
8933
8934         assert_spin_locked(&acrtc->base.dev->event_lock);
8935         WARN_ON(acrtc->event);
8936
8937         acrtc->event = acrtc->base.state->event;
8938
8939         /* Set the flip status */
8940         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8941
8942         /* Mark this event as consumed */
8943         acrtc->base.state->event = NULL;
8944
8945         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8946                      acrtc->crtc_id);
8947 }
8948
8949 static void update_freesync_state_on_stream(
8950         struct amdgpu_display_manager *dm,
8951         struct dm_crtc_state *new_crtc_state,
8952         struct dc_stream_state *new_stream,
8953         struct dc_plane_state *surface,
8954         u32 flip_timestamp_in_us)
8955 {
8956         struct mod_vrr_params vrr_params;
8957         struct dc_info_packet vrr_infopacket = {0};
8958         struct amdgpu_device *adev = dm->adev;
8959         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8960         unsigned long flags;
8961         bool pack_sdp_v1_3 = false;
8962
8963         if (!new_stream)
8964                 return;
8965
8966         /*
8967          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8968          * For now it's sufficient to just guard against these conditions.
8969          */
8970
8971         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8972                 return;
8973
8974         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8975         vrr_params = acrtc->dm_irq_params.vrr_params;
8976
8977         if (surface) {
8978                 mod_freesync_handle_preflip(
8979                         dm->freesync_module,
8980                         surface,
8981                         new_stream,
8982                         flip_timestamp_in_us,
8983                         &vrr_params);
8984
8985                 if (adev->family < AMDGPU_FAMILY_AI &&
8986                     amdgpu_dm_vrr_active(new_crtc_state)) {
8987                         mod_freesync_handle_v_update(dm->freesync_module,
8988                                                      new_stream, &vrr_params);
8989
8990                         /* Need to call this before the frame ends. */
8991                         dc_stream_adjust_vmin_vmax(dm->dc,
8992                                                    new_crtc_state->stream,
8993                                                    &vrr_params.adjust);
8994                 }
8995         }
8996
8997         mod_freesync_build_vrr_infopacket(
8998                 dm->freesync_module,
8999                 new_stream,
9000                 &vrr_params,
9001                 PACKET_TYPE_VRR,
9002                 TRANSFER_FUNC_UNKNOWN,
9003                 &vrr_infopacket,
9004                 pack_sdp_v1_3);
9005
9006         new_crtc_state->freesync_timing_changed |=
9007                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9008                         &vrr_params.adjust,
9009                         sizeof(vrr_params.adjust)) != 0);
9010
9011         new_crtc_state->freesync_vrr_info_changed |=
9012                 (memcmp(&new_crtc_state->vrr_infopacket,
9013                         &vrr_infopacket,
9014                         sizeof(vrr_infopacket)) != 0);
9015
9016         acrtc->dm_irq_params.vrr_params = vrr_params;
9017         new_crtc_state->vrr_infopacket = vrr_infopacket;
9018
9019         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9020         new_stream->vrr_infopacket = vrr_infopacket;
9021
9022         if (new_crtc_state->freesync_vrr_info_changed)
9023                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9024                               new_crtc_state->base.crtc->base.id,
9025                               (int)new_crtc_state->base.vrr_enabled,
9026                               (int)vrr_params.state);
9027
9028         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9029 }
9030
9031 static void update_stream_irq_parameters(
9032         struct amdgpu_display_manager *dm,
9033         struct dm_crtc_state *new_crtc_state)
9034 {
9035         struct dc_stream_state *new_stream = new_crtc_state->stream;
9036         struct mod_vrr_params vrr_params;
9037         struct mod_freesync_config config = new_crtc_state->freesync_config;
9038         struct amdgpu_device *adev = dm->adev;
9039         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9040         unsigned long flags;
9041
9042         if (!new_stream)
9043                 return;
9044
9045         /*
9046          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9047          * For now it's sufficient to just guard against these conditions.
9048          */
9049         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9050                 return;
9051
9052         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9053         vrr_params = acrtc->dm_irq_params.vrr_params;
9054
9055         if (new_crtc_state->vrr_supported &&
9056             config.min_refresh_in_uhz &&
9057             config.max_refresh_in_uhz) {
9058                 /*
9059                  * if freesync compatible mode was set, config.state will be set
9060                  * in atomic check
9061                  */
9062                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9063                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9064                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9065                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9066                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9067                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9068                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9069                 } else {
9070                         config.state = new_crtc_state->base.vrr_enabled ?
9071                                                      VRR_STATE_ACTIVE_VARIABLE :
9072                                                      VRR_STATE_INACTIVE;
9073                 }
9074         } else {
9075                 config.state = VRR_STATE_UNSUPPORTED;
9076         }
9077
9078         mod_freesync_build_vrr_params(dm->freesync_module,
9079                                       new_stream,
9080                                       &config, &vrr_params);
9081
9082         new_crtc_state->freesync_timing_changed |=
9083                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9084                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9085
9086         new_crtc_state->freesync_config = config;
9087         /* Copy state for access from DM IRQ handler */
9088         acrtc->dm_irq_params.freesync_config = config;
9089         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9090         acrtc->dm_irq_params.vrr_params = vrr_params;
9091         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9092 }
9093
9094 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9095                                             struct dm_crtc_state *new_state)
9096 {
9097         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9098         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9099
9100         if (!old_vrr_active && new_vrr_active) {
9101                 /* Transition VRR inactive -> active:
9102                  * While VRR is active, we must not disable vblank irq, as a
9103                  * reenable after a disable would compute bogus vblank/pflip
9104                  * timestamps if it happened inside the display front porch.
9105                  *
9106                  * We also need vupdate irq for the actual core vblank handling
9107                  * at end of vblank.
9108                  */
9109                 dm_set_vupdate_irq(new_state->base.crtc, true);
9110                 drm_crtc_vblank_get(new_state->base.crtc);
9111                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9112                                  __func__, new_state->base.crtc->base.id);
9113         } else if (old_vrr_active && !new_vrr_active) {
9114                 /* Transition VRR active -> inactive:
9115                  * Allow vblank irq disable again for fixed refresh rate.
9116                  */
9117                 dm_set_vupdate_irq(new_state->base.crtc, false);
9118                 drm_crtc_vblank_put(new_state->base.crtc);
9119                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9120                                  __func__, new_state->base.crtc->base.id);
9121         }
9122 }
9123
9124 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9125 {
9126         struct drm_plane *plane;
9127         struct drm_plane_state *old_plane_state;
9128         int i;
9129
9130         /*
9131          * TODO: Make this per-stream so we don't issue redundant updates for
9132          * commits with multiple streams.
9133          */
9134         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9135                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9136                         handle_cursor_update(plane, old_plane_state);
9137 }
9138
9139 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9140                                     struct dc_state *dc_state,
9141                                     struct drm_device *dev,
9142                                     struct amdgpu_display_manager *dm,
9143                                     struct drm_crtc *pcrtc,
9144                                     bool wait_for_vblank)
9145 {
9146         uint32_t i;
9147         uint64_t timestamp_ns;
9148         struct drm_plane *plane;
9149         struct drm_plane_state *old_plane_state, *new_plane_state;
9150         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9151         struct drm_crtc_state *new_pcrtc_state =
9152                         drm_atomic_get_new_crtc_state(state, pcrtc);
9153         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9154         struct dm_crtc_state *dm_old_crtc_state =
9155                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9156         int planes_count = 0, vpos, hpos;
9157         long r;
9158         unsigned long flags;
9159         struct amdgpu_bo *abo;
9160         uint32_t target_vblank, last_flip_vblank;
9161         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9162         bool pflip_present = false;
9163         struct {
9164                 struct dc_surface_update surface_updates[MAX_SURFACES];
9165                 struct dc_plane_info plane_infos[MAX_SURFACES];
9166                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9167                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9168                 struct dc_stream_update stream_update;
9169         } *bundle;
9170
9171         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9172
9173         if (!bundle) {
9174                 dm_error("Failed to allocate update bundle\n");
9175                 goto cleanup;
9176         }
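        /*
         * Note (rationale, inferred): the bundle is heap-allocated
         * because it holds MAX_SURFACES copies of several update
         * structures, which would be too large for the kernel stack.
         */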
9177
9178         /*
9179          * Disable the cursor first if we're disabling all the planes.
9180          * It'll remain on the screen after the planes are re-enabled
9181          * if we don't.
9182          */
9183         if (acrtc_state->active_planes == 0)
9184                 amdgpu_dm_commit_cursors(state);
9185
9186         /* update planes when needed */
9187         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9188                 struct drm_crtc *crtc = new_plane_state->crtc;
9189                 struct drm_crtc_state *new_crtc_state;
9190                 struct drm_framebuffer *fb = new_plane_state->fb;
9191                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9192                 bool plane_needs_flip;
9193                 struct dc_plane_state *dc_plane;
9194                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9195
9196                 /* Cursor plane is handled after stream updates */
9197                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9198                         continue;
9199
9200                 if (!fb || !crtc || pcrtc != crtc)
9201                         continue;
9202
9203                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9204                 if (!new_crtc_state->active)
9205                         continue;
9206
9207                 dc_plane = dm_new_plane_state->dc_state;
9208
9209                 bundle->surface_updates[planes_count].surface = dc_plane;
9210                 if (new_pcrtc_state->color_mgmt_changed) {
9211                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9212                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9213                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9214                 }
9215
9216                 fill_dc_scaling_info(dm->adev, new_plane_state,
9217                                      &bundle->scaling_infos[planes_count]);
9218
9219                 bundle->surface_updates[planes_count].scaling_info =
9220                         &bundle->scaling_infos[planes_count];
9221
9222                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9223
9224                 pflip_present = pflip_present || plane_needs_flip;
9225
9226                 if (!plane_needs_flip) {
9227                         planes_count += 1;
9228                         continue;
9229                 }
9230
9231                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9232
9233                 /*
9234                  * Wait for all fences on this FB. Do limited wait to avoid
9235                  * deadlock during GPU reset when this fence will not signal
9236                  * but we hold reservation lock for the BO.
9237                  */
9238                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9239                                           msecs_to_jiffies(5000));
9240                 if (unlikely(r <= 0))
9241                         DRM_ERROR("Waiting for fences timed out!");
9242
9243                 fill_dc_plane_info_and_addr(
9244                         dm->adev, new_plane_state,
9245                         afb->tiling_flags,
9246                         &bundle->plane_infos[planes_count],
9247                         &bundle->flip_addrs[planes_count].address,
9248                         afb->tmz_surface, false);
9249
9250                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9251                                  new_plane_state->plane->index,
9252                                  bundle->plane_infos[planes_count].dcc.enable);
9253
9254                 bundle->surface_updates[planes_count].plane_info =
9255                         &bundle->plane_infos[planes_count];
9256
9257                 /*
9258                  * Only allow immediate flips for fast updates that don't
9259                  * change FB pitch, DCC state, rotation or mirroring.
9260                  */
9261                 bundle->flip_addrs[planes_count].flip_immediate =
9262                         crtc->state->async_flip &&
9263                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9264
9265                 timestamp_ns = ktime_get_ns();
9266                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9267                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9268                 bundle->surface_updates[planes_count].surface = dc_plane;
9269
9270                 if (!bundle->surface_updates[planes_count].surface) {
9271                         DRM_ERROR("No surface for CRTC: id=%d\n",
9272                                         acrtc_attach->crtc_id);
9273                         continue;
9274                 }
9275
9276                 if (plane == pcrtc->primary)
9277                         update_freesync_state_on_stream(
9278                                 dm,
9279                                 acrtc_state,
9280                                 acrtc_state->stream,
9281                                 dc_plane,
9282                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9283
9284                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9285                                  __func__,
9286                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9287                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9288
9289                 planes_count += 1;
9290
9291         }
9292
9293         if (pflip_present) {
9294                 if (!vrr_active) {
9295                         /* Use old throttling in non-vrr fixed refresh rate mode
9296                          * to keep flip scheduling based on target vblank counts
9297                          * working in a backwards compatible way, e.g., for
9298                          * clients using the GLX_OML_sync_control extension or
9299                          * DRI3/Present extension with defined target_msc.
9300                          */
9301                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9302                 } else {
9304                         /* For variable refresh rate mode only:
9305                          * Get vblank of last completed flip to avoid > 1 vrr
9306                          * flips per video frame by use of throttling, but allow
9307                          * flip programming anywhere in the possibly large
9308                          * variable vrr vblank interval for fine-grained flip
9309                          * timing control and more opportunity to avoid stutter
9310                          * on late submission of flips.
9311                          */
9312                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9313                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9314                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9315                 }
9316
9317                 target_vblank = last_flip_vblank + wait_for_vblank;
9318
9319                 /*
9320                  * Wait until we're out of the vertical blank period before the one
9321                  * targeted by the flip
9322                  */
9323                 while ((acrtc_attach->enabled &&
9324                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9325                                                             0, &vpos, &hpos, NULL,
9326                                                             NULL, &pcrtc->hwmode)
9327                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9328                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9329                         (int)(target_vblank -
9330                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9331                         usleep_range(1000, 1100);
9332                 }
9333
9334                 /*
9335                  * Prepare the flip event for the pageflip interrupt to handle.
9336                  *
9337                  * This only works in the case where we've already turned on the
9338                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
9339                  * from 0 -> n planes we have to skip a hardware generated event
9340                  * and rely on sending it from software.
9341                  */
9342                 if (acrtc_attach->base.state->event &&
9343                     acrtc_state->active_planes > 0 &&
9344                     !acrtc_state->force_dpms_off) {
9345                         drm_crtc_vblank_get(pcrtc);
9346
9347                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9348
9349                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9350                         prepare_flip_isr(acrtc_attach);
9351
9352                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9353                 }
9354
9355                 if (acrtc_state->stream) {
9356                         if (acrtc_state->freesync_vrr_info_changed)
9357                                 bundle->stream_update.vrr_infopacket =
9358                                         &acrtc_state->stream->vrr_infopacket;
9359                 }
9360         }
9361
9362         /* Update the planes if changed or disable if we don't have any. */
9363         if ((planes_count || acrtc_state->active_planes == 0) &&
9364                 acrtc_state->stream) {
9365 #if defined(CONFIG_DRM_AMD_DC_DCN)
9366                 /*
9367                  * If PSR or idle optimizations are enabled then flush out
9368                  * any pending work before hardware programming.
9369                  */
9370                 if (dm->vblank_control_workqueue)
9371                         flush_workqueue(dm->vblank_control_workqueue);
9372 #endif
9373
9374                 bundle->stream_update.stream = acrtc_state->stream;
9375                 if (new_pcrtc_state->mode_changed) {
9376                         bundle->stream_update.src = acrtc_state->stream->src;
9377                         bundle->stream_update.dst = acrtc_state->stream->dst;
9378                 }
9379
9380                 if (new_pcrtc_state->color_mgmt_changed) {
9381                         /*
9382                          * TODO: This isn't fully correct since we've actually
9383                          * already modified the stream in place.
9384                          */
9385                         bundle->stream_update.gamut_remap =
9386                                 &acrtc_state->stream->gamut_remap_matrix;
9387                         bundle->stream_update.output_csc_transform =
9388                                 &acrtc_state->stream->csc_color_matrix;
9389                         bundle->stream_update.out_transfer_func =
9390                                 acrtc_state->stream->out_transfer_func;
9391                 }
9392
9393                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9394                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9395                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9396
9397                 /*
9398                  * If FreeSync state on the stream has changed then we need to
9399                  * re-adjust the min/max bounds now that DC doesn't handle this
9400                  * as part of commit.
9401                  */
9402                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9403                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9404                         dc_stream_adjust_vmin_vmax(
9405                                 dm->dc, acrtc_state->stream,
9406                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9407                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9408                 }
9409                 mutex_lock(&dm->dc_lock);
9410                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9411                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9412                         amdgpu_dm_psr_disable(acrtc_state->stream);
9413
9414                 dc_commit_updates_for_stream(dm->dc,
9415                                              bundle->surface_updates,
9416                                              planes_count,
9417                                              acrtc_state->stream,
9418                                              &bundle->stream_update,
9419                                              dc_state);
9420
9421                 /*
9422                  * Enable or disable the interrupts on the backend.
9423                  *
9424                  * Most pipes are put into power gating when unused.
9425                  *
9426                  * When power gating is enabled on a pipe we lose the
9427                  * interrupt enablement state when power gating is disabled.
9428                  *
9429                  * So we need to update the IRQ control state in hardware
9430                  * whenever the pipe turns on (since it could be previously
9431                  * power gated) or off (since some pipes can't be power gated
9432                  * on some ASICs).
9433                  */
9434                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9435                         dm_update_pflip_irq_state(drm_to_adev(dev),
9436                                                   acrtc_attach);
9437
9438                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9439                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9440                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9441                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9442
9443                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9444                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9445                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9446                         struct amdgpu_dm_connector *aconn =
9447                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9448
9449                         if (aconn->psr_skip_count > 0)
9450                                 aconn->psr_skip_count--;
9451
9452                         /* Allow PSR when skip count is 0. */
9453                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9454                 } else {
9455                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9456                 }
9457
9458                 mutex_unlock(&dm->dc_lock);
9459         }
9460
9461         /*
9462          * Update cursor state *after* programming all the planes.
9463          * This avoids redundant programming in the case where we're going
9464          * to be disabling a single plane, since those pipes are being
9465          * disabled anyway.
9466          */
9466         if (acrtc_state->active_planes)
9467                 amdgpu_dm_commit_cursors(state);
9468
9469 cleanup:
9470         kfree(bundle);
9471 }
9472
9473 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9474                                    struct drm_atomic_state *state)
9475 {
9476         struct amdgpu_device *adev = drm_to_adev(dev);
9477         struct amdgpu_dm_connector *aconnector;
9478         struct drm_connector *connector;
9479         struct drm_connector_state *old_con_state, *new_con_state;
9480         struct drm_crtc_state *new_crtc_state;
9481         struct dm_crtc_state *new_dm_crtc_state;
9482         const struct dc_stream_status *status;
9483         int i, inst;
9484
9485         /* Notify audio device removals. */
9486         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9487                 if (old_con_state->crtc != new_con_state->crtc) {
9488                         /* CRTC changes require notification. */
9489                         goto notify;
9490                 }
9491
9492                 if (!new_con_state->crtc)
9493                         continue;
9494
9495                 new_crtc_state = drm_atomic_get_new_crtc_state(
9496                         state, new_con_state->crtc);
9497
9498                 if (!new_crtc_state)
9499                         continue;
9500
9501                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9502                         continue;
9503
9504         notify:
9505                 aconnector = to_amdgpu_dm_connector(connector);
9506
9507                 mutex_lock(&adev->dm.audio_lock);
9508                 inst = aconnector->audio_inst;
9509                 aconnector->audio_inst = -1;
9510                 mutex_unlock(&adev->dm.audio_lock);
9511
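                /*
                 * An audio_inst of -1 marks the connector as having no audio
                 * endpoint; the ELD notify below reports the old instance as
                 * disconnected.
                 */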
9512                 amdgpu_dm_audio_eld_notify(adev, inst);
9513         }
9514
9515         /* Notify audio device additions. */
9516         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9517                 if (!new_con_state->crtc)
9518                         continue;
9519
9520                 new_crtc_state = drm_atomic_get_new_crtc_state(
9521                         state, new_con_state->crtc);
9522
9523                 if (!new_crtc_state)
9524                         continue;
9525
9526                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9527                         continue;
9528
9529                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9530                 if (!new_dm_crtc_state->stream)
9531                         continue;
9532
9533                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9534                 if (!status)
9535                         continue;
9536
9537                 aconnector = to_amdgpu_dm_connector(connector);
9538
9539                 mutex_lock(&adev->dm.audio_lock);
9540                 inst = status->audio_inst;
9541                 aconnector->audio_inst = inst;
9542                 mutex_unlock(&adev->dm.audio_lock);
9543
9544                 amdgpu_dm_audio_eld_notify(adev, inst);
9545         }
9546 }
9547
9548 /**
9549  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9550  * @crtc_state: the DRM CRTC state
9551  * @stream_state: the DC stream state.
9552  *
9553  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9554  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9555  */
9556 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9557                                                 struct dc_stream_state *stream_state)
9558 {
9559         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9560 }
9561
9562 /**
9563  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9564  * @state: The atomic state to commit
9565  *
9566  * This will tell DC to commit the constructed DC state from atomic_check,
9567  * programming the hardware. Any failure here implies a hardware failure, since
9568  * atomic check should have filtered anything non-kosher.
9569  */
9570 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9571 {
9572         struct drm_device *dev = state->dev;
9573         struct amdgpu_device *adev = drm_to_adev(dev);
9574         struct amdgpu_display_manager *dm = &adev->dm;
9575         struct dm_atomic_state *dm_state;
9576         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9577         uint32_t i, j;
9578         struct drm_crtc *crtc;
9579         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9580         unsigned long flags;
9581         bool wait_for_vblank = true;
9582         struct drm_connector *connector;
9583         struct drm_connector_state *old_con_state, *new_con_state;
9584         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9585         int crtc_disable_count = 0;
9586         bool mode_set_reset_required = false;
9587
9588         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9589
9590         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9591
9592         dm_state = dm_atomic_get_new_state(state);
9593         if (dm_state && dm_state->context) {
9594                 dc_state = dm_state->context;
9595         } else {
9596                 /* No state changes, retain current state. */
9597                 dc_state_temp = dc_create_state(dm->dc);
9598                 ASSERT(dc_state_temp);
9599                 dc_state = dc_state_temp;
9600                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9601         }
9602
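        /*
         * Disable interrupts and drop the stream reference for every CRTC
         * that is being turned off or is about to undergo a full modeset,
         * before any hardware programming below touches its state.
         */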
9603         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9604                                        new_crtc_state, i) {
9605                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9606
9607                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9608
9609                 if (old_crtc_state->active &&
9610                     (!new_crtc_state->active ||
9611                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9612                         manage_dm_interrupts(adev, acrtc, false);
9613                         dc_stream_release(dm_old_crtc_state->stream);
9614                 }
9615         }
9616
9617         drm_atomic_helper_calc_timestamping_constants(state);
9618
9619         /* update changed items */
9620         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9621                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9622
9623                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9624                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9625
9626                 DRM_DEBUG_ATOMIC(
9627                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9628                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9629                         "connectors_changed:%d\n",
9630                         acrtc->crtc_id,
9631                         new_crtc_state->enable,
9632                         new_crtc_state->active,
9633                         new_crtc_state->planes_changed,
9634                         new_crtc_state->mode_changed,
9635                         new_crtc_state->active_changed,
9636                         new_crtc_state->connectors_changed);
9637
9638                 /* Disable cursor if disabling crtc */
9639                 if (old_crtc_state->active && !new_crtc_state->active) {
9640                         struct dc_cursor_position position;
9641
9642                         memset(&position, 0, sizeof(position));
9643                         mutex_lock(&dm->dc_lock);
9644                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9645                         mutex_unlock(&dm->dc_lock);
9646                 }
9647
9648                 /* Copy all transient state flags into dc state */
9649                 if (dm_new_crtc_state->stream) {
9650                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9651                                                             dm_new_crtc_state->stream);
9652                 }
9653
9654                 /* handles headless hotplug case, updating new_state and
9655                  * aconnector as needed
9656                  */
9657
9658                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9659
9660                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9661
9662                         if (!dm_new_crtc_state->stream) {
9663                                 /*
9664                                  * This could happen because of issues with
9665                                  * delivery of userspace notifications.
9666                                  * In this case userspace tries to set a mode
9667                                  * on a display which is in fact disconnected.
9668                                  * dc_sink is NULL in this case on aconnector.
9669                                  * We expect a mode reset to come soon.
9670                                  *
9671                                  * This can also happen when an unplug is done
9672                                  * during the resume sequence.
9673                                  *
9674                                  * In this case, we want to pretend we still
9675                                  * have a sink to keep the pipe running so that
9676                                  * hw state is consistent with the sw state.
9677                                  */
9678                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9679                                                 __func__, acrtc->base.base.id);
9680                                 continue;
9681                         }
9682
9683                         if (dm_old_crtc_state->stream)
9684                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9685
9686                         pm_runtime_get_noresume(dev->dev);
9687
9688                         acrtc->enabled = true;
9689                         acrtc->hw_mode = new_crtc_state->mode;
9690                         crtc->hwmode = new_crtc_state->mode;
9691                         mode_set_reset_required = true;
9692                 } else if (modereset_required(new_crtc_state)) {
9693                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9694                         /* i.e. reset mode */
9695                         if (dm_old_crtc_state->stream)
9696                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9697
9698                         mode_set_reset_required = true;
9699                 }
9700         } /* for_each_crtc_in_state() */
9701
9702         if (dc_state) {
9703                 /* if there is a mode set or reset, disable eDP PSR */
9704                 if (mode_set_reset_required) {
9705 #if defined(CONFIG_DRM_AMD_DC_DCN)
9706                         if (dm->vblank_control_workqueue)
9707                                 flush_workqueue(dm->vblank_control_workqueue);
9708 #endif
9709                         amdgpu_dm_psr_disable_all(dm);
9710                 }
9711
9712                 dm_enable_per_frame_crtc_master_sync(dc_state);
9713                 mutex_lock(&dm->dc_lock);
9714                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9715 #if defined(CONFIG_DRM_AMD_DC_DCN)
9716                 /* Allow idle optimization when vblank count is 0 for display off */
9717                 if (dm->active_vblank_irq_count == 0)
9718                         dc_allow_idle_optimizations(dm->dc, true);
9719 #endif
9720                 mutex_unlock(&dm->dc_lock);
9721         }
9722
9723         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9724                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9725
9726                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9727
9728                 if (dm_new_crtc_state->stream != NULL) {
9729                         const struct dc_stream_status *status =
9730                                         dc_stream_get_status(dm_new_crtc_state->stream);
9731
9732                         if (!status)
9733                                 status = dc_stream_get_status_from_state(dc_state,
9734                                                                          dm_new_crtc_state->stream);
9735                         if (!status)
9736                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9737                         else
9738                                 acrtc->otg_inst = status->primary_otg_inst;
9739                 }
9740         }
9741 #ifdef CONFIG_DRM_AMD_DC_HDCP
9742         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9743                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9744                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9745                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9746
9747                 new_crtc_state = NULL;
9748
9749                 if (acrtc)
9750                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9751
9752                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9753
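                /*
                 * If the stream backing this connector is gone while content
                 * protection is still enabled, reset HDCP on the link and mark
                 * the connector DESIRED so protection is renegotiated once a
                 * stream is attached again.
                 */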
9754                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9755                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9756                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9757                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9758                         dm_new_con_state->update_hdcp = true;
9759                         continue;
9760                 }
9761
9762                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9763                         hdcp_update_display(
9764                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9765                                 new_con_state->hdcp_content_type,
9766                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9767         }
9768 #endif
9769
9770         /* Handle connector state changes */
9771         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9772                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9773                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9774                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9775                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9776                 struct dc_stream_update stream_update;
9777                 struct dc_info_packet hdr_packet;
9778                 struct dc_stream_status *status = NULL;
9779                 bool abm_changed, hdr_changed, scaling_changed;
9780
9781                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9782                 memset(&stream_update, 0, sizeof(stream_update));
9783
9784                 if (acrtc) {
9785                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9786                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9787                 }
9788
9789                 /* Skip any modesets/resets */
9790                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9791                         continue;
9792
9793                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9794                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9795
9796                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9797                                                              dm_old_con_state);
9798
9799                 abm_changed = dm_new_crtc_state->abm_level !=
9800                               dm_old_crtc_state->abm_level;
9801
9802                 hdr_changed =
9803                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9804
9805                 if (!scaling_changed && !abm_changed && !hdr_changed)
9806                         continue;
9807
9808                 stream_update.stream = dm_new_crtc_state->stream;
9809                 if (scaling_changed) {
9810                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9811                                         dm_new_con_state, dm_new_crtc_state->stream);
9812
9813                         stream_update.src = dm_new_crtc_state->stream->src;
9814                         stream_update.dst = dm_new_crtc_state->stream->dst;
9815                 }
9816
9817                 if (abm_changed) {
9818                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9819
9820                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9821                 }
9822
9823                 if (hdr_changed) {
9824                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9825                         stream_update.hdr_static_metadata = &hdr_packet;
9826                 }
9827
9828                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9829
9830                 if (WARN_ON(!status))
9831                         continue;
9832
9833                 WARN_ON(!status->plane_count);
9834
9835                 /*
9836                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9837                  * Here we create an empty update on each plane.
9838                  * To fix this, DC should permit updating only stream properties.
9839                  */
9840                 for (j = 0; j < status->plane_count; j++)
9841                         dummy_updates[j].surface = status->plane_states[0];
9842
9843
9844                 mutex_lock(&dm->dc_lock);
9845                 dc_commit_updates_for_stream(dm->dc,
9846                                              dummy_updates,
9847                                              status->plane_count,
9848                                              dm_new_crtc_state->stream,
9849                                              &stream_update,
9850                                              dc_state);
9851                 mutex_unlock(&dm->dc_lock);
9852         }
9853
9854         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9855         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9856                                       new_crtc_state, i) {
9857                 if (old_crtc_state->active && !new_crtc_state->active)
9858                         crtc_disable_count++;
9859
9860                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9861                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9862
9863                 /* For freesync config update on crtc state and params for irq */
9864                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9865
9866                 /* Handle vrr on->off / off->on transitions */
9867                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9868                                                 dm_new_crtc_state);
9869         }
9870
9871         /*
9872          * Enable interrupts for CRTCs that are newly enabled or went through
9873          * a modeset. This is intentionally deferred until after the front end
9874          * state is modified, so that the OTG is on and the IRQ handlers don't
9875          * access stale or invalid state.
9876          */
9877         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9878                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9879 #ifdef CONFIG_DEBUG_FS
9880                 bool configure_crc = false;
9881                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9882 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9883                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9884 #endif
9885                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9886                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9887                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9888 #endif
9889                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9890
9891                 if (new_crtc_state->active &&
9892                     (!old_crtc_state->active ||
9893                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9894                         dc_stream_retain(dm_new_crtc_state->stream);
9895                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9896                         manage_dm_interrupts(adev, acrtc, true);
9897
9898 #ifdef CONFIG_DEBUG_FS
9899                         /*
9900                          * Frontend may have changed so reapply the CRC capture
9901                          * settings for the stream.
9902                          */
9903                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9904
9905                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9906                                 configure_crc = true;
9907 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9908                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9909                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9910                                         acrtc->dm_irq_params.crc_window.update_win = true;
9911                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9912                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9913                                         crc_rd_wrk->crtc = crtc;
9914                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9915                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9916                                 }
9917 #endif
9918                         }
9919
9920                         if (configure_crc)
9921                                 if (amdgpu_dm_crtc_configure_crc_source(
9922                                         crtc, dm_new_crtc_state, cur_crc_src))
9923                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9924 #endif
9925                 }
9926         }
9927
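        /*
         * An async flip on any CRTC means the commit must not be throttled to
         * vblank; wait_for_vblank is forwarded to amdgpu_dm_commit_planes()
         * below.
         */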
9928         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9929                 if (new_crtc_state->async_flip)
9930                         wait_for_vblank = false;
9931
9932         /* update planes when needed per crtc*/
9933         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9934                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9935
9936                 if (dm_new_crtc_state->stream)
9937                         amdgpu_dm_commit_planes(state, dc_state, dev,
9938                                                 dm, crtc, wait_for_vblank);
9939         }
9940
9941         /* Update audio instances for each connector. */
9942         amdgpu_dm_commit_audio(dev, state);
9943
9944 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9945         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9946         /* restore the backlight level */
9947         for (i = 0; i < dm->num_of_edps; i++) {
9948                 if (dm->backlight_dev[i] &&
9949                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9950                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9951         }
9952 #endif
9953         /*
9954          * send vblank event on all events not handled in flip and
9955          * mark consumed event for drm_atomic_helper_commit_hw_done
9956          */
9957         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9958         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9959
9960                 if (new_crtc_state->event)
9961                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9962
9963                 new_crtc_state->event = NULL;
9964         }
9965         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9966
9967         /* Signal HW programming completion */
9968         drm_atomic_helper_commit_hw_done(state);
9969
9970         if (wait_for_vblank)
9971                 drm_atomic_helper_wait_for_flip_done(dev, state);
9972
9973         drm_atomic_helper_cleanup_planes(dev, state);
9974
9975         /* return the stolen vga memory back to VRAM */
9976         if (!adev->mman.keep_stolen_vga_memory)
9977                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9978         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9979
9980         /*
9981          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9982          * so we can put the GPU into runtime suspend if we're not driving any
9983          * displays anymore
9984          */
9985         for (i = 0; i < crtc_disable_count; i++)
9986                 pm_runtime_put_autosuspend(dev->dev);
9987         pm_runtime_mark_last_busy(dev->dev);
9988
9989         if (dc_state_temp)
9990                 dc_release_state(dc_state_temp);
9991 }
9992
9993
9994 static int dm_force_atomic_commit(struct drm_connector *connector)
9995 {
9996         int ret = 0;
9997         struct drm_device *ddev = connector->dev;
9998         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9999         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10000         struct drm_plane *plane = disconnected_acrtc->base.primary;
10001         struct drm_connector_state *conn_state;
10002         struct drm_crtc_state *crtc_state;
10003         struct drm_plane_state *plane_state;
10004
10005         if (!state)
10006                 return -ENOMEM;
10007
10008         state->acquire_ctx = ddev->mode_config.acquire_ctx;
10009
10010         /* Construct an atomic state to restore previous display setting */
10011
10012         /* Attach the connector to drm_atomic_state */
10015         conn_state = drm_atomic_get_connector_state(state, connector);
10016
10017         ret = PTR_ERR_OR_ZERO(conn_state);
10018         if (ret)
10019                 goto out;
10020
10021         /* Attach crtc to drm_atomic_state */
10022         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10023
10024         ret = PTR_ERR_OR_ZERO(crtc_state);
10025         if (ret)
10026                 goto out;
10027
10028         /* force a restore */
10029         crtc_state->mode_changed = true;
10030
10031         /* Attach plane to drm_atomic_state */
10032         plane_state = drm_atomic_get_plane_state(state, plane);
10033
10034         ret = PTR_ERR_OR_ZERO(plane_state);
10035         if (ret)
10036                 goto out;
10037
10038         /* Call commit internally with the state we just constructed */
10039         ret = drm_atomic_commit(state);
10040
10041 out:
10042         drm_atomic_state_put(state);
10043         if (ret)
10044                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10045
10046         return ret;
10047 }
10048
10049 /*
10050  * This function handles all cases when a set mode does not come upon hotplug.
10051  * This includes when a display is unplugged then plugged back into the
10052  * same port and when running without usermode desktop manager support.
10053  */
10054 void dm_restore_drm_connector_state(struct drm_device *dev,
10055                                     struct drm_connector *connector)
10056 {
10057         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10058         struct amdgpu_crtc *disconnected_acrtc;
10059         struct dm_crtc_state *acrtc_state;
10060
10061         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10062                 return;
10063
10064         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10065         if (!disconnected_acrtc)
10066                 return;
10067
10068         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10069         if (!acrtc_state->stream)
10070                 return;
10071
10072         /*
10073          * If the previous sink is not released and different from the current
10074          * one, we deduce we are in a state where we cannot rely on a usermode
10075          * call to turn on the display, so we do it here.
10076          */
10077         if (acrtc_state->stream->sink != aconnector->dc_sink)
10078                 dm_force_atomic_commit(&aconnector->base);
10079 }
10080
10081 /*
10082  * Grabs all modesetting locks to serialize against any blocking commits and
10083  * waits for completion of all non-blocking commits.
10084  */
10085 static int do_aquire_global_lock(struct drm_device *dev,
10086                                  struct drm_atomic_state *state)
10087 {
10088         struct drm_crtc *crtc;
10089         struct drm_crtc_commit *commit;
10090         long ret;
10091
10092         /*
10093          * Adding all modeset locks to acquire_ctx will
10094          * ensure that when the framework releases it the
10095          * extra locks we are locking here will get released too.
10096          */
10097         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10098         if (ret)
10099                 return ret;
10100
10101         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10102                 spin_lock(&crtc->commit_lock);
10103                 commit = list_first_entry_or_null(&crtc->commit_list,
10104                                 struct drm_crtc_commit, commit_entry);
10105                 if (commit)
10106                         drm_crtc_commit_get(commit);
10107                 spin_unlock(&crtc->commit_lock);
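                /*
                 * The reference taken under commit_lock keeps the commit
                 * alive while we wait on its completions outside the lock.
                 */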
10108
10109                 if (!commit)
10110                         continue;
10111
10112                 /*
10113                  * Make sure all pending HW programming completed and
10114                  * page flips done
10115                  */
10116                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10117
10118                 if (ret > 0)
10119                         ret = wait_for_completion_interruptible_timeout(
10120                                         &commit->flip_done, 10*HZ);
10121
10122                 if (ret == 0)
10123                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10124                                   "timed out\n", crtc->base.id, crtc->name);
10125
10126                 drm_crtc_commit_put(commit);
10127         }
10128
10129         return ret < 0 ? ret : 0;
10130 }
10131
10132 static void get_freesync_config_for_crtc(
10133         struct dm_crtc_state *new_crtc_state,
10134         struct dm_connector_state *new_con_state)
10135 {
10136         struct mod_freesync_config config = {0};
10137         struct amdgpu_dm_connector *aconnector =
10138                         to_amdgpu_dm_connector(new_con_state->base.connector);
10139         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10140         int vrefresh = drm_mode_vrefresh(mode);
10141         bool fs_vid_mode = false;
10142
10143         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10144                                         vrefresh >= aconnector->min_vfreq &&
10145                                         vrefresh <= aconnector->max_vfreq;
10146
10147         if (new_crtc_state->vrr_supported) {
10148                 new_crtc_state->stream->ignore_msa_timing_param = true;
10149                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10150
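                /*
                 * min_vfreq/max_vfreq are in Hz, while mod_freesync works in
                 * micro-Hz (uhz), hence the 1000000 scale factor below.
                 */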
10151                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10152                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10153                 config.vsif_supported = true;
10154                 config.btr = true;
10155
10156                 if (fs_vid_mode) {
10157                         config.state = VRR_STATE_ACTIVE_FIXED;
10158                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10159                         goto out;
10160                 } else if (new_crtc_state->base.vrr_enabled) {
10161                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10162                 } else {
10163                         config.state = VRR_STATE_INACTIVE;
10164                 }
10165         }
10166 out:
10167         new_crtc_state->freesync_config = config;
10168 }
10169
10170 static void reset_freesync_config_for_crtc(
10171         struct dm_crtc_state *new_crtc_state)
10172 {
10173         new_crtc_state->vrr_supported = false;
10174
10175         memset(&new_crtc_state->vrr_infopacket, 0,
10176                sizeof(new_crtc_state->vrr_infopacket));
10177 }
10178
10179 static bool
10180 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10181                                  struct drm_crtc_state *new_crtc_state)
10182 {
10183         struct drm_display_mode old_mode, new_mode;
10184
10185         if (!old_crtc_state || !new_crtc_state)
10186                 return false;
10187
10188         old_mode = old_crtc_state->mode;
10189         new_mode = new_crtc_state->mode;
10190
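        /*
         * Timings are "unchanged for freesync" when only the vertical total
         * and vsync position move while the vsync pulse width and all
         * horizontal parameters stay the same - i.e. a front porch change,
         * which VRR can absorb without a full modeset.
         */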
10191         if (old_mode.clock       == new_mode.clock &&
10192             old_mode.hdisplay    == new_mode.hdisplay &&
10193             old_mode.vdisplay    == new_mode.vdisplay &&
10194             old_mode.htotal      == new_mode.htotal &&
10195             old_mode.vtotal      != new_mode.vtotal &&
10196             old_mode.hsync_start == new_mode.hsync_start &&
10197             old_mode.vsync_start != new_mode.vsync_start &&
10198             old_mode.hsync_end   == new_mode.hsync_end &&
10199             old_mode.vsync_end   != new_mode.vsync_end &&
10200             old_mode.hskew       == new_mode.hskew &&
10201             old_mode.vscan       == new_mode.vscan &&
10202             (old_mode.vsync_end - old_mode.vsync_start) ==
10203             (new_mode.vsync_end - new_mode.vsync_start))
10204                 return true;
10205
10206         return false;
10207 }
10208
10209 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10210         uint64_t num, den, res;
10211         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10212
10213         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10214
10215         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10216         den = (unsigned long long)new_crtc_state->mode.htotal *
10217               (unsigned long long)new_crtc_state->mode.vtotal;
10218
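        /*
         * fixed_refresh_in_uhz = pixel clock / (htotal * vtotal), scaled to
         * micro-Hz. For a hypothetical 1920x1080@60 mode with a 148500 kHz
         * clock and a 2200x1125 total, this gives 148500000 * 1000000 /
         * (2200 * 1125) = 60000000 uhz (60 Hz).
         */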
10219         res = div_u64(num, den);
10220         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10221 }
10222
10223 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10224                          struct drm_atomic_state *state,
10225                          struct drm_crtc *crtc,
10226                          struct drm_crtc_state *old_crtc_state,
10227                          struct drm_crtc_state *new_crtc_state,
10228                          bool enable,
10229                          bool *lock_and_validation_needed)
10230 {
10231         struct dm_atomic_state *dm_state = NULL;
10232         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10233         struct dc_stream_state *new_stream;
10234         int ret = 0;
10235
10236         /*
10237          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10238          * update changed items
10239          */
10240         struct amdgpu_crtc *acrtc = NULL;
10241         struct amdgpu_dm_connector *aconnector = NULL;
10242         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10243         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10244
10245         new_stream = NULL;
10246
10247         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10248         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10249         acrtc = to_amdgpu_crtc(crtc);
10250         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10251
10252         /* TODO This hack should go away */
10253         if (aconnector && enable) {
10254                 /* Make sure fake sink is created in plug-in scenario */
10255                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10256                                                             &aconnector->base);
10257                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10258                                                             &aconnector->base);
10259
10260                 if (IS_ERR(drm_new_conn_state)) {
10261                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10262                         goto fail;
10263                 }
10264
10265                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10266                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10267
10268                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10269                         goto skip_modeset;
10270
10271                 new_stream = create_validate_stream_for_sink(aconnector,
10272                                                              &new_crtc_state->mode,
10273                                                              dm_new_conn_state,
10274                                                              dm_old_crtc_state->stream);
10275
10276                 /*
10277                  * We can have no stream on ACTION_SET if a display
10278                  * was disconnected during S3; in this case it is not an
10279                  * error, the OS will be updated after detection, and
10280                  * will do the right thing on the next atomic commit.
10281                  */
10282
10283                 if (!new_stream) {
10284                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10285                                         __func__, acrtc->base.base.id);
10286                         ret = -ENOMEM;
10287                         goto fail;
10288                 }
10289
10290                 /*
10291                  * TODO: Check VSDB bits to decide whether this should
10292                  * be enabled or not.
10293                  */
10294                 new_stream->triggered_crtc_reset.enabled =
10295                         dm->force_timing_sync;
10296
10297                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10298
10299                 ret = fill_hdr_info_packet(drm_new_conn_state,
10300                                            &new_stream->hdr_static_metadata);
10301                 if (ret)
10302                         goto fail;
10303
10304                 /*
10305                  * If we already removed the old stream from the context
10306                  * (and set the new stream to NULL) then we can't reuse
10307                  * the old stream even if the stream and scaling are unchanged.
10308                  * We'll hit the BUG_ON and black screen.
10309                  *
10310                  * TODO: Refactor this function to allow this check to work
10311                  * in all conditions.
10312                  */
10313                 if (dm_new_crtc_state->stream &&
10314                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10315                         goto skip_modeset;
10316
10317                 if (dm_new_crtc_state->stream &&
10318                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10319                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10320                         new_crtc_state->mode_changed = false;
10321                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10322                                          new_crtc_state->mode_changed);
10323                 }
10324         }
10325
10326         /* mode_changed flag may get updated above, need to check again */
10327         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10328                 goto skip_modeset;
10329
10330         DRM_DEBUG_ATOMIC(
10331                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10332                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10333                 "connectors_changed:%d\n",
10334                 acrtc->crtc_id,
10335                 new_crtc_state->enable,
10336                 new_crtc_state->active,
10337                 new_crtc_state->planes_changed,
10338                 new_crtc_state->mode_changed,
10339                 new_crtc_state->active_changed,
10340                 new_crtc_state->connectors_changed);
10341
10342         /* Remove stream for any changed/disabled CRTC */
10343         if (!enable) {
10344
10345                 if (!dm_old_crtc_state->stream)
10346                         goto skip_modeset;
10347
10348                 if (dm_new_crtc_state->stream &&
10349                     is_timing_unchanged_for_freesync(new_crtc_state,
10350                                                      old_crtc_state)) {
10351                         new_crtc_state->mode_changed = false;
10352                         DRM_DEBUG_DRIVER(
10353                                 "Mode change not required for front porch change, "
10354                                 "setting mode_changed to %d\n",
10355                                 new_crtc_state->mode_changed);
10356
10357                         set_freesync_fixed_config(dm_new_crtc_state);
10358
10359                         goto skip_modeset;
10360                 } else if (aconnector &&
10361                            is_freesync_video_mode(&new_crtc_state->mode,
10362                                                   aconnector)) {
10363                         struct drm_display_mode *high_mode;
10364
10365                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10366                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10367                                 set_freesync_fixed_config(dm_new_crtc_state);
10368                         }
10369                 }
10370
10371                 ret = dm_atomic_get_state(state, &dm_state);
10372                 if (ret)
10373                         goto fail;
10374
10375                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10376                                 crtc->base.id);
10377
10378                 /* i.e. reset mode */
10379                 if (dc_remove_stream_from_ctx(
10380                                 dm->dc,
10381                                 dm_state->context,
10382                                 dm_old_crtc_state->stream) != DC_OK) {
10383                         ret = -EINVAL;
10384                         goto fail;
10385                 }
10386
10387                 dc_stream_release(dm_old_crtc_state->stream);
10388                 dm_new_crtc_state->stream = NULL;
10389
10390                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10391
10392                 *lock_and_validation_needed = true;
10393
10394         } else {/* Add stream for any updated/enabled CRTC */
10395                 /*
10396                  * Quick fix to prevent a NULL pointer on new_stream when newly
10397                  * added MST connectors are not found in the existing crtc_state
10398                  * in chained mode.
10399                  * TODO: need to dig out the root cause of that
10400                  */
10400                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10401                         goto skip_modeset;
10402
10403                 if (modereset_required(new_crtc_state))
10404                         goto skip_modeset;
10405
10406                 if (modeset_required(new_crtc_state, new_stream,
10407                                      dm_old_crtc_state->stream)) {
10408
10409                         WARN_ON(dm_new_crtc_state->stream);
10410
10411                         ret = dm_atomic_get_state(state, &dm_state);
10412                         if (ret)
10413                                 goto fail;
10414
10415                         dm_new_crtc_state->stream = new_stream;
10416
10417                         dc_stream_retain(new_stream);
10418
10419                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10420                                          crtc->base.id);
10421
10422                         if (dc_add_stream_to_ctx(
10423                                         dm->dc,
10424                                         dm_state->context,
10425                                         dm_new_crtc_state->stream) != DC_OK) {
10426                                 ret = -EINVAL;
10427                                 goto fail;
10428                         }
10429
10430                         *lock_and_validation_needed = true;
10431                 }
10432         }
10433
10434 skip_modeset:
10435         /* Release extra reference */
10436         if (new_stream)
10437                 dc_stream_release(new_stream);
10438
10439         /*
10440          * We want to do dc stream updates that do not require a
10441          * full modeset below.
10442          */
10443         if (!(enable && aconnector && new_crtc_state->active))
10444                 return 0;
10445         /*
10446          * Given the above conditions, the dc state cannot be NULL because:
10447          * 1. We're in the process of enabling CRTCs (the stream has just been
10448          *    added to the dc context, or is already on the context),
10449          * 2. A valid connector is attached, and
10450          * 3. The CRTC is currently active and enabled,
10451          * => so the dc stream state currently exists.
10452          */
10453         BUG_ON(dm_new_crtc_state->stream == NULL);
10454
10455         /* Scaling or underscan settings */
10456         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10457                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10458                 update_stream_scaling_settings(
10459                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10460
10461         /* ABM settings */
10462         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10463
10464         /*
10465          * Color management settings. We also update color properties
10466          * when a modeset is needed, to ensure it gets reprogrammed.
10467          */
10468         if (dm_new_crtc_state->base.color_mgmt_changed ||
10469             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10470                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10471                 if (ret)
10472                         goto fail;
10473         }
10474
10475         /* Update Freesync settings. */
10476         get_freesync_config_for_crtc(dm_new_crtc_state,
10477                                      dm_new_conn_state);
10478
10479         return ret;
10480
10481 fail:
10482         if (new_stream)
10483                 dc_stream_release(new_stream);
10484         return ret;
10485 }
10486
10487 static bool should_reset_plane(struct drm_atomic_state *state,
10488                                struct drm_plane *plane,
10489                                struct drm_plane_state *old_plane_state,
10490                                struct drm_plane_state *new_plane_state)
10491 {
10492         struct drm_plane *other;
10493         struct drm_plane_state *old_other_state, *new_other_state;
10494         struct drm_crtc_state *new_crtc_state;
10495         int i;
10496
10497         /*
10498          * TODO: Remove this hack once the checks below are sufficient
10499          * to determine when we need to reset all the planes on
10500          * the stream.
10501          */
10502         if (state->allow_modeset)
10503                 return true;
10504
10505         /* Exit early if we know that we're adding or removing the plane. */
10506         if (old_plane_state->crtc != new_plane_state->crtc)
10507                 return true;
10508
10509         /* old crtc == new_crtc == NULL, plane not in context. */
10510         if (!new_plane_state->crtc)
10511                 return false;
10512
10513         new_crtc_state =
10514                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10515
10516         if (!new_crtc_state)
10517                 return true;
10518
10519         /* CRTC Degamma changes currently require us to recreate planes. */
10520         if (new_crtc_state->color_mgmt_changed)
10521                 return true;
10522
10523         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10524                 return true;
10525
10526         /*
10527          * If there are any new primary or overlay planes being added or
10528          * removed then the z-order can potentially change. To ensure
10529          * correct z-order and pipe acquisition the current DC architecture
10530          * requires us to remove and recreate all existing planes.
10531          *
10532          * TODO: Come up with a more elegant solution for this.
10533          */
10534         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10535                 struct amdgpu_framebuffer *old_afb, *new_afb;
10536                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10537                         continue;
10538
10539                 if (old_other_state->crtc != new_plane_state->crtc &&
10540                     new_other_state->crtc != new_plane_state->crtc)
10541                         continue;
10542
10543                 if (old_other_state->crtc != new_other_state->crtc)
10544                         return true;
10545
10546                 /* Src/dst size and scaling updates. */
10547                 if (old_other_state->src_w != new_other_state->src_w ||
10548                     old_other_state->src_h != new_other_state->src_h ||
10549                     old_other_state->crtc_w != new_other_state->crtc_w ||
10550                     old_other_state->crtc_h != new_other_state->crtc_h)
10551                         return true;
10552
10553                 /* Rotation / mirroring updates. */
10554                 if (old_other_state->rotation != new_other_state->rotation)
10555                         return true;
10556
10557                 /* Blending updates. */
10558                 if (old_other_state->pixel_blend_mode !=
10559                     new_other_state->pixel_blend_mode)
10560                         return true;
10561
10562                 /* Alpha updates. */
10563                 if (old_other_state->alpha != new_other_state->alpha)
10564                         return true;
10565
10566                 /* Colorspace changes. */
10567                 if (old_other_state->color_range != new_other_state->color_range ||
10568                     old_other_state->color_encoding != new_other_state->color_encoding)
10569                         return true;
10570
10571                 /* Framebuffer checks fall at the end. */
10572                 if (!old_other_state->fb || !new_other_state->fb)
10573                         continue;
10574
10575                 /* Pixel format changes can require bandwidth updates. */
10576                 if (old_other_state->fb->format != new_other_state->fb->format)
10577                         return true;
10578
10579                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10580                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10581
10582                 /* Tiling and DCC changes also require bandwidth updates. */
10583                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10584                     old_afb->base.modifier != new_afb->base.modifier)
10585                         return true;
10586         }
10587
10588         return false;
10589 }
10590
10591 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10592                               struct drm_plane_state *new_plane_state,
10593                               struct drm_framebuffer *fb)
10594 {
10595         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10596         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10597         unsigned int pitch;
10598         bool linear;
10599
10600         if (fb->width > new_acrtc->max_cursor_width ||
10601             fb->height > new_acrtc->max_cursor_height) {
10602                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10603                                  new_plane_state->fb->width,
10604                                  new_plane_state->fb->height);
10605                 return -EINVAL;
10606         }
10607         if (new_plane_state->src_w != fb->width << 16 ||
10608             new_plane_state->src_h != fb->height << 16) {
10609                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10610                 return -EINVAL;
10611         }
10612
10613         /* Pitch in pixels */
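        /*
         * Added example (commentary, not driver logic): for an XRGB8888
         * cursor FB (cpp[0] = 4 bytes) with pitches[0] = 256 bytes, this
         * works out to 256 / 4 = 64 pixels, one of the pitches accepted
         * by the switch below.
         */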
10614         pitch = fb->pitches[0] / fb->format->cpp[0];
10615
10616         if (fb->width != pitch) {
10617                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10618                                  fb->width, pitch);
10619                 return -EINVAL;
10620         }
10621
10622         switch (pitch) {
10623         case 64:
10624         case 128:
10625         case 256:
10626                 /* FB pitch is supported by cursor plane */
10627                 break;
10628         default:
10629                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10630                 return -EINVAL;
10631         }
10632
10633         /* Core DRM takes care of checking FB modifiers, so we only need to
10634          * check tiling flags when the FB doesn't have a modifier. */
10635         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10636                 if (adev->family < AMDGPU_FAMILY_AI) {
10637                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10638                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10639                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10640                 } else {
10641                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10642                 }
10643                 if (!linear) {
10644                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10645                         return -EINVAL;
10646                 }
10647         }
10648
10649         return 0;
10650 }
10651
10652 static int dm_update_plane_state(struct dc *dc,
10653                                  struct drm_atomic_state *state,
10654                                  struct drm_plane *plane,
10655                                  struct drm_plane_state *old_plane_state,
10656                                  struct drm_plane_state *new_plane_state,
10657                                  bool enable,
10658                                  bool *lock_and_validation_needed)
10659 {
10661         struct dm_atomic_state *dm_state = NULL;
10662         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10663         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10664         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10665         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10666         struct amdgpu_crtc *new_acrtc;
10667         bool needs_reset;
10668         int ret = 0;
10669
10671         new_plane_crtc = new_plane_state->crtc;
10672         old_plane_crtc = old_plane_state->crtc;
10673         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10674         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10675
10676         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10677                 if (!enable || !new_plane_crtc ||
10678                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10679                         return 0;
10680
10681                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10682
10683                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10684                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10685                         return -EINVAL;
10686                 }
10687
10688                 if (new_plane_state->fb) {
10689                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10690                                                  new_plane_state->fb);
10691                         if (ret)
10692                                 return ret;
10693                 }
10694
10695                 return 0;
10696         }
10697
10698         needs_reset = should_reset_plane(state, plane, old_plane_state,
10699                                          new_plane_state);
10700
10701         /* Remove any changed/removed planes */
10702         if (!enable) {
10703                 if (!needs_reset)
10704                         return 0;
10705
10706                 if (!old_plane_crtc)
10707                         return 0;
10708
10709                 old_crtc_state = drm_atomic_get_old_crtc_state(
10710                                 state, old_plane_crtc);
10711                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10712
10713                 if (!dm_old_crtc_state->stream)
10714                         return 0;
10715
10716                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10717                                 plane->base.id, old_plane_crtc->base.id);
10718
10719                 ret = dm_atomic_get_state(state, &dm_state);
10720                 if (ret)
10721                         return ret;
10722
10723                 if (!dc_remove_plane_from_context(
10724                                 dc,
10725                                 dm_old_crtc_state->stream,
10726                                 dm_old_plane_state->dc_state,
10727                                 dm_state->context)) {
10729                         return -EINVAL;
10730                 }
10731
10733                 dc_plane_state_release(dm_old_plane_state->dc_state);
10734                 dm_new_plane_state->dc_state = NULL;
10735
10736                 *lock_and_validation_needed = true;
10737
10738         } else { /* Add new planes */
10739                 struct dc_plane_state *dc_new_plane_state;
10740
10741                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10742                         return 0;
10743
10744                 if (!new_plane_crtc)
10745                         return 0;
10746
10747                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10748                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10749
10750                 if (!dm_new_crtc_state->stream)
10751                         return 0;
10752
10753                 if (!needs_reset)
10754                         return 0;
10755
10756                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10757                 if (ret)
10758                         return ret;
10759
10760                 WARN_ON(dm_new_plane_state->dc_state);
10761
10762                 dc_new_plane_state = dc_create_plane_state(dc);
10763                 if (!dc_new_plane_state)
10764                         return -ENOMEM;
10765
10766                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10767                                  plane->base.id, new_plane_crtc->base.id);
10768
10769                 ret = fill_dc_plane_attributes(
10770                         drm_to_adev(new_plane_crtc->dev),
10771                         dc_new_plane_state,
10772                         new_plane_state,
10773                         new_crtc_state);
10774                 if (ret) {
10775                         dc_plane_state_release(dc_new_plane_state);
10776                         return ret;
10777                 }
10778
10779                 ret = dm_atomic_get_state(state, &dm_state);
10780                 if (ret) {
10781                         dc_plane_state_release(dc_new_plane_state);
10782                         return ret;
10783                 }
10784
10785                 /*
10786                  * Any atomic check errors that occur after this will
10787                  * not need a release. The plane state will be attached
10788                  * to the stream, and therefore part of the atomic
10789                  * state. It'll be released when the atomic state is
10790                  * cleaned.
10791                  */
10792                 if (!dc_add_plane_to_context(
10793                                 dc,
10794                                 dm_new_crtc_state->stream,
10795                                 dc_new_plane_state,
10796                                 dm_state->context)) {
10798                         dc_plane_state_release(dc_new_plane_state);
10799                         return -EINVAL;
10800                 }
10801
10802                 dm_new_plane_state->dc_state = dc_new_plane_state;
10803
10804                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10805
10806                 /* Tell DC to do a full surface update every time there
10807                  * is a plane change. Inefficient, but works for now.
10808                  */
10809                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10810
10811                 *lock_and_validation_needed = true;
10812         }
10813
10815         return ret;
10816 }
10817
10818 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10819                                        int *src_w, int *src_h)
10820 {
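        /*
         * Added note (commentary): src_w/src_h are in 16.16 fixed point,
         * so the >> 16 below keeps only the integer pixel count. For a
         * 90-degree rotated 1920x1080 source this yields *src_w = 1080
         * and *src_h = 1920.
         */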
10821         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10822         case DRM_MODE_ROTATE_90:
10823         case DRM_MODE_ROTATE_270:
10824                 *src_w = plane_state->src_h >> 16;
10825                 *src_h = plane_state->src_w >> 16;
10826                 break;
10827         case DRM_MODE_ROTATE_0:
10828         case DRM_MODE_ROTATE_180:
10829         default:
10830                 *src_w = plane_state->src_w >> 16;
10831                 *src_h = plane_state->src_h >> 16;
10832                 break;
10833         }
10834 }
10835
10836 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10837                                 struct drm_crtc *crtc,
10838                                 struct drm_crtc_state *new_crtc_state)
10839 {
10840         struct drm_plane *cursor = crtc->cursor, *underlying;
10841         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10842         int i;
10843         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10844         int cursor_src_w, cursor_src_h;
10845         int underlying_src_w, underlying_src_h;
10846
10847         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10848          * cursor per pipe, but it inherits the scaling and positioning from the
10849          * underlying pipe. Check that the cursor plane's scaling matches the
10850          * underlying planes'. */
10851
10852         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10853         if (!new_cursor_state || !new_cursor_state->fb)
10854                 return 0;
10856
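        /*
         * Added note (commentary): the scale factors below are expressed in
         * thousandths. E.g. a 64x64 cursor FB scanned out as 64x64 gives
         * 64 * 1000 / 64 = 1000 (1.0x); an underlying plane upscaled 2x
         * would give 2000, and such a mismatch is rejected in the loop below.
         */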
10857         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10858         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10859         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10860
10861         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10862                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10863                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10864                         continue;
10865
10866                 /* Ignore disabled planes */
10867                 if (!new_underlying_state->fb)
10868                         continue;
10869
10870                 dm_get_oriented_plane_size(new_underlying_state,
10871                                            &underlying_src_w, &underlying_src_h);
10872                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10873                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10874
10875                 if (cursor_scale_w != underlying_scale_w ||
10876                     cursor_scale_h != underlying_scale_h) {
10877                         drm_dbg_atomic(crtc->dev,
10878                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10879                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10880                         return -EINVAL;
10881                 }
10882
10883                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10884                 if (new_underlying_state->crtc_x <= 0 &&
10885                     new_underlying_state->crtc_y <= 0 &&
10886                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10887                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10888                         break;
10889         }
10890
10891         return 0;
10892 }
10893
10894 #if defined(CONFIG_DRM_AMD_DC_DCN)
10895 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10896 {
10897         struct drm_connector *connector;
10898         struct drm_connector_state *conn_state, *old_conn_state;
10899         struct amdgpu_dm_connector *aconnector = NULL;
10900         int i;
10901         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10902                 if (!conn_state->crtc)
10903                         conn_state = old_conn_state;
10904
10905                 if (conn_state->crtc != crtc)
10906                         continue;
10907
10908                 aconnector = to_amdgpu_dm_connector(connector);
10909                 if (!aconnector->port || !aconnector->mst_port)
10910                         aconnector = NULL;
10911                 else
10912                         break;
10913         }
10914
10915         if (!aconnector)
10916                 return 0;
10917
10918         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10919 }
10920 #endif
10921
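/*
 * Added sketch (commentary, assuming the usual DRM atomic wiring): the DRM
 * core reaches the check below through the driver's mode_config function
 * table, roughly:
 *
 *   static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 *           .atomic_check = amdgpu_dm_atomic_check,
 *           .atomic_commit = drm_atomic_helper_commit,
 *   };
 */
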
10922 /**
10923  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10924  * @dev: The DRM device
10925  * @state: The atomic state to commit
10926  *
10927  * Validate that the given atomic state is programmable by DC into hardware.
10928  * This involves constructing a &struct dc_state reflecting the new hardware
10929  * state we wish to commit, then querying DC to see if it is programmable. It's
10930  * important not to modify the existing DC state. Otherwise, atomic_check
10931  * may unexpectedly commit hardware changes.
10932  *
10933  * When validating the DC state, it's important that the right locks are
10934  * acquired. For a full update, which removes, adds, or updates streams on one
10935  * CRTC while flipping on another, acquiring the global lock guarantees that
10936  * any such commit waits for completion of any outstanding flips using DRM's
10937  * synchronization events.
10938  *
10939  * Note that DM adds the affected connectors for all CRTCs in the state, even
10940  * when that might not seem necessary. This is because DC stream creation
10941  * requires the DC sink, which is tied to the DRM connector state. Cleaning
10942  * this up should be possible but is non-trivial - a possible TODO item.
10943  *
10944  * Return: 0 on success, or a negative error code if validation failed.
10945  */
10946 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10947                                   struct drm_atomic_state *state)
10948 {
10949         struct amdgpu_device *adev = drm_to_adev(dev);
10950         struct dm_atomic_state *dm_state = NULL;
10951         struct dc *dc = adev->dm.dc;
10952         struct drm_connector *connector;
10953         struct drm_connector_state *old_con_state, *new_con_state;
10954         struct drm_crtc *crtc;
10955         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10956         struct drm_plane *plane;
10957         struct drm_plane_state *old_plane_state, *new_plane_state;
10958         enum dc_status status;
10959         int ret, i;
10960         bool lock_and_validation_needed = false;
10961         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10962 #if defined(CONFIG_DRM_AMD_DC_DCN)
10963         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10964         struct drm_dp_mst_topology_state *mst_state;
10965         struct drm_dp_mst_topology_mgr *mgr;
10966 #endif
10967
10968         trace_amdgpu_dm_atomic_check_begin(state);
10969
10970         ret = drm_atomic_helper_check_modeset(dev, state);
10971         if (ret) {
10972                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10973                 goto fail;
10974         }
10975
10976         /* Check connector changes */
10977         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10978                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10979                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10980
10981                 /* Skip connectors that are disabled or part of modeset already. */
10982                 if (!old_con_state->crtc && !new_con_state->crtc)
10983                         continue;
10984
10985                 if (!new_con_state->crtc)
10986                         continue;
10987
10988                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10989                 if (IS_ERR(new_crtc_state)) {
10990                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10991                         ret = PTR_ERR(new_crtc_state);
10992                         goto fail;
10993                 }
10994
10995                 if (dm_old_con_state->abm_level !=
10996                     dm_new_con_state->abm_level)
10997                         new_crtc_state->connectors_changed = true;
10998         }
10999
11000 #if defined(CONFIG_DRM_AMD_DC_DCN)
11001         if (dc_resource_is_dsc_encoding_supported(dc)) {
11002                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11003                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11004                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
11005                                 if (ret) {
11006                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11007                                         goto fail;
11008                                 }
11009                         }
11010                 }
11011                 pre_validate_dsc(state, &dm_state, vars);
11012         }
11013 #endif
11014         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11015                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11016
11017                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11018                     !new_crtc_state->color_mgmt_changed &&
11019                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11020                     !dm_old_crtc_state->dsc_force_changed)
11021                         continue;
11022
11023                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11024                 if (ret) {
11025                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11026                         goto fail;
11027                 }
11028
11029                 if (!new_crtc_state->enable)
11030                         continue;
11031
11032                 ret = drm_atomic_add_affected_connectors(state, crtc);
11033                 if (ret) {
11034                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11035                         goto fail;
11036                 }
11037
11038                 ret = drm_atomic_add_affected_planes(state, crtc);
11039                 if (ret) {
11040                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11041                         goto fail;
11042                 }
11043
11044                 if (dm_old_crtc_state->dsc_force_changed)
11045                         new_crtc_state->mode_changed = true;
11046         }
11047
11048         /*
11049          * Add all primary and overlay planes on the CRTC to the state
11050          * whenever a plane is enabled to maintain correct z-ordering
11051          * and to enable fast surface updates.
11052          */
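        /*
         * Added example (commentary): if only an overlay plane on a CRTC is
         * touched by the commit, the loop below also pulls that CRTC's
         * primary plane into the state, so DC re-evaluates the whole plane
         * stack for the CRTC rather than a single plane in isolation.
         */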
11053         drm_for_each_crtc(crtc, dev) {
11054                 bool modified = false;
11055
11056                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11057                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11058                                 continue;
11059
11060                         if (new_plane_state->crtc == crtc ||
11061                             old_plane_state->crtc == crtc) {
11062                                 modified = true;
11063                                 break;
11064                         }
11065                 }
11066
11067                 if (!modified)
11068                         continue;
11069
11070                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11071                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11072                                 continue;
11073
11074                         new_plane_state =
11075                                 drm_atomic_get_plane_state(state, plane);
11076
11077                         if (IS_ERR(new_plane_state)) {
11078                                 ret = PTR_ERR(new_plane_state);
11079                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11080                                 goto fail;
11081                         }
11082                 }
11083         }
11084
11085         /* Remove existing planes if they are modified */
11086         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11087                 ret = dm_update_plane_state(dc, state, plane,
11088                                             old_plane_state,
11089                                             new_plane_state,
11090                                             false,
11091                                             &lock_and_validation_needed);
11092                 if (ret) {
11093                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11094                         goto fail;
11095                 }
11096         }
11097
11098         /* Disable all CRTCs that require disabling */
11099         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11100                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11101                                            old_crtc_state,
11102                                            new_crtc_state,
11103                                            false,
11104                                            &lock_and_validation_needed);
11105                 if (ret) {
11106                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11107                         goto fail;
11108                 }
11109         }
11110
11111         /* Enable all CRTCs that require enabling */
11112         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11113                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11114                                            old_crtc_state,
11115                                            new_crtc_state,
11116                                            true,
11117                                            &lock_and_validation_needed);
11118                 if (ret) {
11119                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11120                         goto fail;
11121                 }
11122         }
11123
11124         /* Add new/modified planes */
11125         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11126                 ret = dm_update_plane_state(dc, state, plane,
11127                                             old_plane_state,
11128                                             new_plane_state,
11129                                             true,
11130                                             &lock_and_validation_needed);
11131                 if (ret) {
11132                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11133                         goto fail;
11134                 }
11135         }
11136
11137         /* Run this here since we want to validate the streams we created */
11138         ret = drm_atomic_helper_check_planes(dev, state);
11139         if (ret) {
11140                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11141                 goto fail;
11142         }
11143
11144         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11145                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11146                 if (dm_new_crtc_state->mpo_requested)
11147                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11148         }
11149
11150         /* Check cursor planes scaling */
11151         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11152                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11153                 if (ret) {
11154                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11155                         goto fail;
11156                 }
11157         }
11158
11159         if (state->legacy_cursor_update) {
11160                 /*
11161                  * This is a fast cursor update coming from the plane update
11162                  * helper, check if it can be done asynchronously for better
11163                  * performance.
11164                  */
11165                 state->async_update =
11166                         !drm_atomic_helper_async_check(dev, state);
11167
11168                 /*
11169                  * Skip the remaining global validation if this is an async
11170                  * update. Cursor updates can be done without affecting
11171                  * state or bandwidth calcs and this avoids the performance
11172                  * penalty of locking the private state object and
11173                  * allocating a new dc_state.
11174                  */
11175                 if (state->async_update)
11176                         return 0;
11177         }
11178
11179         /* Check scaling and underscan changes */
11180         /* TODO Removed scaling changes validation due to inability to commit
11181          * new stream into context w/o causing full reset. Need to
11182          * decide how to handle.
11183          */
11184         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11185                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11186                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11187                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11188
11189                 /* Skip any modesets/resets */
11190                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11191                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11192                         continue;
11193
11194                 /* Skip anything that is not a scaling or underscan change */
11195                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11196                         continue;
11197
11198                 lock_and_validation_needed = true;
11199         }
11200
11201 #if defined(CONFIG_DRM_AMD_DC_DCN)
11202         /* set the slot info for each mst_state based on the link encoding format */
11203         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11204                 struct amdgpu_dm_connector *aconnector;
11205                 struct drm_connector *connector;
11206                 struct drm_connector_list_iter iter;
11207                 u8 link_coding_cap;
11208
11209                 if (!mgr->mst_state)
11210                         continue;
11211
11212                 drm_connector_list_iter_begin(dev, &iter);
11213                 drm_for_each_connector_iter(connector, &iter) {
11214                         int id = connector->index;
11215
11216                         if (id == mst_state->mgr->conn_base_id) {
11217                                 aconnector = to_amdgpu_dm_connector(connector);
11218                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11219                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11220
11221                                 break;
11222                         }
11223                 }
11224                 drm_connector_list_iter_end(&iter);
11226         }
11227 #endif
11228         /*
11229          * Streams and planes are reset when there are changes that affect
11230          * bandwidth. Anything that affects bandwidth needs to go through
11231          * DC global validation to ensure that the configuration can be applied
11232          * to hardware.
11233          *
11234          * We currently have to stall out here in atomic_check for outstanding
11235          * commits to finish in this case because our IRQ handlers reference
11236          * DRM state directly - we can end up disabling interrupts too early
11237          * if we don't.
11238          *
11239          * TODO: Remove this stall and drop DM state private objects.
11240          */
11241         if (lock_and_validation_needed) {
11242                 ret = dm_atomic_get_state(state, &dm_state);
11243                 if (ret) {
11244                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11245                         goto fail;
11246                 }
11247
11248                 ret = do_aquire_global_lock(dev, state);
11249                 if (ret) {
11250                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11251                         goto fail;
11252                 }
11253
11254 #if defined(CONFIG_DRM_AMD_DC_DCN)
11255                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11256                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                        ret = -EINVAL;
11257                         goto fail;
11258                 }
11259
11260                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11261                 if (ret) {
11262                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11263                         goto fail;
11264                 }
11265 #endif
11266
11267                 /*
11268                  * Perform validation of MST topology in the state:
11269                  * We need to perform MST atomic check before calling
11270                  * dc_validate_global_state(), or there is a chance
11271                  * to get stuck in an infinite loop and hang eventually.
11272                  */
11273                 ret = drm_dp_mst_atomic_check(state);
11274                 if (ret) {
11275                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11276                         goto fail;
11277                 }
11278                 status = dc_validate_global_state(dc, dm_state->context, true);
11279                 if (status != DC_OK) {
11280                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11281                                        dc_status_to_str(status), status);
11282                         ret = -EINVAL;
11283                         goto fail;
11284                 }
11285         } else {
11286                 /*
11287          * the DC context or affect global validation, and their commit
11288          * work can be done in parallel with other commits not touching
11289                  * commit work done in parallel with other commits not touching
11290                  * the same resource. If we have a new DC context as part of
11291                  * the DM atomic state from validation we need to free it and
11292                  * retain the existing one instead.
11293                  *
11294                  * Furthermore, since the DM atomic state only contains the DC
11295                  * context and can safely be annulled, we can free the state
11296                  * and clear the associated private object now to free
11297                  * some memory and avoid a possible use-after-free later.
11298                  */
11299
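                /*
                 * Added note (commentary): this is a swap-remove. E.g. with
                 * private_objs = [A, B, C] where B is the DM object, B's
                 * state is destroyed, C is copied into B's slot, the last
                 * slot is cleared, and num_private_objs drops from 3 to 2.
                 */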
11300                 for (i = 0; i < state->num_private_objs; i++) {
11301                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11302
11303                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11304                                 int j = state->num_private_objs - 1;
11305
11306                                 dm_atomic_destroy_state(obj,
11307                                                 state->private_objs[i].state);
11308
11309                                 /* If i is not at the end of the array then the
11310                                  * last element needs to be moved to where i was
11311                                  * before the array can safely be truncated.
11312                                  */
11313                                 if (i != j)
11314                                         state->private_objs[i] =
11315                                                 state->private_objs[j];
11316
11317                                 state->private_objs[j].ptr = NULL;
11318                                 state->private_objs[j].state = NULL;
11319                                 state->private_objs[j].old_state = NULL;
11320                                 state->private_objs[j].new_state = NULL;
11321
11322                                 state->num_private_objs = j;
11323                                 break;
11324                         }
11325                 }
11326         }
11327
11328         /* Store the overall update type for use later in atomic check. */
11329         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11330                 struct dm_crtc_state *dm_new_crtc_state =
11331                         to_dm_crtc_state(new_crtc_state);
11332
11333                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11334                                                          UPDATE_TYPE_FULL :
11335                                                          UPDATE_TYPE_FAST;
11336         }
11337
11338         /* Must be success - ret should be 0 at this point */
11339         WARN_ON(ret);
11340
11341         trace_amdgpu_dm_atomic_check_finish(state, ret);
11342
11343         return ret;
11344
11345 fail:
11346         if (ret == -EDEADLK)
11347                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11348         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11349                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11350         else
11351                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11352
11353         trace_amdgpu_dm_atomic_check_finish(state, ret);
11354
11355         return ret;
11356 }
11357
11358 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11359                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11360 {
11361         uint8_t dpcd_data;
11362         bool capable = false;
11363
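        /*
         * Added note (commentary): DP_MSA_TIMING_PAR_IGNORED lives in the
         * DP_DOWN_STREAM_PORT_COUNT register (DPCD 0x007). A sink that sets
         * it can ignore the MSA timing parameters, which is what lets the
         * source vary the vertical timing for FreeSync/Adaptive-Sync.
         */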
11364         if (amdgpu_dm_connector->dc_link &&
11365                 dm_helpers_dp_read_dpcd(
11366                                 NULL,
11367                                 amdgpu_dm_connector->dc_link,
11368                                 DP_DOWN_STREAM_PORT_COUNT,
11369                                 &dpcd_data,
11370                                 sizeof(dpcd_data))) {
11371                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
11372         }
11373
11374         return capable;
11375 }
11376
11377 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11378                 unsigned int offset,
11379                 unsigned int total_length,
11380                 uint8_t *data,
11381                 unsigned int length,
11382                 struct amdgpu_hdmi_vsdb_info *vsdb)
11383 {
11384         bool res;
11385         union dmub_rb_cmd cmd;
11386         struct dmub_cmd_send_edid_cea *input;
11387         struct dmub_cmd_edid_cea_output *output;
11388
11389         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11390                 return false;
11391
11392         memset(&cmd, 0, sizeof(cmd));
11393
11394         input = &cmd.edid_cea.data.input;
11395
11396         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11397         cmd.edid_cea.header.sub_type = 0;
11398         cmd.edid_cea.header.payload_bytes =
11399                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11400         input->offset = offset;
11401         input->length = length;
11402         input->cea_total_length = total_length;
11403         memcpy(input->payload, data, length);
11404
11405         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11406         if (!res) {
11407                 DRM_ERROR("EDID CEA parser failed\n");
11408                 return false;
11409         }
11410
11411         output = &cmd.edid_cea.data.output;
11412
11413         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11414                 if (!output->ack.success) {
11415                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11416                                         output->ack.offset);
11417                 }
11418         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11419                 if (!output->amd_vsdb.vsdb_found)
11420                         return false;
11421
11422                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11423                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11424                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11425                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11426         } else {
11427                 DRM_WARN("Unknown EDID CEA parser results\n");
11428                 return false;
11429         }
11430
11431         return true;
11432 }
11433
11434 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11435                 uint8_t *edid_ext, int len,
11436                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11437 {
11438         int i;
11439
11440         /* send extension block to DMCU for parsing */
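        /*
         * Added note (commentary): a standard 128-byte CEA extension block
         * is streamed in 16 chunks of 8 bytes. Every intermediate chunk is
         * acknowledged via dc_edid_parser_recv_cea_ack(); only after the
         * final chunk is the parsed AMD VSDB result read back.
         */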
11441         for (i = 0; i < len; i += 8) {
11442                 bool res;
11443                 int offset;
11444
11445                 /* send 8 bytes at a time */
11446                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11447                         return false;
11448
11449                 if (i + 8 == len) {
11450                         /* EDID block send completed, expect result */
11451                         int version, min_rate, max_rate;
11452
11453                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11454                         if (res) {
11455                                 /* amd vsdb found */
11456                                 vsdb_info->freesync_supported = 1;
11457                                 vsdb_info->amd_vsdb_version = version;
11458                                 vsdb_info->min_refresh_rate_hz = min_rate;
11459                                 vsdb_info->max_refresh_rate_hz = max_rate;
11460                                 return true;
11461                         }
11462                         /* not amd vsdb */
11463                         return false;
11464                 }
11465
11466                 /* check for ack */
11467                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11468                 if (!res)
11469                         return false;
11470         }
11471
11472         return false;
11473 }
11474
11475 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11476                 uint8_t *edid_ext, int len,
11477                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11478 {
11479         int i;
11480
11481         /* send extension block to DMUB for parsing */
11482         for (i = 0; i < len; i += 8) {
11483                 /* send 8 bytes at a time */
11484                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11485                         return false;
11486         }
11487
11488         return vsdb_info->freesync_supported;
11489 }
11490
11491 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11492                 uint8_t *edid_ext, int len,
11493                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11494 {
11495         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11496
11497         if (adev->dm.dmub_srv)
11498                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11499         else
11500                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11501 }
11502
11503 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11504                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11505 {
11506         uint8_t *edid_ext = NULL;
11507         int i;
11508         bool valid_vsdb_found = false;
11509
11510         /*----- drm_find_cea_extension() -----*/
11511         /* No EDID or EDID extensions */
11512         if (edid == NULL || edid->extensions == 0)
11513                 return -ENODEV;
11514
11515         /* Find CEA extension */
11516         for (i = 0; i < edid->extensions; i++) {
11517                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11518                 if (edid_ext[0] == CEA_EXT)
11519                         break;
11520         }
11521
11522         if (i == edid->extensions)
11523                 return -ENODEV;
11524
11525         /*----- cea_db_offsets() -----*/
11526         if (edid_ext[0] != CEA_EXT)
11527                 return -ENODEV;
11528
11529         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11530
11531         return valid_vsdb_found ? i : -ENODEV;
11532 }
11533
11534 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11535                                         struct edid *edid)
11536 {
11537         int i = 0;
11538         struct detailed_timing *timing;
11539         struct detailed_non_pixel *data;
11540         struct detailed_data_monitor_range *range;
11541         struct amdgpu_dm_connector *amdgpu_dm_connector =
11542                         to_amdgpu_dm_connector(connector);
11543         struct dm_connector_state *dm_con_state = NULL;
11544         struct dc_sink *sink;
11545
11546         struct drm_device *dev = connector->dev;
11547         struct amdgpu_device *adev = drm_to_adev(dev);
11548         bool freesync_capable = false;
11549         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11550
11551         if (!connector->state) {
11552                 DRM_ERROR("%s - Connector has no state\n", __func__);
11553                 goto update;
11554         }
11555
11556         sink = amdgpu_dm_connector->dc_sink ?
11557                 amdgpu_dm_connector->dc_sink :
11558                 amdgpu_dm_connector->dc_em_sink;
11559
11560         if (!edid || !sink) {
11561                 dm_con_state = to_dm_connector_state(connector->state);
11562
11563                 amdgpu_dm_connector->min_vfreq = 0;
11564                 amdgpu_dm_connector->max_vfreq = 0;
11565                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11566                 connector->display_info.monitor_range.min_vfreq = 0;
11567                 connector->display_info.monitor_range.max_vfreq = 0;
11568                 freesync_capable = false;
11569
11570                 goto update;
11571         }
11572
11573         dm_con_state = to_dm_connector_state(connector->state);
11574
11575         if (!adev->dm.freesync_module)
11576                 goto update;
11577
11579         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11580                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11581                 bool edid_check_required = false;
11582
11583                 if (edid) {
11584                         edid_check_required = is_dp_capable_without_timing_msa(
11585                                                 adev->dm.dc,
11586                                                 amdgpu_dm_connector);
11587                 }
11588
11589                 if (edid_check_required && (edid->version > 1 ||
11590                    (edid->version == 1 && edid->revision > 1))) {
11591                         for (i = 0; i < 4; i++) {
11593                                 timing  = &edid->detailed_timings[i];
11594                                 data    = &timing->data.other_data;
11595                                 range   = &data->data.range;
11596                                 /*
11597                                  * Check if monitor has continuous frequency mode
11598                                  */
11599                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11600                                         continue;
11601                                 /*
11602                                  * Check for flag range limits only. If flag == 1 then
11603                                  * no additional timing information provided.
11604                                  * Default GTF, GTF Secondary curve and CVT are not
11605                                  * supported
11606                                  */
11607                                 if (range->flags != 1)
11608                                         continue;
11609
11610                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11611                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
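                                /*
                                 * Added note (commentary): the EDID range
                                 * descriptor stores the max pixel clock in
                                 * units of 10 MHz, hence the * 10 below to
                                 * get MHz.
                                 */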
11612                                 amdgpu_dm_connector->pixel_clock_mhz =
11613                                         range->pixel_clock_mhz * 10;
11614
11615                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11616                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11617
11618                                 break;
11619                         }
11620
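                        /*
                         * Added note (commentary, driver heuristic): only
                         * advertise FreeSync when the reported VRR window is
                         * wider than 10 Hz; a degenerate range such as
                         * 60-60 Hz is not useful.
                         */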
11621                         if (amdgpu_dm_connector->max_vfreq -
11622                             amdgpu_dm_connector->min_vfreq > 10)
11623                                 freesync_capable = true;
11626                 }
11627         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11628                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11629                 if (i >= 0 && vsdb_info.freesync_supported) {
11630                         timing  = &edid->detailed_timings[i];
11631                         data    = &timing->data.other_data;
11632
11633                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11634                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11635                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11636                                 freesync_capable = true;
11637
11638                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11639                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11640                 }
11641         }

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

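/*
 * Force every active stream to resynchronize its CRTC timing. While holding
 * the DC lock, this propagates adev->dm.force_timing_sync to each stream in
 * the current DC state and retriggers per-frame CRTC master sync.
 */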
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

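/*
 * Register write helper used by DC. Optionally rejects address 0 when
 * DM_CHECK_ADDR_0 is defined, then writes through CGS and records the
 * access in the wreg trace.
 */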
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

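/*
 * Register read helper used by DC. Reads are refused while the DMUB
 * register helper is gathering an offload sequence, since the value
 * cannot be returned synchronously in that mode.
 */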
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

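	/* Reading back registers mid-gather is a driver bug; flag it and bail. */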
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

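/*
 * Translate a DMUB async-to-sync completion status into a return value and
 * an operation result. For AUX commands, success returns the reply length;
 * for SET_CONFIG, success returns 0. Any failure returns -1 and encodes the
 * error in *operation_result.
 */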
static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
						struct dc_context *ctx,
						uint8_t status_type,
						uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

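/*
 * Submit an AUX or SET_CONFIG command to DMUB and translate the outcome for
 * the caller. AUX transfers wait (up to 10 s) on the dmub_aux_transfer_done
 * completion and, on an ACKed read, copy the reply data back into the
 * payload.
 */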
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		/* If the request cannot even be queued, report the failure now
		 * instead of waiting for the 10 s completion timeout.
		 */
		if (!dc_process_dmub_aux_transfer_async(ctx->dc,
				link_index, (struct aux_payload *)cmd_payload))
			return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_FAIL,
					(uint32_t *)operation_result);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

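	/* Wait up to 10 s for DMUB to signal that the transfer finished. */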
	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("timed out waiting for dmub_aux_transfer_done\n");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

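	/* For ACKed AUX reads, copy the reply payload returned by DMUB. */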
	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}

/*
 * Check whether seamless boot is supported.
 *
 * Seamless boot is currently supported only on CHIP_VANGOGH.
 * If it proves stable there, support may be extended to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
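		/* Only claim seamless boot when stolen VGA memory is not being kept. */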
		if (!adev->mman.keep_stolen_vga_memory)
			return true;
		break;
	default:
		break;
	}

	return false;
}