1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59
60 #include "ivsrcid/ivsrcid_vislands30.h"
61
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107
108 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
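/*
 * Illustrative flow (simplified): a DRM atomic commit is first validated
 * against DC by amdgpu_dm_atomic_check(), and on commit
 * amdgpu_dm_atomic_commit_tail() translates the new DRM state into
 * dc_stream_state / dc_plane_state updates handed to the DC core.
 */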
129
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
134
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137         switch (link->dpcd_caps.dongle_type) {
138         case DISPLAY_DONGLE_NONE:
139                 return DRM_MODE_SUBCONNECTOR_Native;
140         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141                 return DRM_MODE_SUBCONNECTOR_VGA;
142         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143         case DISPLAY_DONGLE_DP_DVI_DONGLE:
144                 return DRM_MODE_SUBCONNECTOR_DVID;
145         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147                 return DRM_MODE_SUBCONNECTOR_HDMIA;
148         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149         default:
150                 return DRM_MODE_SUBCONNECTOR_Unknown;
151         }
152 }
153
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156         struct dc_link *link = aconnector->dc_link;
157         struct drm_connector *connector = &aconnector->base;
158         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159
160         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161                 return;
162
163         if (aconnector->dc_sink)
164                 subconnector = get_subconnector_type(link);
165
166         drm_object_property_set_value(&connector->base,
167                         connector->dev->mode_config.dp_subconnector_property,
168                         subconnector);
169 }
170
171 /*
172  * Initializes drm_device display related structures, based on the information
173  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
174  * drm_encoder and drm_mode_config.
175  *
176  * Returns 0 on success
177  */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183                                 struct drm_plane *plane,
184                                 unsigned long possible_crtcs,
185                                 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187                                struct drm_plane *plane,
188                                uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
191                                     uint32_t link_index,
192                                     struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194                                   struct amdgpu_encoder *aencoder,
195                                   uint32_t link_index);
196
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202                                   struct drm_atomic_state *state);
203
204 static void handle_cursor_update(struct drm_plane *plane,
205                                  struct drm_plane_state *old_plane_state);
206
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
215
216 static bool
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218                                  struct drm_crtc_state *new_crtc_state);
219 /**
220  * dm_vblank_get_counter() - Get counter for number of vertical blanks
221  *
222  * @adev: [in] desired amdgpu device
223  * @crtc: [in] index of the CRTC to get the counter from
224  *
225  * Gets the current vblank counter from the dc_stream_state associated
226  * with the given CRTC.
227  *
228  * Return:
229  * Counter for vertical blanks, or 0 if @crtc is out of range or no
230  * stream is active on it.
231  */
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
233 {
234         if (crtc >= adev->mode_info.num_crtc)
235                 return 0;
236         else {
237                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
238
239                 if (acrtc->dm_irq_params.stream == NULL) {
240                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241                                   crtc);
242                         return 0;
243                 }
244
245                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
246         }
247 }
248
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250                                   u32 *vbl, u32 *position)
251 {
252         uint32_t v_blank_start, v_blank_end, h_position, v_position;
253
254         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
255                 return -EINVAL;
256         else {
257                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258
259                 if (acrtc->dm_irq_params.stream == NULL) {
260                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261                                   crtc);
262                         return 0;
263                 }
264
265                 /*
266                  * TODO rework base driver to use values directly.
267                  * for now parse it back into reg-format
268                  */
269                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
270                                          &v_blank_start,
271                                          &v_blank_end,
272                                          &h_position,
273                                          &v_position);
274
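                /*
                 * Pack into the legacy register layout: vertical position in
                 * the low 16 bits, horizontal position in the high 16 bits.
                 */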
275                 *position = v_position | (h_position << 16);
276                 *vbl = v_blank_start | (v_blank_end << 16);
277         }
278
279         return 0;
280 }
281
282 static bool dm_is_idle(void *handle)
283 {
284         /* XXX todo */
285         return true;
286 }
287
288 static int dm_wait_for_idle(void *handle)
289 {
290         /* XXX todo */
291         return 0;
292 }
293
294 static bool dm_check_soft_reset(void *handle)
295 {
296         return false;
297 }
298
299 static int dm_soft_reset(void *handle)
300 {
301         /* XXX todo */
302         return 0;
303 }
304
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
307                      int otg_inst)
308 {
309         struct drm_device *dev = adev_to_drm(adev);
310         struct drm_crtc *crtc;
311         struct amdgpu_crtc *amdgpu_crtc;
312
313         if (otg_inst == -1) {
314                 WARN_ON(1);
315                 return adev->mode_info.crtcs[0];
316         }
317
318         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319                 amdgpu_crtc = to_amdgpu_crtc(crtc);
320
321                 if (amdgpu_crtc->otg_inst == otg_inst)
322                         return amdgpu_crtc;
323         }
324
325         return NULL;
326 }
327
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330         return acrtc->dm_irq_params.freesync_config.state ==
331                        VRR_STATE_ACTIVE_VARIABLE ||
332                acrtc->dm_irq_params.freesync_config.state ==
333                        VRR_STATE_ACTIVE_FIXED;
334 }
335
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343                                               struct dm_crtc_state *new_state)
344 {
345         if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
346                 return true;
347         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348                 return true;
349         else
350                 return false;
351 }
352
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: interrupt parameters, used to look up the CRTC instance
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362         struct amdgpu_crtc *amdgpu_crtc;
363         struct common_irq_params *irq_params = interrupt_params;
364         struct amdgpu_device *adev = irq_params->adev;
365         unsigned long flags;
366         struct drm_pending_vblank_event *e;
367         uint32_t vpos, hpos, v_blank_start, v_blank_end;
368         bool vrr_active;
369
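        /*
         * Pageflip IRQ sources are laid out contiguously per OTG, so the
         * offset from IRQ_TYPE_PFLIP selects the OTG instance (and thus
         * the CRTC) that raised this interrupt.
         */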
370         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371
372         /* IRQ could occur when in initial stage */
373         /* TODO work and BO cleanup */
374         if (amdgpu_crtc == NULL) {
375                 DC_LOG_PFLIP("CRTC is null, returning.\n");
376                 return;
377         }
378
379         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380
381         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
383                                                  amdgpu_crtc->pflip_status,
384                                                  AMDGPU_FLIP_SUBMITTED,
385                                                  amdgpu_crtc->crtc_id,
386                                                  amdgpu_crtc);
387                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388                 return;
389         }
390
391         /* page flip completed. */
392         e = amdgpu_crtc->event;
393         amdgpu_crtc->event = NULL;
394
395         WARN_ON(!e);
397
398         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
399
400         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
401         if (!vrr_active ||
402             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403                                       &v_blank_end, &hpos, &vpos) ||
404             (vpos < v_blank_start)) {
405                 /* Update to correct count and vblank timestamp if racing with
406                  * vblank irq. This also updates to the correct vblank timestamp
407                  * even in VRR mode, as scanout is past the front-porch atm.
408                  */
409                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
410
411                 /* Wake up userspace by sending the pageflip event with proper
412                  * count and timestamp of vblank of flip completion.
413                  */
414                 if (e) {
415                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
416
417                         /* Event sent, so done with vblank for this flip */
418                         drm_crtc_vblank_put(&amdgpu_crtc->base);
419                 }
420         } else if (e) {
421                 /* VRR active and inside front-porch: vblank count and
422                  * timestamp for pageflip event will only be up to date after
423                  * drm_crtc_handle_vblank() has been executed from late vblank
424                  * irq handler after start of back-porch (vline 0). We queue the
425                  * pageflip event for send-out by drm_crtc_handle_vblank() with
426                  * updated timestamp and count, once it runs after us.
427                  *
428                  * We need to open-code this instead of using the helper
429                  * drm_crtc_arm_vblank_event(), as that helper would
430                  * call drm_crtc_accurate_vblank_count(), which we must
431                  * not call in VRR mode while we are in front-porch!
432                  */
433
434                 /* sequence will be replaced by real count during send-out. */
435                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436                 e->pipe = amdgpu_crtc->crtc_id;
437
438                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
439                 e = NULL;
440         }
441
442         /* Keep track of the vblank of this flip for flip throttling. We use the
443          * cooked hw counter, as it incremented at the start of this vblank of
444          * pageflip completion, so last_flip_vblank is the forbidden count for
445          * queueing new pageflips if vsync + VRR is enabled.
446          */
447         amdgpu_crtc->dm_irq_params.last_flip_vblank =
448                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
449
450         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
452
453         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454                      amdgpu_crtc->crtc_id, amdgpu_crtc,
455                      vrr_active, (int) !e);
456 }
457
458 static void dm_vupdate_high_irq(void *interrupt_params)
459 {
460         struct common_irq_params *irq_params = interrupt_params;
461         struct amdgpu_device *adev = irq_params->adev;
462         struct amdgpu_crtc *acrtc;
463         struct drm_device *drm_dev;
464         struct drm_vblank_crtc *vblank;
465         ktime_t frame_duration_ns, previous_timestamp;
466         unsigned long flags;
467         int vrr_active;
468
469         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
470
471         if (acrtc) {
472                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
473                 drm_dev = acrtc->base.dev;
474                 vblank = &drm_dev->vblank[acrtc->base.index];
475                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
476                 frame_duration_ns = vblank->time - previous_timestamp;
477
478                 if (frame_duration_ns > 0) {
479                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
480                                                 frame_duration_ns,
481                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
482                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
483                 }
484
485                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
486                               acrtc->crtc_id,
487                               vrr_active);
488
489                 /* Core vblank handling is done here after the end of front-porch in
490                  * vrr mode, as vblank timestamping only gives valid results once
491                  * scanout is past the front-porch. This will also deliver the
492                  * page-flip completion events that have been queued to us
493                  * if a pageflip happened inside front-porch.
494                  */
495                 if (vrr_active) {
496                         drm_crtc_handle_vblank(&acrtc->base);
497
498                         /* BTR processing for pre-DCE12 ASICs */
499                         if (acrtc->dm_irq_params.stream &&
500                             adev->family < AMDGPU_FAMILY_AI) {
501                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
502                                 mod_freesync_handle_v_update(
503                                     adev->dm.freesync_module,
504                                     acrtc->dm_irq_params.stream,
505                                     &acrtc->dm_irq_params.vrr_params);
506
507                                 dc_stream_adjust_vmin_vmax(
508                                     adev->dm.dc,
509                                     acrtc->dm_irq_params.stream,
510                                     &acrtc->dm_irq_params.vrr_params.adjust);
511                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
512                         }
513                 }
514         }
515 }
516
517 /**
518  * dm_crtc_high_irq() - Handles CRTC interrupt
519  * @interrupt_params: used for determining the CRTC instance
520  *
521  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
522  * event handler.
523  */
524 static void dm_crtc_high_irq(void *interrupt_params)
525 {
526         struct common_irq_params *irq_params = interrupt_params;
527         struct amdgpu_device *adev = irq_params->adev;
528         struct amdgpu_crtc *acrtc;
529         unsigned long flags;
530         int vrr_active;
531
532         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
533         if (!acrtc)
534                 return;
535
536         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
537
538         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
539                       vrr_active, acrtc->dm_irq_params.active_planes);
540
541         /*
542          * Core vblank handling at start of front-porch is only possible
543          * in non-vrr mode, as only then will vblank timestamping give
544          * valid results while done inside front-porch. Otherwise defer it
545          * to dm_vupdate_high_irq after end of front-porch.
546          */
547         if (!vrr_active)
548                 drm_crtc_handle_vblank(&acrtc->base);
549
550         /*
551          * The following must happen at start of vblank, for crc
552          * computation and below-the-range btr support in vrr mode.
553          */
554         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
555
556         /* BTR updates need to happen before VUPDATE on Vega and above. */
557         if (adev->family < AMDGPU_FAMILY_AI)
558                 return;
559
560         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
561
562         if (acrtc->dm_irq_params.stream &&
563             acrtc->dm_irq_params.vrr_params.supported &&
564             acrtc->dm_irq_params.freesync_config.state ==
565                     VRR_STATE_ACTIVE_VARIABLE) {
566                 mod_freesync_handle_v_update(adev->dm.freesync_module,
567                                              acrtc->dm_irq_params.stream,
568                                              &acrtc->dm_irq_params.vrr_params);
569
570                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
571                                            &acrtc->dm_irq_params.vrr_params.adjust);
572         }
573
574         /*
575          * If there aren't any active_planes then DCH HUBP may be clock-gated.
576          * In that case, pageflip completion interrupts won't fire and pageflip
577          * completion events won't get delivered. Prevent this by sending
578          * pending pageflip events from here if a flip is still pending.
579          *
580          * If any planes are enabled, use dm_pflip_high_irq() instead, to
581          * avoid race conditions between flip programming and completion,
582          * which could cause too early flip completion events.
583          */
584         if (adev->family >= AMDGPU_FAMILY_RV &&
585             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
586             acrtc->dm_irq_params.active_planes == 0) {
587                 if (acrtc->event) {
588                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
589                         acrtc->event = NULL;
590                         drm_crtc_vblank_put(&acrtc->base);
591                 }
592                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
593         }
594
595         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
596 }
597
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
604  * Used to set crc window/read out crc value at vertical line 0 position
605  */
606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
607 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
608 {
609         struct common_irq_params *irq_params = interrupt_params;
610         struct amdgpu_device *adev = irq_params->adev;
611         struct amdgpu_crtc *acrtc;
612
613         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
614
615         if (!acrtc)
616                 return;
617
618         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
619 }
620 #endif
621 #endif
622
623 static int dm_set_clockgating_state(void *handle,
624                   enum amd_clockgating_state state)
625 {
626         return 0;
627 }
628
629 static int dm_set_powergating_state(void *handle,
630                   enum amd_powergating_state state)
631 {
632         return 0;
633 }
634
635 /* Prototypes of private functions */
636 static int dm_early_init(void *handle);
637
638 /* Allocate memory for FBC compressed data */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
640 {
641         struct drm_device *dev = connector->dev;
642         struct amdgpu_device *adev = drm_to_adev(dev);
643         struct dm_compressor_info *compressor = &adev->dm.compressor;
644         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645         struct drm_display_mode *mode;
646         unsigned long max_size = 0;
647
648         if (adev->dm.dc->fbc_compressor == NULL)
649                 return;
650
651         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
652                 return;
653
654         if (compressor->bo_ptr)
655                 return;
656
658         list_for_each_entry(mode, &connector->modes, head) {
659                 if (max_size < mode->htotal * mode->vtotal)
660                         max_size = mode->htotal * mode->vtotal;
661         }
662
663         if (max_size) {
664                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666                             &compressor->gpu_addr, &compressor->cpu_addr);
667
668                 if (r) {
669                         DRM_ERROR("DM: Failed to initialize FBC\n");
670                 } else {
671                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672                         DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
673                 }
674
675         }
676
677 }
678
679 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
680                                           int pipe, bool *enabled,
681                                           unsigned char *buf, int max_bytes)
682 {
683         struct drm_device *dev = dev_get_drvdata(kdev);
684         struct amdgpu_device *adev = drm_to_adev(dev);
685         struct drm_connector *connector;
686         struct drm_connector_list_iter conn_iter;
687         struct amdgpu_dm_connector *aconnector;
688         int ret = 0;
689
690         *enabled = false;
691
692         mutex_lock(&adev->dm.audio_lock);
693
694         drm_connector_list_iter_begin(dev, &conn_iter);
695         drm_for_each_connector_iter(connector, &conn_iter) {
696                 aconnector = to_amdgpu_dm_connector(connector);
697                 if (aconnector->audio_inst != port)
698                         continue;
699
700                 *enabled = true;
701                 ret = drm_eld_size(connector->eld);
702                 memcpy(buf, connector->eld, min(max_bytes, ret));
703
704                 break;
705         }
706         drm_connector_list_iter_end(&conn_iter);
707
708         mutex_unlock(&adev->dm.audio_lock);
709
710         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
711
712         return ret;
713 }
714
715 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
716         .get_eld = amdgpu_dm_audio_component_get_eld,
717 };
718
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720                                        struct device *hda_kdev, void *data)
721 {
722         struct drm_device *dev = dev_get_drvdata(kdev);
723         struct amdgpu_device *adev = drm_to_adev(dev);
724         struct drm_audio_component *acomp = data;
725
726         acomp->ops = &amdgpu_dm_audio_component_ops;
727         acomp->dev = kdev;
728         adev->dm.audio_component = acomp;
729
730         return 0;
731 }
732
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734                                           struct device *hda_kdev, void *data)
735 {
736         struct drm_device *dev = dev_get_drvdata(kdev);
737         struct amdgpu_device *adev = drm_to_adev(dev);
738         struct drm_audio_component *acomp = data;
739
740         acomp->ops = NULL;
741         acomp->dev = NULL;
742         adev->dm.audio_component = NULL;
743 }
744
745 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
746         .bind   = amdgpu_dm_audio_component_bind,
747         .unbind = amdgpu_dm_audio_component_unbind,
748 };
749
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
751 {
752         int i, ret;
753
754         if (!amdgpu_audio)
755                 return 0;
756
757         adev->mode_info.audio.enabled = true;
758
759         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
760
761         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762                 adev->mode_info.audio.pin[i].channels = -1;
763                 adev->mode_info.audio.pin[i].rate = -1;
764                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
765                 adev->mode_info.audio.pin[i].status_bits = 0;
766                 adev->mode_info.audio.pin[i].category_code = 0;
767                 adev->mode_info.audio.pin[i].connected = false;
768                 adev->mode_info.audio.pin[i].id =
769                         adev->dm.dc->res_pool->audios[i]->inst;
770                 adev->mode_info.audio.pin[i].offset = 0;
771         }
772
773         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
774         if (ret < 0)
775                 return ret;
776
777         adev->dm.audio_registered = true;
778
779         return 0;
780 }
781
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
783 {
784         if (!amdgpu_audio)
785                 return;
786
787         if (!adev->mode_info.audio.enabled)
788                 return;
789
790         if (adev->dm.audio_registered) {
791                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792                 adev->dm.audio_registered = false;
793         }
794
795         /* TODO: Disable audio? */
796
797         adev->mode_info.audio.enabled = false;
798 }
799
800 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
801 {
802         struct drm_audio_component *acomp = adev->dm.audio_component;
803
804         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
806
807                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
808                                                  pin, -1);
809         }
810 }
811
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
813 {
814         const struct dmcub_firmware_header_v1_0 *hdr;
815         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817         const struct firmware *dmub_fw = adev->dm.dmub_fw;
818         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819         struct abm *abm = adev->dm.dc->res_pool->abm;
820         struct dmub_srv_hw_params hw_params;
821         enum dmub_status status;
822         const unsigned char *fw_inst_const, *fw_bss_data;
823         uint32_t i, fw_inst_const_size, fw_bss_data_size;
824         bool has_hw_support;
825
826         if (!dmub_srv)
827                 /* DMUB isn't supported on the ASIC. */
828                 return 0;
829
830         if (!fb_info) {
831                 DRM_ERROR("No framebuffer info for DMUB service.\n");
832                 return -EINVAL;
833         }
834
835         if (!dmub_fw) {
836                 /* Firmware required for DMUB support. */
837                 DRM_ERROR("No firmware provided for DMUB.\n");
838                 return -EINVAL;
839         }
840
841         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842         if (status != DMUB_STATUS_OK) {
843                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
844                 return -EINVAL;
845         }
846
847         if (!has_hw_support) {
848                 DRM_INFO("DMUB unsupported on ASIC\n");
849                 return 0;
850         }
851
852         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
853
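        /*
         * As implied by the offset math below, the ucode array is laid out
         * as: 0x100-byte PSP header | inst_const | 0x100-byte PSP footer,
         * with bss_data following immediately after.
         */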
854         fw_inst_const = dmub_fw->data +
855                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
856                         PSP_HEADER_BYTES;
857
858         fw_bss_data = dmub_fw->data +
859                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860                       le32_to_cpu(hdr->inst_const_bytes);
861
862         /* Copy firmware and bios info into FB memory. */
863         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
865
866         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
867
868         /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869          * amdgpu_ucode_init_single_fw will load the dmub firmware's
870          * fw_inst_const part to cw0; otherwise, the firmware back door load
871          * is done here in dm_dmub_hw_init.
872          */
873         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
875                                 fw_inst_const_size);
876         }
877
878         if (fw_bss_data_size)
879                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880                        fw_bss_data, fw_bss_data_size);
881
882         /* Copy firmware bios info into FB memory. */
883         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
884                adev->bios_size);
885
886         /* Reset regions that need to be reset. */
887         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
889
890         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
892
893         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
895
896         /* Initialize hardware. */
897         memset(&hw_params, 0, sizeof(hw_params));
898         hw_params.fb_base = adev->gmc.fb_start;
899         hw_params.fb_offset = adev->gmc.aper_base;
900
901         /* backdoor load firmware and trigger dmub running */
902         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903                 hw_params.load_inst_const = true;
904
905         if (dmcu)
906                 hw_params.psp_version = dmcu->psp_version;
907
908         for (i = 0; i < fb_info->num_fb; ++i)
909                 hw_params.fb[i] = &fb_info->fb[i];
910
911         status = dmub_srv_hw_init(dmub_srv, &hw_params);
912         if (status != DMUB_STATUS_OK) {
913                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
914                 return -EINVAL;
915         }
916
917         /* Wait for firmware load to finish. */
918         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919         if (status != DMUB_STATUS_OK)
920                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
921
922         /* Init DMCU and ABM if available. */
923         if (dmcu && abm) {
924                 dmcu->funcs->dmcu_init(dmcu);
925                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
926         }
927
928         adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929         if (!adev->dm.dc->ctx->dmub_srv) {
930                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
931                 return -ENOMEM;
932         }
933
934         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935                  adev->dm.dmcub_fw_version);
936
937         return 0;
938 }
939
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 #define DMUB_TRACE_MAX_READ 64
942 static void dm_dmub_trace_high_irq(void *interrupt_params)
943 {
944         struct common_irq_params *irq_params = interrupt_params;
945         struct amdgpu_device *adev = irq_params->adev;
946         struct amdgpu_display_manager *dm = &adev->dm;
947         struct dmcub_trace_buf_entry entry = { 0 };
948         uint32_t count = 0;
949
950         do {
951                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
952                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
953                                                         entry.param0, entry.param1);
954
955                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
956                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
957                 } else
958                         break;
959
960                 count++;
961
962         } while (count <= DMUB_TRACE_MAX_READ);
963
964         ASSERT(count <= DMUB_TRACE_MAX_READ);
965 }
966
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
968 {
969         uint64_t pt_base;
970         uint32_t logical_addr_low;
971         uint32_t logical_addr_high;
972         uint32_t agp_base, agp_bot, agp_top;
973         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
974
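        /*
         * The system aperture bounds are programmed in 256KB units, hence
         * the >> 18 shifts; GART page table addresses use 4KB pages (>> 12).
         */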
975         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
977
978         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
979                 /*
980                  * Raven2 has a HW issue that prevents it from using the vram
981                  * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
982                  * increase the system aperture high address (by 1) to get rid
983                  * of the VM fault and hardware hang.
984                  */
985                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
986         else
987                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
988
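        /* The AGP aperture bounds are programmed in 16MB units. */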
989         agp_base = 0;
990         agp_bot = adev->gmc.agp_start >> 24;
991         agp_top = adev->gmc.agp_end >> 24;
992
994         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999         page_table_base.low_part = lower_32_bits(pt_base);
1000
1001         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1003
1004         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1005         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1007
1008         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1011
1012         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1015
1016         pa_config->is_hvm_enabled = 0;
1018 }
1019 #endif
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
1021 static void event_mall_stutter(struct work_struct *work)
1022 {
1024         struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025         struct amdgpu_display_manager *dm = vblank_work->dm;
1026
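        /*
         * Idle (MALL) optimizations are only allowed while no CRTC has its
         * vblank interrupt enabled, so enables/disables are refcounted in
         * active_vblank_irq_count under dc_lock.
         */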
1027         mutex_lock(&dm->dc_lock);
1028
1029         if (vblank_work->enable)
1030                 dm->active_vblank_irq_count++;
1031         else if (dm->active_vblank_irq_count)
1032                 dm->active_vblank_irq_count--;
1033
1034         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1035
1036         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1037
1038         mutex_unlock(&dm->dc_lock);
1039 }
1040
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1042 {
1044         int max_caps = dc->caps.max_links;
1045         struct vblank_workqueue *vblank_work;
1046         int i = 0;
1047
1048         vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049         if (ZERO_OR_NULL_PTR(vblank_work)) {
1050                 kfree(vblank_work);
1051                 return NULL;
1052         }
1053
1054         for (i = 0; i < max_caps; i++)
1055                 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1056
1057         return vblank_work;
1058 }
1059 #endif
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1061 {
1062         struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064         struct dc_callback_init init_params;
1065 #endif
1066         int r;
1067
1068         adev->dm.ddev = adev_to_drm(adev);
1069         adev->dm.adev = adev;
1070
1071         /* Zero all the fields */
1072         memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074         memset(&init_params, 0, sizeof(init_params));
1075 #endif
1076
1077         mutex_init(&adev->dm.dc_lock);
1078         mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080         spin_lock_init(&adev->dm.vblank_lock);
1081 #endif
1082
1083         if (amdgpu_dm_irq_init(adev)) {
1084                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1085                 goto error;
1086         }
1087
1088         init_data.asic_id.chip_family = adev->family;
1089
1090         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1092
1093         init_data.asic_id.vram_width = adev->gmc.vram_width;
1094         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095         init_data.asic_id.atombios_base_address =
1096                 adev->mode_info.atom_context->bios;
1097
1098         init_data.driver = adev;
1099
1100         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1101
1102         if (!adev->dm.cgs_device) {
1103                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1104                 goto error;
1105         }
1106
1107         init_data.cgs_device = adev->dm.cgs_device;
1108
1109         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1110
1111         switch (adev->asic_type) {
1112         case CHIP_CARRIZO:
1113         case CHIP_STONEY:
1114         case CHIP_RAVEN:
1115         case CHIP_RENOIR:
1116                 init_data.flags.gpu_vm_support = true;
1117                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118                         init_data.flags.disable_dmcu = true;
1119                 break;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1121         case CHIP_VANGOGH:
1122                 init_data.flags.gpu_vm_support = true;
1123                 break;
1124 #endif
1125         default:
1126                 break;
1127         }
1128
1129         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130                 init_data.flags.fbc_support = true;
1131
1132         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133                 init_data.flags.multi_mon_pp_mclk_switch = true;
1134
1135         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136                 init_data.flags.disable_fractional_pwm = true;
1137
1138         init_data.flags.power_down_display_on_boot = true;
1139
1140         INIT_LIST_HEAD(&adev->dm.da_list);
1141         /* Display Core create. */
1142         adev->dm.dc = dc_create(&init_data);
1143
1144         if (adev->dm.dc) {
1145                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1146         } else {
1147                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1148                 goto error;
1149         }
1150
1151         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1154         }
1155
1156         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1158
1159         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160                 adev->dm.dc->debug.disable_stutter = true;
1161
1162         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163                 adev->dm.dc->debug.disable_dsc = true;
1164
1165         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166                 adev->dm.dc->debug.disable_clock_gate = true;
1167
1168         r = dm_dmub_hw_init(adev);
1169         if (r) {
1170                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1171                 goto error;
1172         }
1173
1174         dc_hardware_init(adev->dm.dc);
1175
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
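        /* On APUs, hand the physical address space layout to DC. */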
1177         if (adev->apu_flags) {
1178                 struct dc_phy_addr_space_config pa_config;
1179
1180                 mmhub_read_system_context(adev, &pa_config);
1181
1182                 /* Call the DC init_memory func */
1183                 dc_setup_system_context(adev->dm.dc, &pa_config);
1184         }
1185 #endif
1186
1187         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188         if (!adev->dm.freesync_module) {
1189                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1190         } else
1192                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1193                                 adev->dm.freesync_module);
1194
1195         amdgpu_dm_init_color_mod();
1196
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198         if (adev->dm.dc->caps.max_links > 0) {
1199                 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1200
1201                 if (!adev->dm.vblank_workqueue)
1202                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1203                 else
1204                         DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1205         }
1206 #endif
1207
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209         if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1211
1212                 if (!adev->dm.hdcp_workqueue)
1213                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1214                 else
1215                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1216
1217                 dc_init_callbacks(adev->dm.dc, &init_params);
1218         }
1219 #endif
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1222 #endif
1223         if (amdgpu_dm_initialize_drm_device(adev)) {
1224                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1226                 goto error;
1227         }
1228
1229         /* create fake encoders for MST */
1230         dm_dp_create_fake_mst_encoders(adev);
1231
1232         /* TODO: Add_display_info? */
1233
1234         /* TODO use dynamic cursor width */
1235         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1237
1238         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1239                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1241                 goto error;
1242         }
1243
1245         DRM_DEBUG_DRIVER("KMS initialized.\n");
1246
1247         return 0;
1248 error:
1249         amdgpu_dm_fini(adev);
1250
1251         return -EINVAL;
1252 }
1253
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1255 {
1256         int i;
1257
1258         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1260         }
1261
1262         amdgpu_dm_audio_fini(adev);
1263
1264         amdgpu_dm_destroy_drm_device(&adev->dm);
1265
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267         if (adev->dm.crc_rd_wrk) {
1268                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269                 kfree(adev->dm.crc_rd_wrk);
1270                 adev->dm.crc_rd_wrk = NULL;
1271         }
1272 #endif
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274         if (adev->dm.hdcp_workqueue) {
1275                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276                 adev->dm.hdcp_workqueue = NULL;
1277         }
1278
1279         if (adev->dm.dc)
1280                 dc_deinit_callbacks(adev->dm.dc);
1281 #endif
1282
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284         if (adev->dm.vblank_workqueue) {
1285                 adev->dm.vblank_workqueue->dm = NULL;
1286                 kfree(adev->dm.vblank_workqueue);
1287                 adev->dm.vblank_workqueue = NULL;
1288         }
1289 #endif
1290
1291         if (adev->dm.dc->ctx->dmub_srv) {
1292                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293                 adev->dm.dc->ctx->dmub_srv = NULL;
1294         }
1295
1296         if (adev->dm.dmub_bo)
1297                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298                                       &adev->dm.dmub_bo_gpu_addr,
1299                                       &adev->dm.dmub_bo_cpu_addr);
1300
1301         /* DC Destroy TODO: Replace destroy DAL */
1302         if (adev->dm.dc)
1303                 dc_destroy(&adev->dm.dc);
1304         /*
1305          * TODO: pageflip, vlank interrupt
1306          *
1307          * amdgpu_dm_irq_fini(adev);
1308          */
1309
1310         if (adev->dm.cgs_device) {
1311                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312                 adev->dm.cgs_device = NULL;
1313         }
1314         if (adev->dm.freesync_module) {
1315                 mod_freesync_destroy(adev->dm.freesync_module);
1316                 adev->dm.freesync_module = NULL;
1317         }
1318
1319         mutex_destroy(&adev->dm.audio_lock);
1320         mutex_destroy(&adev->dm.dc_lock);
1323 }
1324
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1326 {
1327         const char *fw_name_dmcu = NULL;
1328         int r;
1329         const struct dmcu_firmware_header_v1_0 *hdr;
1330
1331         switch (adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1333         case CHIP_TAHITI:
1334         case CHIP_PITCAIRN:
1335         case CHIP_VERDE:
1336         case CHIP_OLAND:
1337 #endif
1338         case CHIP_BONAIRE:
1339         case CHIP_HAWAII:
1340         case CHIP_KAVERI:
1341         case CHIP_KABINI:
1342         case CHIP_MULLINS:
1343         case CHIP_TONGA:
1344         case CHIP_FIJI:
1345         case CHIP_CARRIZO:
1346         case CHIP_STONEY:
1347         case CHIP_POLARIS11:
1348         case CHIP_POLARIS10:
1349         case CHIP_POLARIS12:
1350         case CHIP_VEGAM:
1351         case CHIP_VEGA10:
1352         case CHIP_VEGA12:
1353         case CHIP_VEGA20:
1354         case CHIP_NAVI10:
1355         case CHIP_NAVI14:
1356         case CHIP_RENOIR:
1357         case CHIP_SIENNA_CICHLID:
1358         case CHIP_NAVY_FLOUNDER:
1359         case CHIP_DIMGREY_CAVEFISH:
1360         case CHIP_VANGOGH:
1361                 return 0;
1362         case CHIP_NAVI12:
1363                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1364                 break;
1365         case CHIP_RAVEN:
1366                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1367                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1368                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1369                 else
1370                         return 0;
1372                 break;
1373         default:
1374                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1375                 return -EINVAL;
1376         }
1377
1378         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1380                 return 0;
1381         }
1382
1383         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1384         if (r == -ENOENT) {
1385                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387                 adev->dm.fw_dmcu = NULL;
1388                 return 0;
1389         }
1390         if (r) {
1391                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1392                         fw_name_dmcu);
1393                 return r;
1394         }
1395
1396         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1397         if (r) {
1398                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1399                         fw_name_dmcu);
1400                 release_firmware(adev->dm.fw_dmcu);
1401                 adev->dm.fw_dmcu = NULL;
1402                 return r;
1403         }
1404
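        /*
         * The DMCU image is registered as two PSP-loaded pieces below: the
         * ERAM code (ucode_size_bytes - intv_size_bytes) and the interrupt
         * vectors (intv_size_bytes).
         */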
1405         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408         adev->firmware.fw_size +=
1409                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1410
1411         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413         adev->firmware.fw_size +=
1414                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1415
1416         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1417
1418         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1419
1420         return 0;
1421 }
1422
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425         struct amdgpu_device *adev = ctx;
1426
1427         return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431                                      uint32_t value)
1432 {
1433         struct amdgpu_device *adev = ctx;
1434
	dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440         struct dmub_srv_create_params create_params;
1441         struct dmub_srv_region_params region_params;
1442         struct dmub_srv_region_info region_info;
1443         struct dmub_srv_fb_params fb_params;
1444         struct dmub_srv_fb_info *fb_info;
1445         struct dmub_srv *dmub_srv;
1446         const struct dmcub_firmware_header_v1_0 *hdr;
1447         const char *fw_name_dmub;
1448         enum dmub_asic dmub_asic;
1449         enum dmub_status status;
1450         int r;
1451
1452         switch (adev->asic_type) {
1453         case CHIP_RENOIR:
1454                 dmub_asic = DMUB_ASIC_DCN21;
1455                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458                 break;
1459         case CHIP_SIENNA_CICHLID:
1460                 dmub_asic = DMUB_ASIC_DCN30;
1461                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462                 break;
1463         case CHIP_NAVY_FLOUNDER:
1464                 dmub_asic = DMUB_ASIC_DCN30;
1465                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466                 break;
1467         case CHIP_VANGOGH:
1468                 dmub_asic = DMUB_ASIC_DCN301;
1469                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470                 break;
1471         case CHIP_DIMGREY_CAVEFISH:
1472                 dmub_asic = DMUB_ASIC_DCN302;
1473                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474                 break;
1475
1476         default:
1477                 /* ASIC doesn't support DMUB. */
1478                 return 0;
1479         }
1480
1481         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482         if (r) {
1483                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484                 return 0;
1485         }
1486
1487         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488         if (r) {
1489                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490                 return 0;
1491         }
1492
	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Cache the firmware version before it is first logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1508
1509         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510         dmub_srv = adev->dm.dmub_srv;
1511
1512         if (!dmub_srv) {
1513                 DRM_ERROR("Failed to allocate DMUB service!\n");
1514                 return -ENOMEM;
1515         }
1516
1517         memset(&create_params, 0, sizeof(create_params));
1518         create_params.user_ctx = adev;
1519         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521         create_params.asic = dmub_asic;
1522
1523         /* Create the DMUB service. */
1524         status = dmub_srv_create(dmub_srv, &create_params);
1525         if (status != DMUB_STATUS_OK) {
1526                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1527                 return -EINVAL;
1528         }
1529
1530         /* Calculate the size of all the regions for the DMUB service. */
1531         memset(&region_params, 0, sizeof(region_params));
1532
1533         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536         region_params.vbios_size = adev->bios_size;
1537         region_params.fw_bss_data = region_params.bss_data_size ?
1538                 adev->dm.dmub_fw->data +
1539                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541         region_params.fw_inst_const =
1542                 adev->dm.dmub_fw->data +
1543                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544                 PSP_HEADER_BYTES;
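	/*
	 * Firmware image layout assumed above: a PSP header precedes the
	 * instruction constants, a PSP footer follows them, and the optional
	 * BSS/data region starts right after inst_const_bytes; hence the
	 * header/footer sizes are carved out of inst_const_size and the two
	 * region pointers are offset accordingly.
	 */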
1545
1546         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547                                            &region_info);
1548
1549         if (status != DMUB_STATUS_OK) {
1550                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551                 return -EINVAL;
1552         }
1553
1554         /*
1555          * Allocate a framebuffer based on the total size of all the regions.
1556          * TODO: Move this into GART.
1557          */
1558         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560                                     &adev->dm.dmub_bo_gpu_addr,
1561                                     &adev->dm.dmub_bo_cpu_addr);
1562         if (r)
1563                 return r;
1564
1565         /* Rebase the regions on the framebuffer address. */
1566         memset(&fb_params, 0, sizeof(fb_params));
1567         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569         fb_params.region_info = &region_info;
1570
1571         adev->dm.dmub_fb_info =
1572                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573         fb_info = adev->dm.dmub_fb_info;
1574
1575         if (!fb_info) {
1576                 DRM_ERROR(
1577                         "Failed to allocate framebuffer info for DMUB service!\n");
1578                 return -ENOMEM;
1579         }
1580
1581         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582         if (status != DMUB_STATUS_OK) {
1583                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584                 return -EINVAL;
1585         }
1586
1587         return 0;
1588 }
1589
1590 static int dm_sw_init(void *handle)
1591 {
1592         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593         int r;
1594
1595         r = dm_dmub_sw_init(adev);
1596         if (r)
1597                 return r;
1598
1599         return load_dmcu_fw(adev);
1600 }
1601
1602 static int dm_sw_fini(void *handle)
1603 {
1604         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605
1606         kfree(adev->dm.dmub_fb_info);
1607         adev->dm.dmub_fb_info = NULL;
1608
1609         if (adev->dm.dmub_srv) {
1610                 dmub_srv_destroy(adev->dm.dmub_srv);
1611                 adev->dm.dmub_srv = NULL;
1612         }
1613
1614         release_firmware(adev->dm.dmub_fw);
1615         adev->dm.dmub_fw = NULL;
1616
1617         release_firmware(adev->dm.fw_dmcu);
1618         adev->dm.fw_dmcu = NULL;
1619
1620         return 0;
1621 }
1622
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625         struct amdgpu_dm_connector *aconnector;
1626         struct drm_connector *connector;
1627         struct drm_connector_list_iter iter;
1628         int ret = 0;
1629
1630         drm_connector_list_iter_begin(dev, &iter);
1631         drm_for_each_connector_iter(connector, &iter) {
1632                 aconnector = to_amdgpu_dm_connector(connector);
1633                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634                     aconnector->mst_mgr.aux) {
1635                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636                                          aconnector,
1637                                          aconnector->base.base.id);
1638
1639                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640                         if (ret < 0) {
1641                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1642                                 aconnector->dc_link->type =
1643                                         dc_connection_single;
1644                                 break;
1645                         }
1646                 }
1647         }
1648         drm_connector_list_iter_end(&iter);
1649
1650         return ret;
1651 }
1652
1653 static int dm_late_init(void *handle)
1654 {
1655         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656
1657         struct dmcu_iram_parameters params;
1658         unsigned int linear_lut[16];
1659         int i;
1660         struct dmcu *dmcu = NULL;
1661         bool ret = true;
1662
1663         dmcu = adev->dm.dc->res_pool->dmcu;
1664
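	/* Build a 16-point linear backlight LUT spanning 0x0000-0xFFFF. */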
1665         for (i = 0; i < 16; i++)
1666                 linear_lut[i] = 0xFFFF * i / 15;
1667
1668         params.set = 0;
1669         params.backlight_ramping_start = 0xCCCC;
1670         params.backlight_ramping_reduction = 0xCCCCCCCC;
1671         params.backlight_lut_array_size = 16;
1672         params.backlight_lut_array = linear_lut;
1673
	/* Minimum backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
1677         params.min_abm_backlight = 0x28F;
1678
	/*
	 * When ABM is implemented on the DMCUB (ABM 2.4 and up), the dmcu
	 * object will be NULL.
	 */
1683         if (dmcu)
1684                 ret = dmcu_load_iram(dmcu, params);
1685         else if (adev->dm.dc->ctx->dmub_srv)
1686                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1687
1688         if (!ret)
1689                 return -EINVAL;
1690
1691         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1692 }
1693
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696         struct amdgpu_dm_connector *aconnector;
1697         struct drm_connector *connector;
1698         struct drm_connector_list_iter iter;
1699         struct drm_dp_mst_topology_mgr *mgr;
1700         int ret;
1701         bool need_hotplug = false;
1702
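	/*
	 * Suspend or resume the MST topology manager on every MST root
	 * connector; if a topology fails to resume, tear its MST state down
	 * and raise a hotplug event so userspace can re-probe.
	 */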
1703         drm_connector_list_iter_begin(dev, &iter);
1704         drm_for_each_connector_iter(connector, &iter) {
1705                 aconnector = to_amdgpu_dm_connector(connector);
1706                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707                     aconnector->mst_port)
1708                         continue;
1709
1710                 mgr = &aconnector->mst_mgr;
1711
1712                 if (suspend) {
1713                         drm_dp_mst_topology_mgr_suspend(mgr);
1714                 } else {
1715                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716                         if (ret < 0) {
1717                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718                                 need_hotplug = true;
1719                         }
1720                 }
1721         }
1722         drm_connector_list_iter_end(&iter);
1723
1724         if (need_hotplug)
1725                 drm_kms_helper_hotplug_event(dev);
1726 }
1727
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730         struct smu_context *smu = &adev->smu;
1731         int ret = 0;
1732
1733         if (!is_support_sw_smu(adev))
1734                 return 0;
1735
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed: the
	 * settings should be passed to smu during boot up and on resume from
	 * s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create (dcn20_resource_construct), then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
1767         case CHIP_NAVI10:
1768         case CHIP_NAVI14:
1769         case CHIP_NAVI12:
1770                 break;
1771         default:
1772                 return 0;
1773         }
1774
1775         ret = smu_write_watermarks_table(smu);
1776         if (ret) {
1777                 DRM_ERROR("Failed to update WMTABLE!\n");
1778                 return ret;
1779         }
1780
1781         return 0;
1782 }
1783
1784 /**
1785  * dm_hw_init() - Initialize DC device
1786  * @handle: The base driver device containing the amdgpu_dm device.
1787  *
1788  * Initialize the &struct amdgpu_display_manager device. This involves calling
1789  * the initializers of each DM component, then populating the struct with them.
1790  *
1791  * Although the function implies hardware initialization, both hardware and
1792  * software are initialized here. Splitting them out to their relevant init
1793  * hooks is a future TODO item.
1794  *
1795  * Some notable things that are initialized here:
1796  *
1797  * - Display Core, both software and hardware
1798  * - DC modules that we need (freesync and color management)
1799  * - DRM software states
1800  * - Interrupt sources and handlers
1801  * - Vblank support
1802  * - Debug FS entries, if enabled
1803  */
1804 static int dm_hw_init(void *handle)
1805 {
1806         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807         /* Create DAL display manager */
1808         amdgpu_dm_init(adev);
1809         amdgpu_dm_hpd_init(adev);
1810
1811         return 0;
1812 }
1813
1814 /**
1815  * dm_hw_fini() - Teardown DC device
1816  * @handle: The base driver device containing the amdgpu_dm device.
1817  *
1818  * Teardown components within &struct amdgpu_display_manager that require
1819  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820  * were loaded. Also flush IRQ workqueues and disable them.
1821  */
1822 static int dm_hw_fini(void *handle)
1823 {
1824         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825
1826         amdgpu_dm_hpd_fini(adev);
1827
1828         amdgpu_dm_irq_fini(adev);
1829         amdgpu_dm_fini(adev);
1830         return 0;
1831 }
1832
1833
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838                                  struct dc_state *state, bool enable)
1839 {
1840         enum dc_irq_source irq_source;
1841         struct amdgpu_crtc *acrtc;
1842         int rc = -EBUSY;
1843         int i = 0;
1844
1845         for (i = 0; i < state->stream_count; i++) {
1846                 acrtc = get_crtc_by_otg_inst(
1847                                 adev, state->stream_status[i].primary_otg_inst);
1848
1849                 if (acrtc && state->stream_status[i].plane_count != 0) {
1850                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1854                         if (rc)
1855                                 DRM_WARN("Failed to %s pflip interrupts\n",
1856                                          enable ? "enable" : "disable");
1857
1858                         if (enable) {
1859                                 rc = dm_enable_vblank(&acrtc->base);
1860                                 if (rc)
1861                                         DRM_WARN("Failed to enable vblank interrupts\n");
1862                         } else {
1863                                 dm_disable_vblank(&acrtc->base);
1864                         }
1865
1866                 }
1867         }
1868
1869 }
1870
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1872 {
1873         struct dc_state *context = NULL;
1874         enum dc_status res = DC_ERROR_UNEXPECTED;
1875         int i;
1876         struct dc_stream_state *del_streams[MAX_PIPES];
1877         int del_streams_count = 0;
1878
1879         memset(del_streams, 0, sizeof(del_streams));
1880
1881         context = dc_create_state(dc);
1882         if (context == NULL)
1883                 goto context_alloc_fail;
1884
1885         dc_resource_state_copy_construct_current(dc, context);
1886
1887         /* First remove from context all streams */
1888         for (i = 0; i < context->stream_count; i++) {
1889                 struct dc_stream_state *stream = context->streams[i];
1890
1891                 del_streams[del_streams_count++] = stream;
1892         }
1893
1894         /* Remove all planes for removed streams and then remove the streams */
1895         for (i = 0; i < del_streams_count; i++) {
1896                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897                         res = DC_FAIL_DETACH_SURFACES;
1898                         goto fail;
1899                 }
1900
1901                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902                 if (res != DC_OK)
1903                         goto fail;
1904         }
1905
	res = dc_validate_global_state(dc, context, false);
1908
1909         if (res != DC_OK) {
1910                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1911                 goto fail;
1912         }
1913
1914         res = dc_commit_state(dc, context);
1915
1916 fail:
1917         dc_release_state(context);
1918
1919 context_alloc_fail:
1920         return res;
1921 }
1922
1923 static int dm_suspend(void *handle)
1924 {
1925         struct amdgpu_device *adev = handle;
1926         struct amdgpu_display_manager *dm = &adev->dm;
1927         int ret = 0;
1928
1929         if (amdgpu_in_reset(adev)) {
1930                 mutex_lock(&dm->dc_lock);
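		/*
		 * Note: dc_lock taken here is deliberately left held across
		 * the GPU reset; the matching unlock happens in the
		 * amdgpu_in_reset() path of dm_resume().
		 */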
1931
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933                 dc_allow_idle_optimizations(adev->dm.dc, false);
1934 #endif
1935
1936                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937
1938                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939
1940                 amdgpu_dm_commit_zero_streams(dm->dc);
1941
1942                 amdgpu_dm_irq_suspend(adev);
1943
1944                 return ret;
1945         }
1946
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948         amdgpu_dm_crtc_secure_display_suspend(adev);
1949 #endif
1950         WARN_ON(adev->dm.cached_state);
1951         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1952
1953         s3_handle_mst(adev_to_drm(adev), true);
1954
1955         amdgpu_dm_irq_suspend(adev);
1956
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959
1960         return 0;
1961 }
1962
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965                                              struct drm_crtc *crtc)
1966 {
1967         uint32_t i;
1968         struct drm_connector_state *new_con_state;
1969         struct drm_connector *connector;
1970         struct drm_crtc *crtc_from_state;
1971
1972         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973                 crtc_from_state = new_con_state->crtc;
1974
1975                 if (crtc_from_state == crtc)
1976                         return to_amdgpu_dm_connector(connector);
1977         }
1978
1979         return NULL;
1980 }
1981
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984         struct dc_sink_init_data sink_init_data = { 0 };
1985         struct display_sink_capability sink_caps = { 0 };
1986         enum dc_edid_status edid_status;
1987         struct dc_context *dc_ctx = link->ctx;
1988         struct dc_sink *sink = NULL;
1989         struct dc_sink *prev_sink = NULL;
1990
1991         link->type = dc_connection_none;
1992         prev_sink = link->local_sink;
1993
1994         if (prev_sink)
1995                 dc_sink_release(prev_sink);
1996
1997         switch (link->connector_signal) {
1998         case SIGNAL_TYPE_HDMI_TYPE_A: {
1999                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001                 break;
2002         }
2003
2004         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007                 break;
2008         }
2009
2010         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013                 break;
2014         }
2015
2016         case SIGNAL_TYPE_LVDS: {
2017                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2019                 break;
2020         }
2021
2022         case SIGNAL_TYPE_EDP: {
2023                 sink_caps.transaction_type =
2024                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025                 sink_caps.signal = SIGNAL_TYPE_EDP;
2026                 break;
2027         }
2028
2029         case SIGNAL_TYPE_DISPLAY_PORT: {
2030                 sink_caps.transaction_type =
2031                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
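		/*
		 * Note: the emulated DP sink is reported as
		 * SIGNAL_TYPE_VIRTUAL, presumably because no real link
		 * training can happen on a forced connector.
		 */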
2032                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033                 break;
2034         }
2035
2036         default:
2037                 DC_ERROR("Invalid connector type! signal:%d\n",
2038                         link->connector_signal);
2039                 return;
2040         }
2041
2042         sink_init_data.link = link;
2043         sink_init_data.sink_signal = sink_caps.signal;
2044
2045         sink = dc_sink_create(&sink_init_data);
2046         if (!sink) {
2047                 DC_ERROR("Failed to create sink!\n");
2048                 return;
2049         }
2050
2051         /* dc_sink_create returns a new reference */
2052         link->local_sink = sink;
2053
2054         edid_status = dm_helpers_read_local_edid(
2055                         link->ctx,
2056                         link,
2057                         sink);
2058
2059         if (edid_status != EDID_OK)
2060                 DC_ERROR("Failed to read EDID");
2061
2062 }
2063
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065                                      struct amdgpu_display_manager *dm)
2066 {
2067         struct {
2068                 struct dc_surface_update surface_updates[MAX_SURFACES];
2069                 struct dc_plane_info plane_infos[MAX_SURFACES];
2070                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072                 struct dc_stream_update stream_update;
	} *bundle;
2074         int k, m;
2075
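	/*
	 * The bundle is heap-allocated: with MAX_SURFACES entries in each
	 * array it would likely exceed the kernel stack frame limit if
	 * declared on the stack.
	 */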
2076         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077
2078         if (!bundle) {
2079                 dm_error("Failed to allocate update bundle\n");
2080                 goto cleanup;
2081         }
2082
2083         for (k = 0; k < dc_state->stream_count; k++) {
2084                 bundle->stream_update.stream = dc_state->streams[k];
2085
2086                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2087                         bundle->surface_updates[m].surface =
2088                                 dc_state->stream_status->plane_states[m];
2089                         bundle->surface_updates[m].surface->force_full_update =
2090                                 true;
2091                 }
2092                 dc_commit_updates_for_stream(
2093                         dm->dc, bundle->surface_updates,
2094                         dc_state->stream_status->plane_count,
2095                         dc_state->streams[k], &bundle->stream_update, dc_state);
2096         }
2097
2098 cleanup:
2099         kfree(bundle);
2100
2101         return;
2102 }
2103
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106         struct dc_stream_state *stream_state;
2107         struct amdgpu_dm_connector *aconnector = link->priv;
2108         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109         struct dc_stream_update stream_update;
2110         bool dpms_off = true;
2111
2112         memset(&stream_update, 0, sizeof(stream_update));
2113         stream_update.dpms_off = &dpms_off;
2114
2115         mutex_lock(&adev->dm.dc_lock);
2116         stream_state = dc_stream_find_from_link(link);
2117
2118         if (stream_state == NULL) {
2119                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120                 mutex_unlock(&adev->dm.dc_lock);
2121                 return;
2122         }
2123
2124         stream_update.stream = stream_state;
2125         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126                                      stream_state, &stream_update,
2127                                      stream_state->ctx->dc->current_state);
2128         mutex_unlock(&adev->dm.dc_lock);
2129 }
2130
2131 static int dm_resume(void *handle)
2132 {
2133         struct amdgpu_device *adev = handle;
2134         struct drm_device *ddev = adev_to_drm(adev);
2135         struct amdgpu_display_manager *dm = &adev->dm;
2136         struct amdgpu_dm_connector *aconnector;
2137         struct drm_connector *connector;
2138         struct drm_connector_list_iter iter;
2139         struct drm_crtc *crtc;
2140         struct drm_crtc_state *new_crtc_state;
2141         struct dm_crtc_state *dm_new_crtc_state;
2142         struct drm_plane *plane;
2143         struct drm_plane_state *new_plane_state;
2144         struct dm_plane_state *dm_new_plane_state;
2145         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146         enum dc_connection_type new_connection_type = dc_connection_none;
2147         struct dc_state *dc_state;
2148         int i, r, j;
2149
2150         if (amdgpu_in_reset(adev)) {
2151                 dc_state = dm->cached_dc_state;
2152
2153                 r = dm_dmub_hw_init(adev);
2154                 if (r)
2155                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156
2157                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158                 dc_resume(dm->dc);
2159
2160                 amdgpu_dm_irq_resume_early(adev);
2161
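		/*
		 * Force a full re-program of the cached state on the freshly
		 * reset hardware: mark every stream as mode-changed and raise
		 * all plane update flags.
		 */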
2162                 for (i = 0; i < dc_state->stream_count; i++) {
2163                         dc_state->streams[i]->mode_changed = true;
2164                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2165                                 dc_state->stream_status->plane_states[j]->update_flags.raw
2166                                         = 0xffffffff;
2167                         }
2168                 }
2169
2170                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171
2172                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173
2174                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175
2176                 dc_release_state(dm->cached_dc_state);
2177                 dm->cached_dc_state = NULL;
2178
2179                 amdgpu_dm_irq_resume_late(adev);
2180
2181                 mutex_unlock(&dm->dc_lock);
2182
2183                 return 0;
2184         }
2185         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186         dc_release_state(dm_state->context);
2187         dm_state->context = dc_create_state(dm->dc);
2188         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189         dc_resource_state_construct(dm->dc, dm_state->context);
2190
2191         /* Before powering on DC we need to re-initialize DMUB. */
2192         r = dm_dmub_hw_init(adev);
2193         if (r)
2194                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195
2196         /* power on hardware */
2197         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198
2199         /* program HPD filter */
2200         dc_resume(dm->dc);
2201
	/*
	 * Enable the HPD Rx IRQ early; this should be done before setting a
	 * mode, since short-pulse interrupts are used for MST.
	 */
2206         amdgpu_dm_irq_resume_early(adev);
2207
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2209         s3_handle_mst(ddev, false);
2210
	/* Do detection */
2212         drm_connector_list_iter_begin(ddev, &iter);
2213         drm_for_each_connector_iter(connector, &iter) {
2214                 aconnector = to_amdgpu_dm_connector(connector);
2215
		/*
		 * This is the case when traversing through already created
		 * MST connectors; they are handled by the MST framework and
		 * should be skipped here.
		 */
2220                 if (aconnector->mst_port)
2221                         continue;
2222
2223                 mutex_lock(&aconnector->hpd_lock);
2224                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225                         DRM_ERROR("KMS: Failed to detect connector\n");
2226
2227                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228                         emulated_link_detect(aconnector->dc_link);
2229                 else
2230                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231
2232                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233                         aconnector->fake_enable = false;
2234
2235                 if (aconnector->dc_sink)
2236                         dc_sink_release(aconnector->dc_sink);
2237                 aconnector->dc_sink = NULL;
2238                 amdgpu_dm_update_connector_after_detect(aconnector);
2239                 mutex_unlock(&aconnector->hpd_lock);
2240         }
2241         drm_connector_list_iter_end(&iter);
2242
2243         /* Force mode set in atomic commit */
2244         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245                 new_crtc_state->active_changed = true;
2246
2247         /*
2248          * atomic_check is expected to create the dc states. We need to release
2249          * them here, since they were duplicated as part of the suspend
2250          * procedure.
2251          */
2252         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254                 if (dm_new_crtc_state->stream) {
2255                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256                         dc_stream_release(dm_new_crtc_state->stream);
2257                         dm_new_crtc_state->stream = NULL;
2258                 }
2259         }
2260
2261         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263                 if (dm_new_plane_state->dc_state) {
2264                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265                         dc_plane_state_release(dm_new_plane_state->dc_state);
2266                         dm_new_plane_state->dc_state = NULL;
2267                 }
2268         }
2269
2270         drm_atomic_helper_resume(ddev, dm->cached_state);
2271
2272         dm->cached_state = NULL;
2273
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275         amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277
2278         amdgpu_dm_irq_resume_late(adev);
2279
2280         amdgpu_dm_smu_write_watermarks_table(adev);
2281
2282         return 0;
2283 }
2284
2285 /**
2286  * DOC: DM Lifecycle
2287  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296         .name = "dm",
2297         .early_init = dm_early_init,
2298         .late_init = dm_late_init,
2299         .sw_init = dm_sw_init,
2300         .sw_fini = dm_sw_fini,
2301         .hw_init = dm_hw_init,
2302         .hw_fini = dm_hw_fini,
2303         .suspend = dm_suspend,
2304         .resume = dm_resume,
2305         .is_idle = dm_is_idle,
2306         .wait_for_idle = dm_wait_for_idle,
2307         .check_soft_reset = dm_check_soft_reset,
2308         .soft_reset = dm_soft_reset,
2309         .set_clockgating_state = dm_set_clockgating_state,
2310         .set_powergating_state = dm_set_powergating_state,
2311 };
2312
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315         .type = AMD_IP_BLOCK_TYPE_DCE,
2316         .major = 1,
2317         .minor = 0,
2318         .rev = 0,
2319         .funcs = &amdgpu_dm_funcs,
2320 };
2321
2322
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330         .fb_create = amdgpu_display_user_framebuffer_create,
2331         .get_format_info = amd_get_format_info,
2332         .output_poll_changed = drm_fb_helper_output_poll_changed,
2333         .atomic_check = amdgpu_dm_atomic_check,
2334         .atomic_commit = drm_atomic_helper_commit,
2335 };
2336
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343         u32 max_cll, min_cll, max, min, q, r;
2344         struct amdgpu_dm_backlight_caps *caps;
2345         struct amdgpu_display_manager *dm;
2346         struct drm_connector *conn_base;
2347         struct amdgpu_device *adev;
2348         struct dc_link *link = NULL;
2349         static const u8 pre_computed_values[] = {
2350                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352
2353         if (!aconnector || !aconnector->dc_link)
2354                 return;
2355
2356         link = aconnector->dc_link;
2357         if (link->connector_signal != SIGNAL_TYPE_EDP)
2358                 return;
2359
2360         conn_base = &aconnector->base;
2361         adev = drm_to_adev(conn_base->dev);
2362         dm = &adev->dm;
2363         caps = &dm->backlight_caps;
2364         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365         caps->aux_support = false;
2366         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368
2369         if (caps->ext_caps->bits.oled == 1 ||
2370             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372                 caps->aux_support = true;
2373
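	/*
	 * The amdgpu_backlight module parameter can override the detection
	 * above: 0 forces PWM backlight control, 1 forces AUX, and the
	 * default auto value keeps the capability-based choice.
	 */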
2374         if (amdgpu_backlight == 0)
2375                 caps->aux_support = false;
2376         else if (amdgpu_backlight == 1)
2377                 caps->aux_support = true;
2378
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would need floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * CV can be written as CV = 32*q + r. Substituting this into the
	 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
	 * to pre-compute 50*2**(r/32) for r in 0..31. The table was generated
	 * with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results are stored in pre_computed_values.
	 */
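	/*
	 * Worked example with a hypothetical max_cll of 65: q = 65 / 32 = 2,
	 * r = 65 % 32 = 1, so max = (1 << 2) * pre_computed_values[1]
	 * = 4 * 51 = 204 nits, matching round(50 * 2**(65/32.0)).
	 */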
2394         q = max_cll >> 5;
2395         r = max_cll % 32;
2396         max = (1 << q) * pre_computed_values[r];
2397
	/* min luminance: maxLum * (CV/255)^2 / 100, kept in integer math */
	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2401
2402         caps->aux_max_input_signal = max;
2403         caps->aux_min_input_signal = min;
2404 }
2405
2406 void amdgpu_dm_update_connector_after_detect(
2407                 struct amdgpu_dm_connector *aconnector)
2408 {
2409         struct drm_connector *connector = &aconnector->base;
2410         struct drm_device *dev = connector->dev;
2411         struct dc_sink *sink;
2412
2413         /* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2415                 return;
2416
2417         sink = aconnector->dc_link->local_sink;
2418         if (sink)
2419                 dc_sink_retain(sink);
2420
	/*
	 * The EDID-managed connector gets its first update only in the
	 * mode_valid hook; the connector sink is then set to either a fake or
	 * a physical sink, depending on the link status.
	 * Skip if this was already done during boot.
	 */
2426         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427                         && aconnector->dc_em_sink) {
2428
		/*
		 * For headless S3 resume, use dc_em_sink to fake the stream,
		 * because on resume connector->sink is set to NULL.
		 */
2433                 mutex_lock(&dev->mode_config.mutex);
2434
2435                 if (sink) {
2436                         if (aconnector->dc_sink) {
2437                                 amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * Retain and release below bump up the sink
				 * refcount: the link no longer points to it
				 * after disconnect, so on the next crtc to
				 * connector reshuffle by the UMD we would
				 * otherwise get an unwanted dc_sink release.
				 */
2444                                 dc_sink_release(aconnector->dc_sink);
2445                         }
2446                         aconnector->dc_sink = sink;
2447                         dc_sink_retain(aconnector->dc_sink);
2448                         amdgpu_dm_update_freesync_caps(connector,
2449                                         aconnector->edid);
2450                 } else {
2451                         amdgpu_dm_update_freesync_caps(connector, NULL);
2452                         if (!aconnector->dc_sink) {
2453                                 aconnector->dc_sink = aconnector->dc_em_sink;
2454                                 dc_sink_retain(aconnector->dc_sink);
2455                         }
2456                 }
2457
2458                 mutex_unlock(&dev->mode_config.mutex);
2459
2460                 if (sink)
2461                         dc_sink_release(sink);
2462                 return;
2463         }
2464
2465         /*
2466          * TODO: temporary guard to look for proper fix
2467          * if this sink is MST sink, we should not do anything
2468          */
2469         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470                 dc_sink_release(sink);
2471                 return;
2472         }
2473
2474         if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...);
		 * the sink is unchanged, so there is nothing to do.
		 */
2479                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480                                 aconnector->connector_id);
2481                 if (sink)
2482                         dc_sink_release(sink);
2483                 return;
2484         }
2485
2486         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487                 aconnector->connector_id, aconnector->dc_sink, sink);
2488
2489         mutex_lock(&dev->mode_config.mutex);
2490
2491         /*
2492          * 1. Update status of the drm connector
2493          * 2. Send an event and let userspace tell us what to do
2494          */
2495         if (sink) {
2496                 /*
2497                  * TODO: check if we still need the S3 mode update workaround.
2498                  * If yes, put it here.
2499                  */
2500                 if (aconnector->dc_sink) {
2501                         amdgpu_dm_update_freesync_caps(connector, NULL);
2502                         dc_sink_release(aconnector->dc_sink);
2503                 }
2504
2505                 aconnector->dc_sink = sink;
2506                 dc_sink_retain(aconnector->dc_sink);
2507                 if (sink->dc_edid.length == 0) {
2508                         aconnector->edid = NULL;
2509                         if (aconnector->dc_link->aux_mode) {
2510                                 drm_dp_cec_unset_edid(
2511                                         &aconnector->dm_dp_aux.aux);
2512                         }
2513                 } else {
2514                         aconnector->edid =
2515                                 (struct edid *)sink->dc_edid.raw_edid;
2516
2517                         drm_connector_update_edid_property(connector,
2518                                                            aconnector->edid);
2519                         if (aconnector->dc_link->aux_mode)
2520                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521                                                     aconnector->edid);
2522                 }
2523
2524                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525                 update_connector_ext_caps(aconnector);
2526         } else {
2527                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528                 amdgpu_dm_update_freesync_caps(connector, NULL);
2529                 drm_connector_update_edid_property(connector, NULL);
2530                 aconnector->num_modes = 0;
2531                 dc_sink_release(aconnector->dc_sink);
2532                 aconnector->dc_sink = NULL;
2533                 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539         }
2540
2541         mutex_unlock(&dev->mode_config.mutex);
2542
2543         update_subconnector_property(aconnector);
2544
2545         if (sink)
2546                 dc_sink_release(sink);
2547 }
2548
2549 static void handle_hpd_irq(void *param)
2550 {
2551         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552         struct drm_connector *connector = &aconnector->base;
2553         struct drm_device *dev = connector->dev;
2554         enum dc_connection_type new_connection_type = dc_connection_none;
2555         struct amdgpu_device *adev = drm_to_adev(dev);
2556 #ifdef CONFIG_DRM_AMD_DC_HDCP
2557         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559
2560         if (adev->dm.disable_hpd_irq)
2561                 return;
2562
	/*
	 * In case of failure, or for MST, there is no need to update the
	 * connector status or to notify the OS, since (in the MST case) MST
	 * does this in its own context.
	 */
2567         mutex_lock(&aconnector->hpd_lock);
2568
2569 #ifdef CONFIG_DRM_AMD_DC_HDCP
2570         if (adev->dm.hdcp_workqueue) {
2571                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2572                 dm_con_state->update_hdcp = true;
2573         }
2574 #endif
2575         if (aconnector->fake_enable)
2576                 aconnector->fake_enable = false;
2577
2578         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2579                 DRM_ERROR("KMS: Failed to detect connector\n");
2580
2581         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2582                 emulated_link_detect(aconnector->dc_link);
2583
		drm_modeset_lock_all(dev);
2586                 dm_restore_drm_connector_state(dev, connector);
2587                 drm_modeset_unlock_all(dev);
2588
2589                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2590                         drm_kms_helper_hotplug_event(dev);
2591
2592         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2593                 if (new_connection_type == dc_connection_none &&
2594                     aconnector->dc_link->type == dc_connection_none)
2595                         dm_set_dpms_off(aconnector->dc_link);
2596
2597                 amdgpu_dm_update_connector_after_detect(aconnector);
2598
2599                 drm_modeset_lock_all(dev);
2600                 dm_restore_drm_connector_state(dev, connector);
2601                 drm_modeset_unlock_all(dev);
2602
2603                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2604                         drm_kms_helper_hotplug_event(dev);
2605         }
2606         mutex_unlock(&aconnector->hpd_lock);
2607
2608 }
2609
2610 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2611 {
2612         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2613         uint8_t dret;
2614         bool new_irq_handled = false;
2615         int dpcd_addr;
2616         int dpcd_bytes_to_read;
2617
2618         const int max_process_count = 30;
2619         int process_count = 0;
2620
2621         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2622
2623         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2624                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2625                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2626                 dpcd_addr = DP_SINK_COUNT;
2627         } else {
2628                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2629                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2630                 dpcd_addr = DP_SINK_COUNT_ESI;
2631         }
2632
2633         dret = drm_dp_dpcd_read(
2634                 &aconnector->dm_dp_aux.aux,
2635                 dpcd_addr,
2636                 esi,
2637                 dpcd_bytes_to_read);
2638
2639         while (dret == dpcd_bytes_to_read &&
2640                 process_count < max_process_count) {
2641                 uint8_t retry;
2642                 dret = 0;
2643
2644                 process_count++;
2645
2646                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2647                 /* handle HPD short pulse irq */
2648                 if (aconnector->mst_mgr.mst_state)
2649                         drm_dp_mst_hpd_irq(
2650                                 &aconnector->mst_mgr,
2651                                 esi,
2652                                 &new_irq_handled);
2653
2654                 if (new_irq_handled) {
			/* ACK via DPCD to notify the downstream devices */
2656                         const int ack_dpcd_bytes_to_write =
2657                                 dpcd_bytes_to_read - 1;
2658
2659                         for (retry = 0; retry < 3; retry++) {
2660                                 uint8_t wret;
2661
2662                                 wret = drm_dp_dpcd_write(
2663                                         &aconnector->dm_dp_aux.aux,
2664                                         dpcd_addr + 1,
2665                                         &esi[1],
2666                                         ack_dpcd_bytes_to_write);
2667                                 if (wret == ack_dpcd_bytes_to_write)
2668                                         break;
2669                         }
2670
2671                         /* check if there is new irq to be handled */
2672                         dret = drm_dp_dpcd_read(
2673                                 &aconnector->dm_dp_aux.aux,
2674                                 dpcd_addr,
2675                                 esi,
2676                                 dpcd_bytes_to_read);
2677
2678                         new_irq_handled = false;
2679                 } else {
2680                         break;
2681                 }
2682         }
2683
2684         if (process_count == max_process_count)
2685                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2686 }
2687
2688 static void handle_hpd_rx_irq(void *param)
2689 {
2690         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2691         struct drm_connector *connector = &aconnector->base;
2692         struct drm_device *dev = connector->dev;
2693         struct dc_link *dc_link = aconnector->dc_link;
2694         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2695         bool result = false;
2696         enum dc_connection_type new_connection_type = dc_connection_none;
2697         struct amdgpu_device *adev = drm_to_adev(dev);
2698         union hpd_irq_data hpd_irq_data;
2699
2700         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2701
2702         if (adev->dm.disable_hpd_irq)
2703                 return;
2704
	/*
	 * TODO: Temporary mutex to protect the HPD interrupt from a GPIO
	 * conflict; after an i2c helper is implemented, this mutex should be
	 * retired.
	 */
2711         if (dc_link->type != dc_connection_mst_branch)
2712                 mutex_lock(&aconnector->hpd_lock);
2713
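	/*
	 * Read the HPD Rx IRQ data up front so that MST up-request and
	 * down-reply short pulses can be routed to the MST manager below
	 * before falling back to the generic DC handler.
	 */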
2714         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2715
2716         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2717                 (dc_link->type == dc_connection_mst_branch)) {
2718                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2719                         result = true;
2720                         dm_handle_hpd_rx_irq(aconnector);
2721                         goto out;
2722                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2723                         result = false;
2724                         dm_handle_hpd_rx_irq(aconnector);
2725                         goto out;
2726                 }
2727         }
2728
2729         mutex_lock(&adev->dm.dc_lock);
2730 #ifdef CONFIG_DRM_AMD_DC_HDCP
2731         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2732 #else
2733         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2734 #endif
2735         mutex_unlock(&adev->dm.dc_lock);
2736
2737 out:
2738         if (result && !is_mst_root_connector) {
2739                 /* Downstream Port status changed. */
2740                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2741                         DRM_ERROR("KMS: Failed to detect connector\n");
2742
2743                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2744                         emulated_link_detect(dc_link);
2745
2746                         if (aconnector->fake_enable)
2747                                 aconnector->fake_enable = false;
2748
2749                         amdgpu_dm_update_connector_after_detect(aconnector);
2750
			drm_modeset_lock_all(dev);
2753                         dm_restore_drm_connector_state(dev, connector);
2754                         drm_modeset_unlock_all(dev);
2755
2756                         drm_kms_helper_hotplug_event(dev);
2757                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2758
2759                         if (aconnector->fake_enable)
2760                                 aconnector->fake_enable = false;
2761
2762                         amdgpu_dm_update_connector_after_detect(aconnector);
2763
			drm_modeset_lock_all(dev);
2766                         dm_restore_drm_connector_state(dev, connector);
2767                         drm_modeset_unlock_all(dev);
2768
2769                         drm_kms_helper_hotplug_event(dev);
2770                 }
2771         }
2772 #ifdef CONFIG_DRM_AMD_DC_HDCP
2773         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2774                 if (adev->dm.hdcp_workqueue)
2775                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2776         }
2777 #endif
2778
2779         if (dc_link->type != dc_connection_mst_branch) {
2780                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2781                 mutex_unlock(&aconnector->hpd_lock);
2782         }
2783 }
2784
2785 static void register_hpd_handlers(struct amdgpu_device *adev)
2786 {
2787         struct drm_device *dev = adev_to_drm(adev);
2788         struct drm_connector *connector;
2789         struct amdgpu_dm_connector *aconnector;
2790         const struct dc_link *dc_link;
2791         struct dc_interrupt_params int_params = {0};
2792
2793         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2794         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2795
2796         list_for_each_entry(connector,
2797                         &dev->mode_config.connector_list, head) {
2798
2799                 aconnector = to_amdgpu_dm_connector(connector);
2800                 dc_link = aconnector->dc_link;
2801
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2803                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2804                         int_params.irq_source = dc_link->irq_source_hpd;
2805
2806                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2807                                         handle_hpd_irq,
2808                                         (void *) aconnector);
2809                 }
2810
2811                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2812
2813                         /* Also register for DP short pulse (hpd_rx). */
2814                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2815                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2816
2817                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2818                                         handle_hpd_rx_irq,
2819                                         (void *) aconnector);
2820                 }
2821         }
2822 }
2823
2824 #if defined(CONFIG_DRM_AMD_DC_SI)
2825 /* Register IRQ sources and initialize IRQ callbacks */
2826 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2827 {
2828         struct dc *dc = adev->dm.dc;
2829         struct common_irq_params *c_irq_params;
2830         struct dc_interrupt_params int_params = {0};
2831         int r;
2832         int i;
2833         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2834
2835         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2836         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2837
2838         /*
2839          * Actions of amdgpu_irq_add_id():
2840          * 1. Register a set() function with base driver.
2841          *    Base driver will call set() function to enable/disable an
2842          *    interrupt in DC hardware.
2843          * 2. Register amdgpu_dm_irq_handler().
2844          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2845          *    coming from DC hardware.
2846          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2847          *    for acknowledging and handling. */
2848
2849         /* Use VBLANK interrupt */
2850         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2851                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2852                 if (r) {
2853                         DRM_ERROR("Failed to add crtc irq id!\n");
2854                         return r;
2855                 }
2856
2857                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2858                 int_params.irq_source =
2859                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2860
2861                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2862
2863                 c_irq_params->adev = adev;
2864                 c_irq_params->irq_src = int_params.irq_source;
2865
2866                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2867                                 dm_crtc_high_irq, c_irq_params);
2868         }
2869
2870         /* Use GRPH_PFLIP interrupt */
2871         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2872                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2873                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2874                 if (r) {
2875                         DRM_ERROR("Failed to add page flip irq id!\n");
2876                         return r;
2877                 }
2878
2879                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2880                 int_params.irq_source =
2881                         dc_interrupt_to_irq_source(dc, i, 0);
2882
2883                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2884
2885                 c_irq_params->adev = adev;
2886                 c_irq_params->irq_src = int_params.irq_source;
2887
2888                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2889                                 dm_pflip_high_irq, c_irq_params);
2890
2891         }
2892
2893         /* HPD */
2894         r = amdgpu_irq_add_id(adev, client_id,
2895                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2896         if (r) {
2897                 DRM_ERROR("Failed to add hpd irq id!\n");
2898                 return r;
2899         }
2900
2901         register_hpd_handlers(adev);
2902
2903         return 0;
2904 }
2905 #endif
2906
2907 /* Register IRQ sources and initialize IRQ callbacks */
2908 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2909 {
2910         struct dc *dc = adev->dm.dc;
2911         struct common_irq_params *c_irq_params;
2912         struct dc_interrupt_params int_params = {0};
2913         int r;
2914         int i;
2915         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2916
2917         if (adev->asic_type >= CHIP_VEGA10)
2918                 client_id = SOC15_IH_CLIENTID_DCE;
2919
2920         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2921         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2922
2923         /*
2924          * Actions of amdgpu_irq_add_id():
2925          * 1. Register a set() function with base driver.
2926          *    Base driver will call set() function to enable/disable an
2927          *    interrupt in DC hardware.
2928          * 2. Register amdgpu_dm_irq_handler().
2929          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2930          *    coming from DC hardware.
2931          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2932          *    for acknowledging and handling. */
2933
2934         /* Use VBLANK interrupt */
2935         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2936                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2937                 if (r) {
2938                         DRM_ERROR("Failed to add crtc irq id!\n");
2939                         return r;
2940                 }
2941
2942                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2943                 int_params.irq_source =
2944                         dc_interrupt_to_irq_source(dc, i, 0);
2945
2946                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2947
2948                 c_irq_params->adev = adev;
2949                 c_irq_params->irq_src = int_params.irq_source;
2950
2951                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2952                                 dm_crtc_high_irq, c_irq_params);
2953         }
2954
2955         /* Use VUPDATE interrupt */
2956         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2957                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2958                 if (r) {
2959                         DRM_ERROR("Failed to add vupdate irq id!\n");
2960                         return r;
2961                 }
2962
2963                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2964                 int_params.irq_source =
2965                         dc_interrupt_to_irq_source(dc, i, 0);
2966
2967                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2968
2969                 c_irq_params->adev = adev;
2970                 c_irq_params->irq_src = int_params.irq_source;
2971
2972                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2973                                 dm_vupdate_high_irq, c_irq_params);
2974         }
2975
2976         /* Use GRPH_PFLIP interrupt */
2977         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2978                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2979                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2980                 if (r) {
2981                         DRM_ERROR("Failed to add page flip irq id!\n");
2982                         return r;
2983                 }
2984
2985                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2986                 int_params.irq_source =
2987                         dc_interrupt_to_irq_source(dc, i, 0);
2988
2989                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2990
2991                 c_irq_params->adev = adev;
2992                 c_irq_params->irq_src = int_params.irq_source;
2993
2994                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2995                                 dm_pflip_high_irq, c_irq_params);
2996
2997         }
2998
2999         /* HPD */
3000         r = amdgpu_irq_add_id(adev, client_id,
3001                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3002         if (r) {
3003                 DRM_ERROR("Failed to add hpd irq id!\n");
3004                 return r;
3005         }
3006
3007         register_hpd_handlers(adev);
3008
3009         return 0;
3010 }
3011
3012 #if defined(CONFIG_DRM_AMD_DC_DCN)
3013 /* Register IRQ sources and initialize IRQ callbacks */
3014 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3015 {
3016         struct dc *dc = adev->dm.dc;
3017         struct common_irq_params *c_irq_params;
3018         struct dc_interrupt_params int_params = {0};
3019         int r;
3020         int i;
3021 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3022         static const unsigned int vrtl_int_srcid[] = {
3023                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3024                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3025                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3026                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3027                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3028                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3029         };
3030 #endif
3031
3032         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3033         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3034
3035         /*
3036          * Actions of amdgpu_irq_add_id():
3037          * 1. Register a set() function with base driver.
3038          *    Base driver will call set() function to enable/disable an
3039          *    interrupt in DC hardware.
3040          * 2. Register amdgpu_dm_irq_handler().
3041          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3042          *    coming from DC hardware.
3043          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3044          *    for acknowledging and handling.
3045          */
3046
3047         /* Use VSTARTUP interrupt */
3048         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3049                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3050                         i++) {
3051                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3052
3053                 if (r) {
3054                         DRM_ERROR("Failed to add crtc irq id!\n");
3055                         return r;
3056                 }
3057
3058                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3059                 int_params.irq_source =
3060                         dc_interrupt_to_irq_source(dc, i, 0);
3061
3062                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3063
3064                 c_irq_params->adev = adev;
3065                 c_irq_params->irq_src = int_params.irq_source;
3066
3067                 amdgpu_dm_irq_register_interrupt(
3068                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3069         }
3070
3071         /* Use otg vertical line interrupt */
3072 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3073         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3074                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3075                                 vrtl_int_srcid[i], &adev->vline0_irq);
3076
3077                 if (r) {
3078                         DRM_ERROR("Failed to add vline0 irq id!\n");
3079                         return r;
3080                 }
3081
3082                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3083                 int_params.irq_source =
3084                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3085
3086                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3087                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3088                         break;
3089                 }
3090
3091                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3092                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3093
3094                 c_irq_params->adev = adev;
3095                 c_irq_params->irq_src = int_params.irq_source;
3096
3097                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3098                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3099         }
3100 #endif
3101
3102         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3103          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3104          * to trigger at the end of each vblank, regardless of the state of the lock,
3105          * matching DCE behaviour.
3106          */
3107         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3108              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3109              i++) {
3110                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3111
3112                 if (r) {
3113                         DRM_ERROR("Failed to add vupdate irq id!\n");
3114                         return r;
3115                 }
3116
3117                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3118                 int_params.irq_source =
3119                         dc_interrupt_to_irq_source(dc, i, 0);
3120
3121                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3122
3123                 c_irq_params->adev = adev;
3124                 c_irq_params->irq_src = int_params.irq_source;
3125
3126                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3127                                 dm_vupdate_high_irq, c_irq_params);
3128         }
3129
3130         /* Use GRPH_PFLIP interrupt */
3131         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3132                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3133                         i++) {
3134                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3135                 if (r) {
3136                         DRM_ERROR("Failed to add page flip irq id!\n");
3137                         return r;
3138                 }
3139
3140                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3141                 int_params.irq_source =
3142                         dc_interrupt_to_irq_source(dc, i, 0);
3143
3144                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3145
3146                 c_irq_params->adev = adev;
3147                 c_irq_params->irq_src = int_params.irq_source;
3148
3149                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3150                                 dm_pflip_high_irq, c_irq_params);
3151
3152         }
3153
3154         if (dc->ctx->dmub_srv) {
3155                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3156                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3157
3158                 if (r) {
3159                         DRM_ERROR("Failed to add dmub trace irq id!\n");
3160                         return r;
3161                 }
3162
3163                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3164                 int_params.irq_source =
3165                         dc_interrupt_to_irq_source(dc, i, 0);
3166
3167                 c_irq_params = &adev->dm.dmub_trace_params[0];
3168
3169                 c_irq_params->adev = adev;
3170                 c_irq_params->irq_src = int_params.irq_source;
3171
3172                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3173                                 dm_dmub_trace_high_irq, c_irq_params);
3174         }
3175
3176         /* HPD */
3177         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3178                         &adev->hpd_irq);
3179         if (r) {
3180                 DRM_ERROR("Failed to add hpd irq id!\n");
3181                 return r;
3182         }
3183
3184         register_hpd_handlers(adev);
3185
3186         return 0;
3187 }
3188 #endif
3189
3190 /*
3191  * Acquires the lock for the atomic state object and returns
3192  * the new atomic state.
3193  *
3194  * This should only be called during atomic check.
3195  */
3196 static int dm_atomic_get_state(struct drm_atomic_state *state,
3197                                struct dm_atomic_state **dm_state)
3198 {
3199         struct drm_device *dev = state->dev;
3200         struct amdgpu_device *adev = drm_to_adev(dev);
3201         struct amdgpu_display_manager *dm = &adev->dm;
3202         struct drm_private_state *priv_state;
3203
3204         if (*dm_state)
3205                 return 0;
3206
3207         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3208         if (IS_ERR(priv_state))
3209                 return PTR_ERR(priv_state);
3210
3211         *dm_state = to_dm_atomic_state(priv_state);
3212
3213         return 0;
3214 }
3215
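/*
 * Return the dm_atomic_state already tracked in @state, or NULL if the DM
 * private object has not been added to this atomic state yet.
 */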
3216 static struct dm_atomic_state *
3217 dm_atomic_get_new_state(struct drm_atomic_state *state)
3218 {
3219         struct drm_device *dev = state->dev;
3220         struct amdgpu_device *adev = drm_to_adev(dev);
3221         struct amdgpu_display_manager *dm = &adev->dm;
3222         struct drm_private_obj *obj;
3223         struct drm_private_state *new_obj_state;
3224         int i;
3225
3226         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3227                 if (obj->funcs == dm->atomic_obj.funcs)
3228                         return to_dm_atomic_state(new_obj_state);
3229         }
3230
3231         return NULL;
3232 }
3233
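/*
 * Duplicate the DM private object state; the DC validation context is
 * copied as well, so the duplicate can be mutated independently of the
 * currently committed state.
 */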
3234 static struct drm_private_state *
3235 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3236 {
3237         struct dm_atomic_state *old_state, *new_state;
3238
3239         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3240         if (!new_state)
3241                 return NULL;
3242
3243         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3244
3245         old_state = to_dm_atomic_state(obj->state);
3246
3247         if (old_state && old_state->context)
3248                 new_state->context = dc_copy_state(old_state->context);
3249
3250         if (!new_state->context) {
3251                 kfree(new_state);
3252                 return NULL;
3253         }
3254
3255         return &new_state->base;
3256 }
3257
3258 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3259                                     struct drm_private_state *state)
3260 {
3261         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3262
3263         if (dm_state && dm_state->context)
3264                 dc_release_state(dm_state->context);
3265
3266         kfree(dm_state);
3267 }
3268
3269 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3270         .atomic_duplicate_state = dm_atomic_duplicate_state,
3271         .atomic_destroy_state = dm_atomic_destroy_state,
3272 };
3273
3274 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3275 {
3276         struct dm_atomic_state *state;
3277         int r;
3278
3279         adev->mode_info.mode_config_initialized = true;
3280
3281         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3282         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3283
3284         adev_to_drm(adev)->mode_config.max_width = 16384;
3285         adev_to_drm(adev)->mode_config.max_height = 16384;
3286
3287         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3288         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3289         /* indicates support for immediate flip */
3290         adev_to_drm(adev)->mode_config.async_page_flip = true;
3291
3292         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3293
3294         state = kzalloc(sizeof(*state), GFP_KERNEL);
3295         if (!state)
3296                 return -ENOMEM;
3297
3298         state->context = dc_create_state(adev->dm.dc);
3299         if (!state->context) {
3300                 kfree(state);
3301                 return -ENOMEM;
3302         }
3303
3304         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3305
3306         drm_atomic_private_obj_init(adev_to_drm(adev),
3307                                     &adev->dm.atomic_obj,
3308                                     &state->base,
3309                                     &dm_atomic_state_funcs);
3310
3311         r = amdgpu_display_modeset_create_props(adev);
3312         if (r) {
3313                 dc_release_state(state->context);
3314                 kfree(state);
3315                 return r;
3316         }
3317
3318         r = amdgpu_dm_audio_init(adev);
3319         if (r) {
3320                 dc_release_state(state->context);
3321                 kfree(state);
3322                 return r;
3323         }
3324
3325         return 0;
3326 }
3327
3328 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3329 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3330 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3331
3332 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3333         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3334
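/*
 * Cache the panel backlight limits reported by ACPI, falling back to the
 * driver defaults when the platform reports none (or ACPI is disabled).
 */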
3335 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3336 {
3337 #if defined(CONFIG_ACPI)
3338         struct amdgpu_dm_backlight_caps caps;
3339
3340         memset(&caps, 0, sizeof(caps));
3341
3342         if (dm->backlight_caps.caps_valid)
3343                 return;
3344
3345         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3346         if (caps.caps_valid) {
3347                 dm->backlight_caps.caps_valid = true;
3348                 if (caps.aux_support)
3349                         return;
3350                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3351                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3352         } else {
3353                 dm->backlight_caps.min_input_signal =
3354                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3355                 dm->backlight_caps.max_input_signal =
3356                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3357         }
3358 #else
3359         if (dm->backlight_caps.aux_support)
3360                 return;
3361
3362         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3363         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3364 #endif
3365 }
3366
3367 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3368                                 unsigned int *min, unsigned int *max)
3369 {
3370         if (!caps)
3371                 return 0;
3372
3373         if (caps->aux_support) {
3374                 // Firmware limits are in nits, DC API wants millinits.
3375                 *max = 1000 * caps->aux_max_input_signal;
3376                 *min = 1000 * caps->aux_min_input_signal;
3377         } else {
3378                 // Firmware limits are 8-bit, PWM control is 16-bit.
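                // 0x101 * 0xFF == 0xFFFF, so the 8-bit firmware maximum
                // expands to the full 16-bit PWM range.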
3379                 *max = 0x101 * caps->max_input_signal;
3380                 *min = 0x101 * caps->min_input_signal;
3381         }
3382         return 1;
3383 }
3384
3385 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3386                                         uint32_t brightness)
3387 {
3388         unsigned int min, max;
3389
3390         if (!get_brightness_range(caps, &min, &max))
3391                 return brightness;
3392
3393         // Rescale 0..255 to min..max
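        /*
         * Example with the default PWM limits (12..255): min = 0x101 * 12 =
         * 3084, max = 0x101 * 255 = 65535, so user brightness 128 maps to
         * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34433.
         */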
3394         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3395                                        AMDGPU_MAX_BL_LEVEL);
3396 }
3397
3398 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3399                                       uint32_t brightness)
3400 {
3401         unsigned int min, max;
3402
3403         if (!get_brightness_range(caps, &min, &max))
3404                 return brightness;
3405
3406         if (brightness < min)
3407                 return 0;
3408         // Rescale min..max to 0..255
3409         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3410                                  max - min);
3411 }
3412
3413 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3414 {
3415         struct amdgpu_display_manager *dm = bl_get_data(bd);
3416         struct amdgpu_dm_backlight_caps caps;
3417         struct dc_link *link = NULL;
3418         u32 brightness;
3419         bool rc;
3420
3421         amdgpu_dm_update_backlight_caps(dm);
3422         caps = dm->backlight_caps;
3423
3424         link = (struct dc_link *)dm->backlight_link;
3425
3426         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3427         // Change brightness based on AUX property
3428         if (caps.aux_support)
3429                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3430                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3431         else
3432                 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3433
3434         return rc ? 0 : 1;
3435 }
3436
3437 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3438 {
3439         struct amdgpu_display_manager *dm = bl_get_data(bd);
3440         struct amdgpu_dm_backlight_caps caps;
3441
3442         amdgpu_dm_update_backlight_caps(dm);
3443         caps = dm->backlight_caps;
3444
3445         if (caps.aux_support) {
3446                 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3447                 u32 avg, peak;
3448                 bool rc;
3449
3450                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3451                 if (!rc)
3452                         return bd->props.brightness;
3453                 return convert_brightness_to_user(&caps, avg);
3454         } else {
3455                 int ret = dc_link_get_backlight_level(dm->backlight_link);
3456
3457                 if (ret == DC_ERROR_UNEXPECTED)
3458                         return bd->props.brightness;
3459                 return convert_brightness_to_user(&caps, ret);
3460         }
3461 }
3462
3463 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3464         .options = BL_CORE_SUSPENDRESUME,
3465         .get_brightness = amdgpu_dm_backlight_get_brightness,
3466         .update_status  = amdgpu_dm_backlight_update_status,
3467 };
3468
3469 static void
3470 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3471 {
3472         char bl_name[16];
3473         struct backlight_properties props = { 0 };
3474
3475         amdgpu_dm_update_backlight_caps(dm);
3476
3477         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3478         props.brightness = AMDGPU_MAX_BL_LEVEL;
3479         props.type = BACKLIGHT_RAW;
3480
3481         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3482                  adev_to_drm(dm->adev)->primary->index);
3483
3484         dm->backlight_dev = backlight_device_register(bl_name,
3485                                                       adev_to_drm(dm->adev)->dev,
3486                                                       dm,
3487                                                       &amdgpu_dm_backlight_ops,
3488                                                       &props);
3489
3490         if (IS_ERR(dm->backlight_dev))
3491                 DRM_ERROR("DM: Backlight registration failed!\n");
3492         else
3493                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3494 }
3495
3496 #endif
3497
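/*
 * Allocate and register one DRM plane for DM, deriving its possible-CRTC
 * mask from the plane id; the allocation is freed here on failure.
 */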
3498 static int initialize_plane(struct amdgpu_display_manager *dm,
3499                             struct amdgpu_mode_info *mode_info, int plane_id,
3500                             enum drm_plane_type plane_type,
3501                             const struct dc_plane_cap *plane_cap)
3502 {
3503         struct drm_plane *plane;
3504         unsigned long possible_crtcs;
3505         int ret = 0;
3506
3507         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3508         if (!plane) {
3509                 DRM_ERROR("KMS: Failed to allocate plane\n");
3510                 return -ENOMEM;
3511         }
3512         plane->type = plane_type;
3513
3514         /*
3515          * HACK: IGT tests expect that the primary plane for a CRTC
3516          * can only have one possible CRTC. Only expose support for
3517          * all CRTCs on planes that will not be used as a primary
3518          * plane for a CRTC, such as overlay or underlay planes.
3519          */
3520         possible_crtcs = 1 << plane_id;
3521         if (plane_id >= dm->dc->caps.max_streams)
3522                 possible_crtcs = 0xff;
3523
3524         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3525
3526         if (ret) {
3527                 DRM_ERROR("KMS: Failed to initialize plane\n");
3528                 kfree(plane);
3529                 return ret;
3530         }
3531
3532         if (mode_info)
3533                 mode_info->planes[plane_id] = plane;
3534
3535         return ret;
3536 }
3537
3538
3539 static void register_backlight_device(struct amdgpu_display_manager *dm,
3540                                       struct dc_link *link)
3541 {
3542 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3543         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3544
3545         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3546             link->type != dc_connection_none) {
3547                 /*
3548                  * Even if registration fails, we should continue with
3549                  * DM initialization because not having a backlight control
3550                  * is better than a black screen.
3551                  */
3552                 amdgpu_dm_register_backlight_device(dm);
3553
3554                 if (dm->backlight_dev)
3555                         dm->backlight_link = link;
3556         }
3557 #endif
3558 }
3559
3560
3561 /*
3562  * In this architecture, the association
3563  * connector -> encoder -> crtc
3564  * is not really required. The crtc and connector will hold the
3565  * display_index as an abstraction to use with the DAL component.
3566  *
3567  * Returns 0 on success
3568  */
3569 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3570 {
3571         struct amdgpu_display_manager *dm = &adev->dm;
3572         int32_t i;
3573         struct amdgpu_dm_connector *aconnector = NULL;
3574         struct amdgpu_encoder *aencoder = NULL;
3575         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3576         uint32_t link_cnt;
3577         int32_t primary_planes;
3578         enum dc_connection_type new_connection_type = dc_connection_none;
3579         const struct dc_plane_cap *plane;
3580
3581         dm->display_indexes_num = dm->dc->caps.max_streams;
3582         /* Update the actual number of CRTCs in use */
3583         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3584
3585         link_cnt = dm->dc->caps.max_links;
3586         if (amdgpu_dm_mode_config_init(dm->adev)) {
3587                 DRM_ERROR("DM: Failed to initialize mode config\n");
3588                 return -EINVAL;
3589         }
3590
3591         /* There is one primary plane per CRTC */
3592         primary_planes = dm->dc->caps.max_streams;
3593         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3594
3595         /*
3596          * Initialize primary planes, implicit planes for legacy IOCTLs.
3597          * Order is reversed to match iteration order in atomic check.
3598          */
3599         for (i = (primary_planes - 1); i >= 0; i--) {
3600                 plane = &dm->dc->caps.planes[i];
3601
3602                 if (initialize_plane(dm, mode_info, i,
3603                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3604                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3605                         goto fail;
3606                 }
3607         }
3608
3609         /*
3610          * Initialize overlay planes, index starting after primary planes.
3611          * These planes have a higher DRM index than the primary planes since
3612          * they should be considered as having a higher z-order.
3613          * Order is reversed to match iteration order in atomic check.
3614          *
3615          * Only support DCN for now, and only expose one so we don't encourage
3616          * userspace to use up all the pipes.
3617          */
3618         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3619                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3620
3621                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3622                         continue;
3623
3624                 if (!plane->blends_with_above || !plane->blends_with_below)
3625                         continue;
3626
3627                 if (!plane->pixel_format_support.argb8888)
3628                         continue;
3629
3630                 if (initialize_plane(dm, NULL, primary_planes + i,
3631                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3632                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3633                         goto fail;
3634                 }
3635
3636                 /* Only create one overlay plane. */
3637                 break;
3638         }
3639
3640         for (i = 0; i < dm->dc->caps.max_streams; i++)
3641                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3642                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3643                         goto fail;
3644                 }
3645
3646         /* Loop over all connectors on the board */
3647         for (i = 0; i < link_cnt; i++) {
3648                 struct dc_link *link = NULL;
3649
3650                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3651                         DRM_ERROR(
3652                                 "KMS: Cannot support more than %d display indexes\n",
3653                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3654                         continue;
3655                 }
3656
3657                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3658                 if (!aconnector)
3659                         goto fail;
3660
3661                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3662                 if (!aencoder)
3663                         goto fail;
3664
3665                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3666                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3667                         goto fail;
3668                 }
3669
3670                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3671                         DRM_ERROR("KMS: Failed to initialize connector\n");
3672                         goto fail;
3673                 }
3674
3675                 link = dc_get_link_at_index(dm->dc, i);
3676
3677                 if (!dc_link_detect_sink(link, &new_connection_type))
3678                         DRM_ERROR("KMS: Failed to detect connector\n");
3679
3680                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3681                         emulated_link_detect(link);
3682                         amdgpu_dm_update_connector_after_detect(aconnector);
3683
3684                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3685                         amdgpu_dm_update_connector_after_detect(aconnector);
3686                         register_backlight_device(dm, link);
3687                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3688                                 amdgpu_dm_set_psr_caps(link);
3689                 }
3690
3691
3692         }
3693
3694         /* Software is initialized. Now we can register interrupt handlers. */
3695         switch (adev->asic_type) {
3696 #if defined(CONFIG_DRM_AMD_DC_SI)
3697         case CHIP_TAHITI:
3698         case CHIP_PITCAIRN:
3699         case CHIP_VERDE:
3700         case CHIP_OLAND:
3701                 if (dce60_register_irq_handlers(dm->adev)) {
3702                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3703                         goto fail;
3704                 }
3705                 break;
3706 #endif
3707         case CHIP_BONAIRE:
3708         case CHIP_HAWAII:
3709         case CHIP_KAVERI:
3710         case CHIP_KABINI:
3711         case CHIP_MULLINS:
3712         case CHIP_TONGA:
3713         case CHIP_FIJI:
3714         case CHIP_CARRIZO:
3715         case CHIP_STONEY:
3716         case CHIP_POLARIS11:
3717         case CHIP_POLARIS10:
3718         case CHIP_POLARIS12:
3719         case CHIP_VEGAM:
3720         case CHIP_VEGA10:
3721         case CHIP_VEGA12:
3722         case CHIP_VEGA20:
3723                 if (dce110_register_irq_handlers(dm->adev)) {
3724                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3725                         goto fail;
3726                 }
3727                 break;
3728 #if defined(CONFIG_DRM_AMD_DC_DCN)
3729         case CHIP_RAVEN:
3730         case CHIP_NAVI12:
3731         case CHIP_NAVI10:
3732         case CHIP_NAVI14:
3733         case CHIP_RENOIR:
3734         case CHIP_SIENNA_CICHLID:
3735         case CHIP_NAVY_FLOUNDER:
3736         case CHIP_DIMGREY_CAVEFISH:
3737         case CHIP_VANGOGH:
3738                 if (dcn10_register_irq_handlers(dm->adev)) {
3739                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3740                         goto fail;
3741                 }
3742                 break;
3743 #endif
3744         default:
3745                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3746                 goto fail;
3747         }
3748
3749         return 0;
3750 fail:
3751         kfree(aencoder);
3752         kfree(aconnector);
3753
3754         return -EINVAL;
3755 }
3756
3757 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3758 {
3759         drm_mode_config_cleanup(dm->ddev);
3760         drm_atomic_private_obj_fini(&dm->atomic_obj);
3762 }
3763
3764 /******************************************************************************
3765  * amdgpu_display_funcs functions
3766  *****************************************************************************/
3767
3768 /*
3769  * dm_bandwidth_update - program display watermarks
3770  *
3771  * @adev: amdgpu_device pointer
3772  *
3773  * Calculate and program the display watermarks and line buffer allocation.
3774  */
3775 static void dm_bandwidth_update(struct amdgpu_device *adev)
3776 {
3777         /* TODO: implement later */
3778 }
3779
3780 static const struct amdgpu_display_funcs dm_display_funcs = {
3781         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3782         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3783         .backlight_set_level = NULL, /* never called for DC */
3784         .backlight_get_level = NULL, /* never called for DC */
3785         .hpd_sense = NULL,/* called unconditionally */
3786         .hpd_set_polarity = NULL, /* called unconditionally */
3787         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3788         .page_flip_get_scanoutpos =
3789                 dm_crtc_get_scanoutpos,/* called unconditionally */
3790         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3791         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3792 };
3793
3794 #if defined(CONFIG_DEBUG_KERNEL_DC)
3795
3796 static ssize_t s3_debug_store(struct device *device,
3797                               struct device_attribute *attr,
3798                               const char *buf,
3799                               size_t count)
3800 {
3801         int ret;
3802         int s3_state;
3803         struct drm_device *drm_dev = dev_get_drvdata(device);
3804         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3805
3806         ret = kstrtoint(buf, 0, &s3_state);
3807
3808         if (ret == 0) {
3809                 if (s3_state) {
3810                         dm_resume(adev);
3811                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3812                 } else
3813                         dm_suspend(adev);
3814         }
3815
3816         return ret == 0 ? count : 0;
3817 }
3818
3819 DEVICE_ATTR_WO(s3_debug);
3820
3821 #endif
3822
3823 static int dm_early_init(void *handle)
3824 {
3825         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3826
3827         switch (adev->asic_type) {
3828 #if defined(CONFIG_DRM_AMD_DC_SI)
3829         case CHIP_TAHITI:
3830         case CHIP_PITCAIRN:
3831         case CHIP_VERDE:
3832                 adev->mode_info.num_crtc = 6;
3833                 adev->mode_info.num_hpd = 6;
3834                 adev->mode_info.num_dig = 6;
3835                 break;
3836         case CHIP_OLAND:
3837                 adev->mode_info.num_crtc = 2;
3838                 adev->mode_info.num_hpd = 2;
3839                 adev->mode_info.num_dig = 2;
3840                 break;
3841 #endif
3842         case CHIP_BONAIRE:
3843         case CHIP_HAWAII:
3844                 adev->mode_info.num_crtc = 6;
3845                 adev->mode_info.num_hpd = 6;
3846                 adev->mode_info.num_dig = 6;
3847                 break;
3848         case CHIP_KAVERI:
3849                 adev->mode_info.num_crtc = 4;
3850                 adev->mode_info.num_hpd = 6;
3851                 adev->mode_info.num_dig = 7;
3852                 break;
3853         case CHIP_KABINI:
3854         case CHIP_MULLINS:
3855                 adev->mode_info.num_crtc = 2;
3856                 adev->mode_info.num_hpd = 6;
3857                 adev->mode_info.num_dig = 6;
3858                 break;
3859         case CHIP_FIJI:
3860         case CHIP_TONGA:
3861                 adev->mode_info.num_crtc = 6;
3862                 adev->mode_info.num_hpd = 6;
3863                 adev->mode_info.num_dig = 7;
3864                 break;
3865         case CHIP_CARRIZO:
3866                 adev->mode_info.num_crtc = 3;
3867                 adev->mode_info.num_hpd = 6;
3868                 adev->mode_info.num_dig = 9;
3869                 break;
3870         case CHIP_STONEY:
3871                 adev->mode_info.num_crtc = 2;
3872                 adev->mode_info.num_hpd = 6;
3873                 adev->mode_info.num_dig = 9;
3874                 break;
3875         case CHIP_POLARIS11:
3876         case CHIP_POLARIS12:
3877                 adev->mode_info.num_crtc = 5;
3878                 adev->mode_info.num_hpd = 5;
3879                 adev->mode_info.num_dig = 5;
3880                 break;
3881         case CHIP_POLARIS10:
3882         case CHIP_VEGAM:
3883                 adev->mode_info.num_crtc = 6;
3884                 adev->mode_info.num_hpd = 6;
3885                 adev->mode_info.num_dig = 6;
3886                 break;
3887         case CHIP_VEGA10:
3888         case CHIP_VEGA12:
3889         case CHIP_VEGA20:
3890                 adev->mode_info.num_crtc = 6;
3891                 adev->mode_info.num_hpd = 6;
3892                 adev->mode_info.num_dig = 6;
3893                 break;
3894 #if defined(CONFIG_DRM_AMD_DC_DCN)
3895         case CHIP_RAVEN:
3896         case CHIP_RENOIR:
3897         case CHIP_VANGOGH:
3898                 adev->mode_info.num_crtc = 4;
3899                 adev->mode_info.num_hpd = 4;
3900                 adev->mode_info.num_dig = 4;
3901                 break;
3902         case CHIP_NAVI10:
3903         case CHIP_NAVI12:
3904         case CHIP_SIENNA_CICHLID:
3905         case CHIP_NAVY_FLOUNDER:
3906                 adev->mode_info.num_crtc = 6;
3907                 adev->mode_info.num_hpd = 6;
3908                 adev->mode_info.num_dig = 6;
3909                 break;
3910         case CHIP_NAVI14:
3911         case CHIP_DIMGREY_CAVEFISH:
3912                 adev->mode_info.num_crtc = 5;
3913                 adev->mode_info.num_hpd = 5;
3914                 adev->mode_info.num_dig = 5;
3915                 break;
3916 #endif
3917         default:
3918                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3919                 return -EINVAL;
3920         }
3921
3922         amdgpu_dm_set_irq_funcs(adev);
3923
3924         if (adev->mode_info.funcs == NULL)
3925                 adev->mode_info.funcs = &dm_display_funcs;
3926
3927         /*
3928          * Note: Do NOT change adev->audio_endpt_rreg and
3929          * adev->audio_endpt_wreg because they are initialised in
3930          * amdgpu_device_init()
3931          */
3932 #if defined(CONFIG_DEBUG_KERNEL_DC)
3933         device_create_file(
3934                 adev_to_drm(adev)->dev,
3935                 &dev_attr_s3_debug);
3936 #endif
3937
3938         return 0;
3939 }
3940
3941 static bool modeset_required(struct drm_crtc_state *crtc_state,
3942                              struct dc_stream_state *new_stream,
3943                              struct dc_stream_state *old_stream)
3944 {
3945         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3946 }
3947
3948 static bool modereset_required(struct drm_crtc_state *crtc_state)
3949 {
3950         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3951 }
3952
3953 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3954 {
3955         drm_encoder_cleanup(encoder);
3956         kfree(encoder);
3957 }
3958
3959 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3960         .destroy = amdgpu_dm_encoder_destroy,
3961 };
3962
3963
3964 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3965                                          struct drm_framebuffer *fb,
3966                                          int *min_downscale, int *max_upscale)
3967 {
3968         struct amdgpu_device *adev = drm_to_adev(dev);
3969         struct dc *dc = adev->dm.dc;
3970         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3971         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3972
3973         switch (fb->format->format) {
3974         case DRM_FORMAT_P010:
3975         case DRM_FORMAT_NV12:
3976         case DRM_FORMAT_NV21:
3977                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3978                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3979                 break;
3980
3981         case DRM_FORMAT_XRGB16161616F:
3982         case DRM_FORMAT_ARGB16161616F:
3983         case DRM_FORMAT_XBGR16161616F:
3984         case DRM_FORMAT_ABGR16161616F:
3985                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3986                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3987                 break;
3988
3989         default:
3990                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3991                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3992                 break;
3993         }
3994
3995         /*
3996          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3997          * scaling factor of 1.0 == 1000 units.
3998          */
3999         if (*max_upscale == 1)
4000                 *max_upscale = 1000;
4001
4002         if (*min_downscale == 1)
4003                 *min_downscale = 1000;
4004 }
4005
4006
4007 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4008                                 struct dc_scaling_info *scaling_info)
4009 {
4010         int scale_w, scale_h, min_downscale, max_upscale;
4011
4012         memset(scaling_info, 0, sizeof(*scaling_info));
4013
4014         /* Source is fixed 16.16 but we ignore the fractional part for now... */
4015         scaling_info->src_rect.x = state->src_x >> 16;
4016         scaling_info->src_rect.y = state->src_y >> 16;
4017
4018         scaling_info->src_rect.width = state->src_w >> 16;
4019         if (scaling_info->src_rect.width == 0)
4020                 return -EINVAL;
4021
4022         scaling_info->src_rect.height = state->src_h >> 16;
4023         if (scaling_info->src_rect.height == 0)
4024                 return -EINVAL;
4025
4026         scaling_info->dst_rect.x = state->crtc_x;
4027         scaling_info->dst_rect.y = state->crtc_y;
4028
4029         if (state->crtc_w == 0)
4030                 return -EINVAL;
4031
4032         scaling_info->dst_rect.width = state->crtc_w;
4033
4034         if (state->crtc_h == 0)
4035                 return -EINVAL;
4036
4037         scaling_info->dst_rect.height = state->crtc_h;
4038
4039         /* DRM doesn't specify clipping on destination output. */
4040         scaling_info->clip_rect = scaling_info->dst_rect;
4041
4042         /* Validate scaling per-format with DC plane caps */
4043         if (state->plane && state->plane->dev && state->fb) {
4044                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4045                                              &min_downscale, &max_upscale);
4046         } else {
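                /*
                 * No plane caps available: fall back to conservative limits
                 * of 0.25x downscale and 16x upscale (units of 1/1000).
                 */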
4047                 min_downscale = 250;
4048                 max_upscale = 16000;
4049         }
4050
4051         scale_w = scaling_info->dst_rect.width * 1000 /
4052                   scaling_info->src_rect.width;
4053
4054         if (scale_w < min_downscale || scale_w > max_upscale)
4055                 return -EINVAL;
4056
4057         scale_h = scaling_info->dst_rect.height * 1000 /
4058                   scaling_info->src_rect.height;
4059
4060         if (scale_h < min_downscale || scale_h > max_upscale)
4061                 return -EINVAL;
4062
4063         /*
4064          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4065          * assume reasonable defaults based on the format.
4066          */
4067
4068         return 0;
4069 }
4070
4071 static void
4072 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4073                                  uint64_t tiling_flags)
4074 {
4075         /* Fill GFX8 params */
4076         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4077                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4078
4079                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4080                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4081                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4082                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4083                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4084
4085                 /* XXX fix me for VI */
4086                 tiling_info->gfx8.num_banks = num_banks;
4087                 tiling_info->gfx8.array_mode =
4088                                 DC_ARRAY_2D_TILED_THIN1;
4089                 tiling_info->gfx8.tile_split = tile_split;
4090                 tiling_info->gfx8.bank_width = bankw;
4091                 tiling_info->gfx8.bank_height = bankh;
4092                 tiling_info->gfx8.tile_aspect = mtaspect;
4093                 tiling_info->gfx8.tile_mode =
4094                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4095         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4096                         == DC_ARRAY_1D_TILED_THIN1) {
4097                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4098         }
4099
4100         tiling_info->gfx8.pipe_config =
4101                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4102 }
4103
4104 static void
4105 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4106                                   union dc_tiling_info *tiling_info)
4107 {
4108         tiling_info->gfx9.num_pipes =
4109                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4110         tiling_info->gfx9.num_banks =
4111                 adev->gfx.config.gb_addr_config_fields.num_banks;
4112         tiling_info->gfx9.pipe_interleave =
4113                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4114         tiling_info->gfx9.num_shader_engines =
4115                 adev->gfx.config.gb_addr_config_fields.num_se;
4116         tiling_info->gfx9.max_compressed_frags =
4117                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4118         tiling_info->gfx9.num_rb_per_se =
4119                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4120         tiling_info->gfx9.shaderEnable = 1;
4121         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4122             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4123             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4124             adev->asic_type == CHIP_VANGOGH)
4125                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4126 }
4127
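/*
 * Ask DC whether the surface can really be DCC-compressed as requested.
 * Returns 0 when DCC is disabled or the configuration is supportable,
 * -EINVAL otherwise.
 */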
4128 static int
4129 validate_dcc(struct amdgpu_device *adev,
4130              const enum surface_pixel_format format,
4131              const enum dc_rotation_angle rotation,
4132              const union dc_tiling_info *tiling_info,
4133              const struct dc_plane_dcc_param *dcc,
4134              const struct dc_plane_address *address,
4135              const struct plane_size *plane_size)
4136 {
4137         struct dc *dc = adev->dm.dc;
4138         struct dc_dcc_surface_param input;
4139         struct dc_surface_dcc_cap output;
4140
4141         memset(&input, 0, sizeof(input));
4142         memset(&output, 0, sizeof(output));
4143
4144         if (!dcc->enable)
4145                 return 0;
4146
4147         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4148             !dc->cap_funcs.get_dcc_compression_cap)
4149                 return -EINVAL;
4150
4151         input.format = format;
4152         input.surface_size.width = plane_size->surface_size.width;
4153         input.surface_size.height = plane_size->surface_size.height;
4154         input.swizzle_mode = tiling_info->gfx9.swizzle;
4155
4156         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4157                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4158         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4159                 input.scan = SCAN_DIRECTION_VERTICAL;
4160
4161         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4162                 return -EINVAL;
4163
4164         if (!output.capable)
4165                 return -EINVAL;
4166
4167         if (dcc->independent_64b_blks == 0 &&
4168             output.grph.rgb.independent_64b_blks != 0)
4169                 return -EINVAL;
4170
4171         return 0;
4172 }
4173
4174 static bool
4175 modifier_has_dcc(uint64_t modifier)
4176 {
4177         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4178 }
4179
4180 static unsigned
4181 modifier_gfx9_swizzle_mode(uint64_t modifier)
4182 {
4183         if (modifier == DRM_FORMAT_MOD_LINEAR)
4184                 return 0;
4185
4186         return AMD_FMT_MOD_GET(TILE, modifier);
4187 }
4188
4189 static const struct drm_format_info *
4190 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4191 {
4192         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4193 }
4194
4195 static void
4196 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4197                                     union dc_tiling_info *tiling_info,
4198                                     uint64_t modifier)
4199 {
4200         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4201         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4202         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4203         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4204
4205         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4206
4207         if (!IS_AMD_FMT_MOD(modifier))
4208                 return;
4209
4210         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4211         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4212
4213         if (adev->family >= AMDGPU_FAMILY_NV) {
4214                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4215         } else {
4216                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4217
4218                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4219         }
4220 }
4221
4222 enum dm_micro_swizzle {
4223         MICRO_SWIZZLE_Z = 0,
4224         MICRO_SWIZZLE_S = 1,
4225         MICRO_SWIZZLE_D = 2,
4226         MICRO_SWIZZLE_R = 3
4227 };
4228
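/*
 * Check whether a format/modifier pair is supported by the plane.
 * LINEAR and INVALID must always be accepted; anything else has to be
 * on the plane's modifier list and pass the per-family restrictions on
 * the D micro-swizzle and on DCC (32 bpp, single plane only).
 */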
4229 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4230                                           uint32_t format,
4231                                           uint64_t modifier)
4232 {
4233         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4234         const struct drm_format_info *info = drm_format_info(format);
4235         int i;
4236
4237         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4238
4239         if (!info)
4240                 return false;
4241
4242         /*
4243          * We always have to allow these modifiers:
4244          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4245          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4246          */
4247         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4248             modifier == DRM_FORMAT_MOD_INVALID) {
4249                 return true;
4250         }
4251
4252         /* Check that the modifier is on the list of the plane's supported modifiers. */
4253         for (i = 0; i < plane->modifier_count; i++) {
4254                 if (modifier == plane->modifiers[i])
4255                         break;
4256         }
4257         if (i == plane->modifier_count)
4258                 return false;
4259
4260         /*
4261          * For D swizzle the canonical modifier depends on the bpp, so check
4262          * it here.
4263          */
4264         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4265             adev->family >= AMDGPU_FAMILY_NV) {
4266                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4267                         return false;
4268         }
4269
4270         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4271             info->cpp[0] < 8)
4272                 return false;
4273
4274         if (modifier_has_dcc(modifier)) {
4275                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4276                 if (info->cpp[0] != 4)
4277                         return false;
4278                 /* We support multi-planar formats, but not when combined with
4279                  * additional DCC metadata planes. */
4280                 if (info->num_planes > 1)
4281                         return false;
4282         }
4283
4284         return true;
4285 }
4286
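/*
 * Append a modifier to a dynamically grown array, doubling the capacity
 * when it runs out. On allocation failure *mods is freed and set to
 * NULL, turning subsequent calls into no-ops; the caller detects the
 * NULL pointer and reports -ENOMEM.
 */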
4287 static void
4288 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4289 {
4290         if (!*mods)
4291                 return;
4292
4293         if (*cap - *size < 1) {
4294                 uint64_t new_cap = *cap * 2;
4295                 uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4296
4297                 if (!new_mods) {
4298                         kfree(*mods);
4299                         *mods = NULL;
4300                         return;
4301                 }
4302
4303                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4304                 kfree(*mods);
4305                 *mods = new_mods;
4306                 *cap = new_cap;
4307         }
4308
4309         (*mods)[*size] = mod;
4310         *size += 1;
4311 }
4312
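/*
 * Build the GFX9 (Vega/Raven) modifier list. The pipe/bank XOR bit
 * counts are derived from gb_addr_config; DCC variants are only
 * advertised on the Raven family, with constant-encode support
 * requiring Raven2 (external_rev_id >= 0x81) or later.
 */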
4313 static void
4314 add_gfx9_modifiers(const struct amdgpu_device *adev,
4315                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4316 {
4317         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4318         int pipe_xor_bits = min(8, pipes +
4319                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4320         int bank_xor_bits = min(8 - pipe_xor_bits,
4321                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4322         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4323                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4324
4326         if (adev->family == AMDGPU_FAMILY_RV) {
4327                 /* Raven2 and later */
4328                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4329
4330                 /*
4331                  * No _D DCC swizzles yet because we only allow 32bpp, which
4332                  * doesn't support _D on DCN
4333                  */
4334
4335                 if (has_constant_encode) {
4336                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4337                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4338                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4339                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4340                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4341                                     AMD_FMT_MOD_SET(DCC, 1) |
4342                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4343                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4344                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4345                 }
4346
4347                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4348                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4349                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4350                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4351                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4352                             AMD_FMT_MOD_SET(DCC, 1) |
4353                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4354                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4355                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4356
4357                 if (has_constant_encode) {
4358                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4359                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4360                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4361                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4362                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4363                                     AMD_FMT_MOD_SET(DCC, 1) |
4364                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4365                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4366                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4368                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4369                                     AMD_FMT_MOD_SET(RB, rb) |
4370                                     AMD_FMT_MOD_SET(PIPE, pipes));
4371                 }
4372
4373                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4374                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4375                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4376                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4377                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4378                             AMD_FMT_MOD_SET(DCC, 1) |
4379                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4380                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4381                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4382                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4383                             AMD_FMT_MOD_SET(RB, rb) |
4384                             AMD_FMT_MOD_SET(PIPE, pipes));
4385         }
4386
4387         /*
4388          * Only supported for 64bpp on Raven, will be filtered on format in
4389          * dm_plane_format_mod_supported.
4390          */
4391         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4392                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4393                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4394                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4395                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4396
4397         if (adev->family == AMDGPU_FAMILY_RV) {
4398                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4399                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4400                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4401                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4402                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4403         }
4404
4405         /*
4406          * Only supported for 64bpp on Raven, will be filtered on format in
4407          * dm_plane_format_mod_supported.
4408          */
4409         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4410                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4411                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4412
4413         if (adev->family == AMDGPU_FAMILY_RV) {
4414                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4415                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4416                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4417         }
4418 }
4419
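/*
 * Build the GFX10.1 (Navi 1x) modifier list: R_X/S_X swizzles with
 * PIPE_XOR_BITS only (no packers), plus 64B-independent DCC variants
 * and the GFX9 64K_D/64K_S fallbacks.
 */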
4420 static void
4421 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4422                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4423 {
4424         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4425
4426         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4427                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4428                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4429                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4430                     AMD_FMT_MOD_SET(DCC, 1) |
4431                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4432                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4433                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4434
4435         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4436                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4437                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4438                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4439                     AMD_FMT_MOD_SET(DCC, 1) |
4440                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4441                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4442                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4443                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4444
4445         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4446                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4447                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4448                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4449
4450         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4451                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4452                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4453                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4454
4456         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4457         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4458                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4459                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4460
4461         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4462                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4463                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4464 }
4465
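/*
 * Build the GFX10.3 (Sienna Cichlid and newer, "RB+") modifier list.
 * Relative to GFX10.1 this adds the PACKERS field and independent
 * 128B DCC blocks.
 */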
4466 static void
4467 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4468                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4469 {
4470         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4471         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4472
4473         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4474                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4475                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4476                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4477                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4478                     AMD_FMT_MOD_SET(DCC, 1) |
4479                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4480                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4481                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4482                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4483
4484         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4485                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4486                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4487                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4488                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4489                     AMD_FMT_MOD_SET(DCC, 1) |
4490                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4491                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4492                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4493                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4494                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4495
4496         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4497                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4498                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4499                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4500                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4501
4502         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4503                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4504                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4505                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4506                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4507
4508         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4509         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4510                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4511                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4512
4513         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4514                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4515                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4516 }
4517
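/*
 * Build the modifier list advertised for a plane. Cursor planes only
 * get LINEAR; other planes get the family-specific list from the
 * helpers above. The list is always terminated by
 * DRM_FORMAT_MOD_INVALID.
 */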
4518 static int
4519 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4520 {
4521         uint64_t size = 0, capacity = 128;
4522         *mods = NULL;
4523
4524         /* We have not hooked up any pre-GFX9 modifiers. */
4525         if (adev->family < AMDGPU_FAMILY_AI)
4526                 return 0;
4527
4528         *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4529
4530         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4531                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4532                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4533                 return *mods ? 0 : -ENOMEM;
4534         }
4535
4536         switch (adev->family) {
4537         case AMDGPU_FAMILY_AI:
4538         case AMDGPU_FAMILY_RV:
4539                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4540                 break;
4541         case AMDGPU_FAMILY_NV:
4542         case AMDGPU_FAMILY_VGH:
4543                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4544                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4545                 else
4546                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4547                 break;
4548         }
4549
4550         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4551
4552         /* INVALID marks the end of the list. */
4553         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4554
4555         if (!*mods)
4556                 return -ENOMEM;
4557
4558         return 0;
4559 }
4560
4561 static int
4562 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4563                                           const struct amdgpu_framebuffer *afb,
4564                                           const enum surface_pixel_format format,
4565                                           const enum dc_rotation_angle rotation,
4566                                           const struct plane_size *plane_size,
4567                                           union dc_tiling_info *tiling_info,
4568                                           struct dc_plane_dcc_param *dcc,
4569                                           struct dc_plane_address *address,
4570                                           const bool force_disable_dcc)
4571 {
4572         const uint64_t modifier = afb->base.modifier;
4573         int ret;
4574
4575         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4576         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4577
4578         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4579                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4580
4581                 dcc->enable = 1;
4582                 dcc->meta_pitch = afb->base.pitches[1];
4583                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4584
4585                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4586                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4587         }
4588
4589         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4590         if (ret)
4591                 return ret;
4592
4593         return 0;
4594 }
4595
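/*
 * Translate a DRM framebuffer into DC plane size/address/tiling/DCC
 * state. Packed graphics formats use a single GRAPHICS address, while
 * semi-planar video formats get separate luma/chroma addresses and a
 * half-resolution chroma plane.
 */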
4596 static int
4597 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4598                              const struct amdgpu_framebuffer *afb,
4599                              const enum surface_pixel_format format,
4600                              const enum dc_rotation_angle rotation,
4601                              const uint64_t tiling_flags,
4602                              union dc_tiling_info *tiling_info,
4603                              struct plane_size *plane_size,
4604                              struct dc_plane_dcc_param *dcc,
4605                              struct dc_plane_address *address,
4606                              bool tmz_surface,
4607                              bool force_disable_dcc)
4608 {
4609         const struct drm_framebuffer *fb = &afb->base;
4610         int ret;
4611
4612         memset(tiling_info, 0, sizeof(*tiling_info));
4613         memset(plane_size, 0, sizeof(*plane_size));
4614         memset(dcc, 0, sizeof(*dcc));
4615         memset(address, 0, sizeof(*address));
4616
4617         address->tmz_surface = tmz_surface;
4618
4619         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4620                 uint64_t addr = afb->address + fb->offsets[0];
4621
4622                 plane_size->surface_size.x = 0;
4623                 plane_size->surface_size.y = 0;
4624                 plane_size->surface_size.width = fb->width;
4625                 plane_size->surface_size.height = fb->height;
4626                 plane_size->surface_pitch =
4627                         fb->pitches[0] / fb->format->cpp[0];
4628
4629                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4630                 address->grph.addr.low_part = lower_32_bits(addr);
4631                 address->grph.addr.high_part = upper_32_bits(addr);
4632         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4633                 uint64_t luma_addr = afb->address + fb->offsets[0];
4634                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4635
4636                 plane_size->surface_size.x = 0;
4637                 plane_size->surface_size.y = 0;
4638                 plane_size->surface_size.width = fb->width;
4639                 plane_size->surface_size.height = fb->height;
4640                 plane_size->surface_pitch =
4641                         fb->pitches[0] / fb->format->cpp[0];
4642
4643                 plane_size->chroma_size.x = 0;
4644                 plane_size->chroma_size.y = 0;
4645                 /* TODO: set these based on surface format */
4646                 plane_size->chroma_size.width = fb->width / 2;
4647                 plane_size->chroma_size.height = fb->height / 2;
4648
4649                 plane_size->chroma_pitch =
4650                         fb->pitches[1] / fb->format->cpp[1];
4651
4652                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4653                 address->video_progressive.luma_addr.low_part =
4654                         lower_32_bits(luma_addr);
4655                 address->video_progressive.luma_addr.high_part =
4656                         upper_32_bits(luma_addr);
4657                 address->video_progressive.chroma_addr.low_part =
4658                         lower_32_bits(chroma_addr);
4659                 address->video_progressive.chroma_addr.high_part =
4660                         upper_32_bits(chroma_addr);
4661         }
4662
4663         if (adev->family >= AMDGPU_FAMILY_AI) {
4664                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4665                                                                 rotation, plane_size,
4666                                                                 tiling_info, dcc,
4667                                                                 address,
4668                                                                 force_disable_dcc);
4669                 if (ret)
4670                         return ret;
4671         } else {
4672                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4673         }
4674
4675         return 0;
4676 }
4677
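/*
 * Derive DC blending state from a DRM plane state: per-pixel alpha is
 * enabled only for premultiplied overlay planes with an alpha-capable
 * format, and global (plane) alpha whenever the property is below its
 * 0xffff maximum. The 16-bit DRM alpha is scaled down to DC's 8 bits.
 */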
4678 static void
4679 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4680                                bool *per_pixel_alpha, bool *global_alpha,
4681                                int *global_alpha_value)
4682 {
4683         *per_pixel_alpha = false;
4684         *global_alpha = false;
4685         *global_alpha_value = 0xff;
4686
4687         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4688                 return;
4689
4690         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4691                 static const uint32_t alpha_formats[] = {
4692                         DRM_FORMAT_ARGB8888,
4693                         DRM_FORMAT_RGBA8888,
4694                         DRM_FORMAT_ABGR8888,
4695                 };
4696                 uint32_t format = plane_state->fb->format->format;
4697                 unsigned int i;
4698
4699                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4700                         if (format == alpha_formats[i]) {
4701                                 *per_pixel_alpha = true;
4702                                 break;
4703                         }
4704                 }
4705         }
4706
4707         if (plane_state->alpha < 0xffff) {
4708                 *global_alpha = true;
4709                 *global_alpha_value = plane_state->alpha >> 8;
4710         }
4711 }
4712
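/*
 * Map the DRM color encoding/range plane properties onto a DC color
 * space. RGB formats are always treated as sRGB; YCbCr formats select
 * BT.601/709/2020 in the full- or limited-range variant where DC
 * supports it (BT.2020 limited range is rejected).
 */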
4713 static int
4714 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4715                             const enum surface_pixel_format format,
4716                             enum dc_color_space *color_space)
4717 {
4718         bool full_range;
4719
4720         *color_space = COLOR_SPACE_SRGB;
4721
4722         /* DRM color properties only affect non-RGB formats. */
4723         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4724                 return 0;
4725
4726         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4727
4728         switch (plane_state->color_encoding) {
4729         case DRM_COLOR_YCBCR_BT601:
4730                 if (full_range)
4731                         *color_space = COLOR_SPACE_YCBCR601;
4732                 else
4733                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4734                 break;
4735
4736         case DRM_COLOR_YCBCR_BT709:
4737                 if (full_range)
4738                         *color_space = COLOR_SPACE_YCBCR709;
4739                 else
4740                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4741                 break;
4742
4743         case DRM_COLOR_YCBCR_BT2020:
4744                 if (full_range)
4745                         *color_space = COLOR_SPACE_2020_YCBCR;
4746                 else
4747                         return -EINVAL;
4748                 break;
4749
4750         default:
4751                 return -EINVAL;
4752         }
4753
4754         return 0;
4755 }
4756
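/*
 * Fill a complete dc_plane_info (format, rotation, tiling, DCC, color
 * space, blending) and the surface address for the given plane state.
 */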
4757 static int
4758 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4759                             const struct drm_plane_state *plane_state,
4760                             const uint64_t tiling_flags,
4761                             struct dc_plane_info *plane_info,
4762                             struct dc_plane_address *address,
4763                             bool tmz_surface,
4764                             bool force_disable_dcc)
4765 {
4766         const struct drm_framebuffer *fb = plane_state->fb;
4767         const struct amdgpu_framebuffer *afb =
4768                 to_amdgpu_framebuffer(plane_state->fb);
4769         int ret;
4770
4771         memset(plane_info, 0, sizeof(*plane_info));
4772
4773         switch (fb->format->format) {
4774         case DRM_FORMAT_C8:
4775                 plane_info->format =
4776                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4777                 break;
4778         case DRM_FORMAT_RGB565:
4779                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4780                 break;
4781         case DRM_FORMAT_XRGB8888:
4782         case DRM_FORMAT_ARGB8888:
4783                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4784                 break;
4785         case DRM_FORMAT_XRGB2101010:
4786         case DRM_FORMAT_ARGB2101010:
4787                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4788                 break;
4789         case DRM_FORMAT_XBGR2101010:
4790         case DRM_FORMAT_ABGR2101010:
4791                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4792                 break;
4793         case DRM_FORMAT_XBGR8888:
4794         case DRM_FORMAT_ABGR8888:
4795                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4796                 break;
4797         case DRM_FORMAT_NV21:
4798                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4799                 break;
4800         case DRM_FORMAT_NV12:
4801                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4802                 break;
4803         case DRM_FORMAT_P010:
4804                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4805                 break;
4806         case DRM_FORMAT_XRGB16161616F:
4807         case DRM_FORMAT_ARGB16161616F:
4808                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4809                 break;
4810         case DRM_FORMAT_XBGR16161616F:
4811         case DRM_FORMAT_ABGR16161616F:
4812                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4813                 break;
4814         default:
4815                 DRM_ERROR(
4816                         "Unsupported screen format %p4cc\n",
4817                         &fb->format->format);
4818                 return -EINVAL;
4819         }
4820
4821         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4822         case DRM_MODE_ROTATE_0:
4823                 plane_info->rotation = ROTATION_ANGLE_0;
4824                 break;
4825         case DRM_MODE_ROTATE_90:
4826                 plane_info->rotation = ROTATION_ANGLE_90;
4827                 break;
4828         case DRM_MODE_ROTATE_180:
4829                 plane_info->rotation = ROTATION_ANGLE_180;
4830                 break;
4831         case DRM_MODE_ROTATE_270:
4832                 plane_info->rotation = ROTATION_ANGLE_270;
4833                 break;
4834         default:
4835                 plane_info->rotation = ROTATION_ANGLE_0;
4836                 break;
4837         }
4838
4839         plane_info->visible = true;
4840         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4841
4842         plane_info->layer_index = 0;
4843
4844         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4845                                           &plane_info->color_space);
4846         if (ret)
4847                 return ret;
4848
4849         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4850                                            plane_info->rotation, tiling_flags,
4851                                            &plane_info->tiling_info,
4852                                            &plane_info->plane_size,
4853                                            &plane_info->dcc, address, tmz_surface,
4854                                            force_disable_dcc);
4855         if (ret)
4856                 return ret;
4857
4858         fill_blending_from_plane_state(
4859                 plane_state, &plane_info->per_pixel_alpha,
4860                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4861
4862         return 0;
4863 }
4864
4865 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4866                                     struct dc_plane_state *dc_plane_state,
4867                                     struct drm_plane_state *plane_state,
4868                                     struct drm_crtc_state *crtc_state)
4869 {
4870         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4871         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4872         struct dc_scaling_info scaling_info;
4873         struct dc_plane_info plane_info;
4874         int ret;
4875         bool force_disable_dcc = false;
4876
4877         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4878         if (ret)
4879                 return ret;
4880
4881         dc_plane_state->src_rect = scaling_info.src_rect;
4882         dc_plane_state->dst_rect = scaling_info.dst_rect;
4883         dc_plane_state->clip_rect = scaling_info.clip_rect;
4884         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4885
4886         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4887         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4888                                           afb->tiling_flags,
4889                                           &plane_info,
4890                                           &dc_plane_state->address,
4891                                           afb->tmz_surface,
4892                                           force_disable_dcc);
4893         if (ret)
4894                 return ret;
4895
4896         dc_plane_state->format = plane_info.format;
4897         dc_plane_state->color_space = plane_info.color_space;
4899         dc_plane_state->plane_size = plane_info.plane_size;
4900         dc_plane_state->rotation = plane_info.rotation;
4901         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4902         dc_plane_state->stereo_format = plane_info.stereo_format;
4903         dc_plane_state->tiling_info = plane_info.tiling_info;
4904         dc_plane_state->visible = plane_info.visible;
4905         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4906         dc_plane_state->global_alpha = plane_info.global_alpha;
4907         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4908         dc_plane_state->dcc = plane_info.dcc;
4909         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4910         dc_plane_state->flip_int_enabled = true;
4911
4912         /*
4913          * Always set input transfer function, since plane state is refreshed
4914          * every time.
4915          */
4916         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4917         if (ret)
4918                 return ret;
4919
4920         return 0;
4921 }
4922
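/*
 * Program stream->src (viewport) and stream->dst (addressable area) for
 * the connector's scaling mode. RMX_ASPECT/RMX_OFF shrink the centered
 * destination on one axis to preserve the source aspect ratio, e.g. a
 * 1920x1080 mode on a 1920x1200 timing yields a 1920x1080 dst rect at
 * y = 60. RMX_CENTER displays the source unscaled; underscan borders
 * are then subtracted from the result.
 */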
4923 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4924                                            const struct dm_connector_state *dm_state,
4925                                            struct dc_stream_state *stream)
4926 {
4927         enum amdgpu_rmx_type rmx_type;
4928
4929         struct rect src = { 0 }; /* viewport in composition space */
4930         struct rect dst = { 0 }; /* stream addressable area */
4931
4932         /* No mode; nothing to be done. */
4933         if (!mode)
4934                 return;
4935
4936         /* Full screen scaling by default */
4937         src.width = mode->hdisplay;
4938         src.height = mode->vdisplay;
4939         dst.width = stream->timing.h_addressable;
4940         dst.height = stream->timing.v_addressable;
4941
4942         if (dm_state) {
4943                 rmx_type = dm_state->scaling;
4944                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4945                         if (src.width * dst.height <
4946                                         src.height * dst.width) {
4947                                 /* height needs less upscaling/more downscaling */
4948                                 dst.width = src.width *
4949                                                 dst.height / src.height;
4950                         } else {
4951                                 /* width needs less upscaling/more downscaling */
4952                                 dst.height = src.height *
4953                                                 dst.width / src.width;
4954                         }
4955                 } else if (rmx_type == RMX_CENTER) {
4956                         dst = src;
4957                 }
4958
4959                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4960                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4961
4962                 if (dm_state->underscan_enable) {
4963                         dst.x += dm_state->underscan_hborder / 2;
4964                         dst.y += dm_state->underscan_vborder / 2;
4965                         dst.width -= dm_state->underscan_hborder;
4966                         dst.height -= dm_state->underscan_vborder;
4967                 }
4968         }
4969
4970         stream->src = src;
4971         stream->dst = dst;
4972
4973         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4974                       dst.x, dst.y, dst.width, dst.height);
4976 }
4977
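/*
 * Pick the stream color depth from the EDID-reported bpc (for YCbCr
 * 4:2:0, from the HDMI HF-VSDB deep-color caps), capped by the
 * connector's requested max bpc and rounded down to an even value.
 */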
4978 static enum dc_color_depth
4979 convert_color_depth_from_display_info(const struct drm_connector *connector,
4980                                       bool is_y420, int requested_bpc)
4981 {
4982         uint8_t bpc;
4983
4984         if (is_y420) {
4985                 bpc = 8;
4986
4987                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4988                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4989                         bpc = 16;
4990                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4991                         bpc = 12;
4992                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4993                         bpc = 10;
4994         } else {
4995                 bpc = (uint8_t)connector->display_info.bpc;
4996                 /* Assume 8 bpc by default if no bpc is specified. */
4997                 bpc = bpc ? bpc : 8;
4998         }
4999
5000         if (requested_bpc > 0) {
5001                 /*
5002                  * Cap display bpc based on the user requested value.
5003                  *
5004                  * The value for state->max_bpc may not be correctly updated
5005                  * depending on when the connector gets added to the state
5006                  * or if this was called outside of atomic check, so it
5007                  * can't be used directly.
5008                  */
5009                 bpc = min_t(u8, bpc, requested_bpc);
5010
5011                 /* Round down to the nearest even number. */
5012                 bpc = bpc - (bpc & 1);
5013         }
5014
5015         switch (bpc) {
5016         case 0:
5017                 /*
5018                  * Temporary workaround: DRM doesn't parse color depth for
5019                  * EDID revisions before 1.4.
5020                  * TODO: Fix edid parsing
5021                  */
5022                 return COLOR_DEPTH_888;
5023         case 6:
5024                 return COLOR_DEPTH_666;
5025         case 8:
5026                 return COLOR_DEPTH_888;
5027         case 10:
5028                 return COLOR_DEPTH_101010;
5029         case 12:
5030                 return COLOR_DEPTH_121212;
5031         case 14:
5032                 return COLOR_DEPTH_141414;
5033         case 16:
5034                 return COLOR_DEPTH_161616;
5035         default:
5036                 return COLOR_DEPTH_UNDEFINED;
5037         }
5038 }
5039
5040 static enum dc_aspect_ratio
5041 get_aspect_ratio(const struct drm_display_mode *mode_in)
5042 {
5043         /* 1-1 mapping, since both enums follow the HDMI spec. */
5044         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5045 }
5046
5047 static enum dc_color_space
5048 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5049 {
5050         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5051
5052         switch (dc_crtc_timing->pixel_encoding) {
5053         case PIXEL_ENCODING_YCBCR422:
5054         case PIXEL_ENCODING_YCBCR444:
5055         case PIXEL_ENCODING_YCBCR420:
5056         {
5057                 /*
5058                  * 27030 kHz is the separation point between HDTV and SDTV;
5059                  * per the HDMI spec we use YCbCr709 above it and YCbCr601
5060                  * below it.
5061                  */
5062                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5063                         if (dc_crtc_timing->flags.Y_ONLY)
5064                                 color_space =
5065                                         COLOR_SPACE_YCBCR709_LIMITED;
5066                         else
5067                                 color_space = COLOR_SPACE_YCBCR709;
5068                 } else {
5069                         if (dc_crtc_timing->flags.Y_ONLY)
5070                                 color_space =
5071                                         COLOR_SPACE_YCBCR601_LIMITED;
5072                         else
5073                                 color_space = COLOR_SPACE_YCBCR601;
5074                 }
5075
5076         }
5077         break;
5078         case PIXEL_ENCODING_RGB:
5079                 color_space = COLOR_SPACE_SRGB;
5080                 break;
5081
5082         default:
5083                 WARN_ON(1);
5084                 break;
5085         }
5086
5087         return color_space;
5088 }
5089
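/*
 * Walk the color depth down until the normalized pixel clock fits the
 * sink's max TMDS clock (both in kHz). For example, a 600000 kHz clock
 * at 10 bpc normalizes to 600000 * 30 / 24 = 750000, which a 600 MHz
 * sink rejects, so the loop retries at 8 bpc. Returns false if nothing
 * down to 8 bpc fits.
 */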
5090 static bool adjust_colour_depth_from_display_info(
5091         struct dc_crtc_timing *timing_out,
5092         const struct drm_display_info *info)
5093 {
5094         enum dc_color_depth depth = timing_out->display_color_depth;
5095         int normalized_clk;
5096         do {
5097                 normalized_clk = timing_out->pix_clk_100hz / 10;
5098                 /* YCbCr 4:2:0 requires an additional pixel-clock adjustment of 1/2 */
5099                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5100                         normalized_clk /= 2;
5101                 /* Adjust the pixel clock per the HDMI spec for the colour depth */
5102                 switch (depth) {
5103                 case COLOR_DEPTH_888:
5104                         break;
5105                 case COLOR_DEPTH_101010:
5106                         normalized_clk = (normalized_clk * 30) / 24;
5107                         break;
5108                 case COLOR_DEPTH_121212:
5109                         normalized_clk = (normalized_clk * 36) / 24;
5110                         break;
5111                 case COLOR_DEPTH_161616:
5112                         normalized_clk = (normalized_clk * 48) / 24;
5113                         break;
5114                 default:
5115                         /* The above depths are the only ones valid for HDMI. */
5116                         return false;
5117                 }
5118                 if (normalized_clk <= info->max_tmds_clock) {
5119                         timing_out->display_color_depth = depth;
5120                         return true;
5121                 }
5122         } while (--depth > COLOR_DEPTH_666);
5123         return false;
5124 }
5125
5126 static void fill_stream_properties_from_drm_display_mode(
5127         struct dc_stream_state *stream,
5128         const struct drm_display_mode *mode_in,
5129         const struct drm_connector *connector,
5130         const struct drm_connector_state *connector_state,
5131         const struct dc_stream_state *old_stream,
5132         int requested_bpc)
5133 {
5134         struct dc_crtc_timing *timing_out = &stream->timing;
5135         const struct drm_display_info *info = &connector->display_info;
5136         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5137         struct hdmi_vendor_infoframe hv_frame;
5138         struct hdmi_avi_infoframe avi_frame;
5139
5140         memset(&hv_frame, 0, sizeof(hv_frame));
5141         memset(&avi_frame, 0, sizeof(avi_frame));
5142
5143         timing_out->h_border_left = 0;
5144         timing_out->h_border_right = 0;
5145         timing_out->v_border_top = 0;
5146         timing_out->v_border_bottom = 0;
5147         /* TODO: un-hardcode */
5148         if (drm_mode_is_420_only(info, mode_in)
5149                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5150                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5151         else if (drm_mode_is_420_also(info, mode_in)
5152                         && aconnector->force_yuv420_output)
5153                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5154         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5155                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5156                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5157         else
5158                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5159
5160         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5161         timing_out->display_color_depth = convert_color_depth_from_display_info(
5162                 connector,
5163                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5164                 requested_bpc);
5165         timing_out->scan_type = SCANNING_TYPE_NODATA;
5166         timing_out->hdmi_vic = 0;
5167
5168         if (old_stream) {
5169                 timing_out->vic = old_stream->timing.vic;
5170                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5171                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5172         } else {
5173                 timing_out->vic = drm_match_cea_mode(mode_in);
5174                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5175                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5176                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5177                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5178         }
5179
5180         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5181                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5182                 timing_out->vic = avi_frame.video_code;
5183                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5184                 timing_out->hdmi_vic = hv_frame.vic;
5185         }
5186
5187         if (is_freesync_video_mode(mode_in, aconnector)) {
5188                 timing_out->h_addressable = mode_in->hdisplay;
5189                 timing_out->h_total = mode_in->htotal;
5190                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5191                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5192                 timing_out->v_total = mode_in->vtotal;
5193                 timing_out->v_addressable = mode_in->vdisplay;
5194                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5195                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5196                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5197         } else {
5198                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5199                 timing_out->h_total = mode_in->crtc_htotal;
5200                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5201                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5202                 timing_out->v_total = mode_in->crtc_vtotal;
5203                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5204                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5205                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5206                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5207         }
5208
5209         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5210
5211         stream->output_color_space = get_output_color_space(timing_out);
5212
5213         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5214         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5215         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5216                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5217                     drm_mode_is_420_also(info, mode_in) &&
5218                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5219                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5220                         adjust_colour_depth_from_display_info(timing_out, info);
5221                 }
5222         }
5223 }
5224
5225 static void fill_audio_info(struct audio_info *audio_info,
5226                             const struct drm_connector *drm_connector,
5227                             const struct dc_sink *dc_sink)
5228 {
5229         int i = 0;
5230         int cea_revision = 0;
5231         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5232
5233         audio_info->manufacture_id = edid_caps->manufacturer_id;
5234         audio_info->product_id = edid_caps->product_id;
5235
5236         cea_revision = drm_connector->display_info.cea_rev;
5237
5238         strscpy(audio_info->display_name,
5239                 edid_caps->display_name,
5240                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5241
5242         if (cea_revision >= 3) {
5243                 audio_info->mode_count = edid_caps->audio_mode_count;
5244
5245                 for (i = 0; i < audio_info->mode_count; ++i) {
5246                         audio_info->modes[i].format_code =
5247                                         (enum audio_format_code)
5248                                         (edid_caps->audio_modes[i].format_code);
5249                         audio_info->modes[i].channel_count =
5250                                         edid_caps->audio_modes[i].channel_count;
5251                         audio_info->modes[i].sample_rates.all =
5252                                         edid_caps->audio_modes[i].sample_rate;
5253                         audio_info->modes[i].sample_size =
5254                                         edid_caps->audio_modes[i].sample_size;
5255                 }
5256         }
5257
5258         audio_info->flags.all = edid_caps->speaker_flags;
5259
5260         /* TODO: We only check progressive mode; check interlaced mode too. */
5261         if (drm_connector->latency_present[0]) {
5262                 audio_info->video_latency = drm_connector->video_latency[0];
5263                 audio_info->audio_latency = drm_connector->audio_latency[0];
5264         }
5265
5266         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5267
5268 }
5269
5270 static void
5271 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5272                                       struct drm_display_mode *dst_mode)
5273 {
5274         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5275         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5276         dst_mode->crtc_clock = src_mode->crtc_clock;
5277         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5278         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5279         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5280         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5281         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5282         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5283         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5284         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5285         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5286         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5287         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5288 }
5289
5290 static void
5291 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5292                                         const struct drm_display_mode *native_mode,
5293                                         bool scale_enabled)
5294 {
5295         if (scale_enabled) {
5296                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5297         } else if (native_mode->clock == drm_mode->clock &&
5298                         native_mode->htotal == drm_mode->htotal &&
5299                         native_mode->vtotal == drm_mode->vtotal) {
5300                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5301         } else {
5302                 /* Neither scaling nor an amdgpu-inserted mode: nothing to patch. */
5303         }
5304 }
5305
5306 static struct dc_sink *
5307 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5308 {
5309         struct dc_sink_init_data sink_init_data = { 0 };
5310         struct dc_sink *sink = NULL;
5311         sink_init_data.link = aconnector->dc_link;
5312         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5313
5314         sink = dc_sink_create(&sink_init_data);
5315         if (!sink) {
5316                 DRM_ERROR("Failed to create sink!\n");
5317                 return NULL;
5318         }
5319         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5320
5321         return sink;
5322 }
5323
5324 static void set_multisync_trigger_params(
5325                 struct dc_stream_state *stream)
5326 {
5327         struct dc_stream_state *master = NULL;
5328
5329         if (stream->triggered_crtc_reset.enabled) {
5330                 master = stream->triggered_crtc_reset.event_source;
5331                 stream->triggered_crtc_reset.event =
5332                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5333                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5334                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5335         }
5336 }
5337
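/*
 * Pick the multisync master: among the trigger-reset-enabled streams,
 * the one with the highest refresh rate. All streams then point their
 * reset event_source at that master.
 */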
5338 static void set_master_stream(struct dc_stream_state *stream_set[],
5339                               int stream_count)
5340 {
5341         int j, highest_rfr = 0, master_stream = 0;
5342
5343         for (j = 0; j < stream_count; j++) {
5344                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5345                         int refresh_rate = 0;
5346
5347                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5348                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5349                         if (refresh_rate > highest_rfr) {
5350                                 highest_rfr = refresh_rate;
5351                                 master_stream = j;
5352                         }
5353                 }
5354         }
5355         for (j = 0; j < stream_count; j++) {
5356                 if (stream_set[j])
5357                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5358         }
5359 }
5360
5361 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5362 {
5363         int i = 0;
5364         struct dc_stream_state *stream;
5365
5366         if (context->stream_count < 2)
5367                 return;
5368         for (i = 0; i < context->stream_count; i++) {
5369                 if (!context->streams[i])
5370                         continue;
5371                 /*
5372                  * TODO: add a function to read AMD VSDB bits and set
5373                  * crtc_sync_master.multi_sync_enabled flag
5374                  * For now it's set to false
5375                  */
5376         }
5377
5378         set_master_stream(context->streams, context->stream_count);
5379
5380         for (i = 0; i < context->stream_count; i++) {
5381                 stream = context->streams[i];
5382
5383                 if (!stream)
5384                         continue;
5385
5386                 set_multisync_trigger_params(stream);
5387         }
5388 }
5389
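/*
 * Return the mode with the highest refresh rate at the preferred
 * resolution, caching it in freesync_vid_base so the answer stays
 * stable across later mode-list updates.
 */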
5390 static struct drm_display_mode *
5391 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5392                           bool use_probed_modes)
5393 {
5394         struct drm_display_mode *m, *m_pref = NULL;
5395         u16 current_refresh, highest_refresh;
5396         struct list_head *list_head = use_probed_modes ?
5397                                                     &aconnector->base.probed_modes :
5398                                                     &aconnector->base.modes;
5399
5400         if (aconnector->freesync_vid_base.clock != 0)
5401                 return &aconnector->freesync_vid_base;
5402
5403         /* Find the preferred mode */
5404         list_for_each_entry(m, list_head, head) {
5405                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5406                         m_pref = m;
5407                         break;
5408                 }
5409         }
5410
5411         if (!m_pref) {
5412                 /* Probably an EDID with no preferred mode: fall back to the first entry. */
5413                 m_pref = list_first_entry_or_null(
5414                         &aconnector->base.modes, struct drm_display_mode, head);
5415                 if (!m_pref) {
5416                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5417                         return NULL;
5418                 }
5419         }
5420
5421         highest_refresh = drm_mode_vrefresh(m_pref);
5422
5423         /*
5424          * Find the mode with highest refresh rate with same resolution.
5425          * For some monitors, preferred mode is not the mode with highest
5426          * supported refresh rate.
5427          */
5428         list_for_each_entry(m, list_head, head) {
5429                 current_refresh  = drm_mode_vrefresh(m);
5430
5431                 if (m->hdisplay == m_pref->hdisplay &&
5432                     m->vdisplay == m_pref->vdisplay &&
5433                     highest_refresh < current_refresh) {
5434                         highest_refresh = current_refresh;
5435                         m_pref = m;
5436                 }
5437         }
5438
5439         aconnector->freesync_vid_base = *m_pref;
5440         return m_pref;
5441 }
5442
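/*
 * A "FreeSync video mode" is a mode derived from the cached base mode by
 * changing only the vertical front porch: pixel clock and all horizontal
 * timings must match, and any vtotal difference must be mirrored exactly
 * in vsync_start/vsync_end so the sync and back porch stay untouched.
 */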
5443 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5444                                    struct amdgpu_dm_connector *aconnector)
5445 {
5446         struct drm_display_mode *high_mode;
5447         int timing_diff;
5448
5449         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5450         if (!high_mode || !mode)
5451                 return false;
5452
5453         timing_diff = high_mode->vtotal - mode->vtotal;
5454
5455         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5456             high_mode->hdisplay != mode->hdisplay ||
5457             high_mode->vdisplay != mode->vdisplay ||
5458             high_mode->hsync_start != mode->hsync_start ||
5459             high_mode->hsync_end != mode->hsync_end ||
5460             high_mode->htotal != mode->htotal ||
5461             high_mode->hskew != mode->hskew ||
5462             high_mode->vscan != mode->vscan ||
5463             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5464             high_mode->vsync_end - mode->vsync_end != timing_diff)
5465                 return false;
5466         else
5467                 return true;
5468 }
5469
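/*
 * Build a dc_stream_state for this connector/mode combination: pick the
 * real dc_sink (or a fake one for headless operation), derive the CRTC
 * timing either from the FreeSync base mode or from the preferred mode,
 * optionally configure DSC on DP links, then fill scaling, audio and the
 * VSC/HF-VSIF info packets.
 */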
5470 static struct dc_stream_state *
5471 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5472                        const struct drm_display_mode *drm_mode,
5473                        const struct dm_connector_state *dm_state,
5474                        const struct dc_stream_state *old_stream,
5475                        int requested_bpc)
5476 {
5477         struct drm_display_mode *preferred_mode = NULL;
5478         struct drm_connector *drm_connector;
5479         const struct drm_connector_state *con_state =
5480                 dm_state ? &dm_state->base : NULL;
5481         struct dc_stream_state *stream = NULL;
5482         struct drm_display_mode mode = *drm_mode;
5483         struct drm_display_mode saved_mode;
5484         struct drm_display_mode *freesync_mode = NULL;
5485         bool native_mode_found = false;
5486         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5487         int mode_refresh;
5488         int preferred_refresh = 0;
5489 #if defined(CONFIG_DRM_AMD_DC_DCN)
5490         struct dsc_dec_dpcd_caps dsc_caps;
5491         uint32_t link_bandwidth_kbps;
5492 #endif
5493         struct dc_sink *sink = NULL;
5494
5495         memset(&saved_mode, 0, sizeof(saved_mode));
5496
5497         if (aconnector == NULL) {
5498                 DRM_ERROR("aconnector is NULL!\n");
5499                 return stream;
5500         }
5501
5502         drm_connector = &aconnector->base;
5503
5504         if (!aconnector->dc_sink) {
5505                 sink = create_fake_sink(aconnector);
5506                 if (!sink)
5507                         return stream;
5508         } else {
5509                 sink = aconnector->dc_sink;
5510                 dc_sink_retain(sink);
5511         }
5512
5513         stream = dc_create_stream_for_sink(sink);
5514
5515         if (stream == NULL) {
5516                 DRM_ERROR("Failed to create stream for sink!\n");
5517                 goto finish;
5518         }
5519
5520         stream->dm_stream_context = aconnector;
5521
5522         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5523                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5524
5525         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5526                 /* Search for preferred mode */
5527                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5528                         native_mode_found = true;
5529                         break;
5530                 }
5531         }
5532         if (!native_mode_found)
5533                 preferred_mode = list_first_entry_or_null(
5534                                 &aconnector->base.modes,
5535                                 struct drm_display_mode,
5536                                 head);
5537
5538         mode_refresh = drm_mode_vrefresh(&mode);
5539
5540         if (preferred_mode == NULL) {
5541                 /*
5542                  * This may not be an error: the use case is when there are no
5543                  * user-mode calls to reset and set the mode upon hotplug. In that
5544                  * case we set the mode ourselves to restore the previous one, and
5545                  * the mode list may not be filled in yet.
5546                  */
5547                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5548         } else {
5549                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5550                                  is_freesync_video_mode(&mode, aconnector);
5551                 if (recalculate_timing) {
5552                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5553                         saved_mode = mode;
5554                         mode = *freesync_mode;
5555                 } else {
5556                         decide_crtc_timing_for_drm_display_mode(
5557                                 &mode, preferred_mode,
5558                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5559                 }
5560
5561                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5562         }
5563
5564         if (recalculate_timing)
5565                 drm_mode_set_crtcinfo(&saved_mode, 0);
5566         else if (!dm_state)
5567                 drm_mode_set_crtcinfo(&mode, 0);
5568
5569         /*
5570          * If scaling is enabled and the refresh rate didn't change,
5571          * copy the VIC and polarities from the old timings.
5572          */
5573         if (!recalculate_timing || mode_refresh != preferred_refresh)
5574                 fill_stream_properties_from_drm_display_mode(
5575                         stream, &mode, &aconnector->base, con_state, NULL,
5576                         requested_bpc);
5577         else
5578                 fill_stream_properties_from_drm_display_mode(
5579                         stream, &mode, &aconnector->base, con_state, old_stream,
5580                         requested_bpc);
5581
5582         stream->timing.flags.DSC = 0;
5583
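        /*
         * DSC on DP (DCN hardware only): parse the decoder's DSC caps from
         * DPCD and try to compute a DSC config that fits the verified link
         * bandwidth.  The debugfs force-enable setting overrides the
         * "only when needed" policy, and explicit slice/bpp overrides from
         * debugfs are applied afterwards.
         */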
5584         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5585 #if defined(CONFIG_DRM_AMD_DC_DCN)
5586                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5587                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5588                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5589                                       &dsc_caps);
5590                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5591                                                              dc_link_get_link_cap(aconnector->dc_link));
5592
5593                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5594                         /* Set DSC policy according to dsc_clock_en */
5595                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5596                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5597
5598                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5599                                                   &dsc_caps,
5600                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5601                                                   0,
5602                                                   link_bandwidth_kbps,
5603                                                   &stream->timing,
5604                                                   &stream->timing.dsc_cfg))
5605                                 stream->timing.flags.DSC = 1;
5606                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5607                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5608                                 stream->timing.flags.DSC = 1;
5609
5610                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5611                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5612
5613                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5614                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5615
5616                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5617                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5618                 }
5619 #endif
5620         }
5621
5622         update_stream_scaling_settings(&mode, dm_state, stream);
5623
5624         fill_audio_info(
5625                 &stream->audio_info,
5626                 drm_connector,
5627                 sink);
5628
5629         update_stream_signal(stream, sink);
5630
5631         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5632                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5633
5634         if (stream->link->psr_settings.psr_feature_enabled) {
5635                 //
5636                 // Decide whether the stream supports VSC SDP colorimetry
5637                 // before building the VSC info packet.
5638                 //
5639                 stream->use_vsc_sdp_for_colorimetry = false;
5640                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5641                         stream->use_vsc_sdp_for_colorimetry =
5642                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5643                 } else {
5644                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5645                                 stream->use_vsc_sdp_for_colorimetry = true;
5646                 }
5647                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5648         }
5649 finish:
5650         dc_sink_release(sink);
5651
5652         return stream;
5653 }
5654
5655 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5656 {
5657         drm_crtc_cleanup(crtc);
5658         kfree(crtc);
5659 }
5660
5661 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5662                                   struct drm_crtc_state *state)
5663 {
5664         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5665
5666         /* TODO: destroy dc_stream objects once the stream object is flattened */
5667         if (cur->stream)
5668                 dc_stream_release(cur->stream);
5669
5670
5672
5673
5675 }
5676
5677 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5678 {
5679         struct dm_crtc_state *state;
5680
5681         if (crtc->state)
5682                 dm_crtc_destroy_state(crtc, crtc->state);
5683
5684         state = kzalloc(sizeof(*state), GFP_KERNEL);
5685         if (WARN_ON(!state))
5686                 return;
5687
5688         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5689 }
5690
5691 static struct drm_crtc_state *
5692 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5693 {
5694         struct dm_crtc_state *state, *cur;
5695
5696         if (WARN_ON(!crtc->state))
5697                 return NULL;
5698
5699         cur = to_dm_crtc_state(crtc->state);
5700
5701         state = kzalloc(sizeof(*state), GFP_KERNEL);
5702         if (!state)
5703                 return NULL;
5704
5705         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5706
5707         if (cur->stream) {
5708                 state->stream = cur->stream;
5709                 dc_stream_retain(state->stream);
5710         }
5711
5712         state->active_planes = cur->active_planes;
5713         state->vrr_infopacket = cur->vrr_infopacket;
5714         state->abm_level = cur->abm_level;
5715         state->vrr_supported = cur->vrr_supported;
5716         state->freesync_config = cur->freesync_config;
5717         state->cm_has_degamma = cur->cm_has_degamma;
5718         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5719         /* TODO: duplicate dc_stream once the stream object is flattened */
5720
5721         return &state->base;
5722 }
5723
5724 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5725 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5726 {
5727         crtc_debugfs_init(crtc);
5728
5729         return 0;
5730 }
5731 #endif
5732
5733 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5734 {
5735         enum dc_irq_source irq_source;
5736         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5737         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5738         int rc;
5739
5740         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5741
5742         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5743
5744         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5745                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5746         return rc;
5747 }
5748
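/*
 * VBLANK and VUPDATE interrupts are toggled together here: VUPDATE is
 * only needed while VRR is active, so it is enabled on demand and always
 * disabled along with VBLANK.  On DCN the MALL reconfiguration is handed
 * off to the deferred vblank work item, presumably because the required
 * DC calls cannot be made from this context.
 */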
5749 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5750 {
5751         enum dc_irq_source irq_source;
5752         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5753         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5754         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5755 #if defined(CONFIG_DRM_AMD_DC_DCN)
5756         struct amdgpu_display_manager *dm = &adev->dm;
5757         unsigned long flags;
5758 #endif
5759         int rc = 0;
5760
5761         if (enable) {
5762                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5763                 if (amdgpu_dm_vrr_active(acrtc_state))
5764                         rc = dm_set_vupdate_irq(crtc, true);
5765         } else {
5766                 /* vblank irq off -> vupdate irq off */
5767                 rc = dm_set_vupdate_irq(crtc, false);
5768         }
5769
5770         if (rc)
5771                 return rc;
5772
5773         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5774
5775         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5776                 return -EBUSY;
5777
5778         if (amdgpu_in_reset(adev))
5779                 return 0;
5780
5781 #if defined(CONFIG_DRM_AMD_DC_DCN)
5782         spin_lock_irqsave(&dm->vblank_lock, flags);
5783         dm->vblank_workqueue->dm = dm;
5784         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5785         dm->vblank_workqueue->enable = enable;
5786         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5787         schedule_work(&dm->vblank_workqueue->mall_work);
5788 #endif
5789
5790         return 0;
5791 }
5792
5793 static int dm_enable_vblank(struct drm_crtc *crtc)
5794 {
5795         return dm_set_vblank(crtc, true);
5796 }
5797
5798 static void dm_disable_vblank(struct drm_crtc *crtc)
5799 {
5800         dm_set_vblank(crtc, false);
5801 }
5802
5803 /* Implemented only the options currently available for the driver */
5804 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5805         .reset = dm_crtc_reset_state,
5806         .destroy = amdgpu_dm_crtc_destroy,
5807         .set_config = drm_atomic_helper_set_config,
5808         .page_flip = drm_atomic_helper_page_flip,
5809         .atomic_duplicate_state = dm_crtc_duplicate_state,
5810         .atomic_destroy_state = dm_crtc_destroy_state,
5811         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5812         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5813         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5814         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5815         .enable_vblank = dm_enable_vblank,
5816         .disable_vblank = dm_disable_vblank,
5817         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5818 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5819         .late_register = amdgpu_dm_crtc_late_register,
5820 #endif
5821 };
5822
5823 static enum drm_connector_status
5824 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5825 {
5826         bool connected;
5827         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5828
5829         /*
5830          * Notes:
5831          * 1. This interface is NOT called in context of HPD irq.
5832          * 2. This interface *is called* in context of user-mode ioctl. Which
5833          * makes it a bad place for *any* MST-related activity.
5834          */
5835
5836         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5837             !aconnector->fake_enable)
5838                 connected = (aconnector->dc_sink != NULL);
5839         else
5840                 connected = (aconnector->base.force == DRM_FORCE_ON);
5841
5842         update_subconnector_property(aconnector);
5843
5844         return (connected ? connector_status_connected :
5845                         connector_status_disconnected);
5846 }
5847
5848 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5849                                             struct drm_connector_state *connector_state,
5850                                             struct drm_property *property,
5851                                             uint64_t val)
5852 {
5853         struct drm_device *dev = connector->dev;
5854         struct amdgpu_device *adev = drm_to_adev(dev);
5855         struct dm_connector_state *dm_old_state =
5856                 to_dm_connector_state(connector->state);
5857         struct dm_connector_state *dm_new_state =
5858                 to_dm_connector_state(connector_state);
5859
5860         int ret = -EINVAL;
5861
5862         if (property == dev->mode_config.scaling_mode_property) {
5863                 enum amdgpu_rmx_type rmx_type;
5864
5865                 switch (val) {
5866                 case DRM_MODE_SCALE_CENTER:
5867                         rmx_type = RMX_CENTER;
5868                         break;
5869                 case DRM_MODE_SCALE_ASPECT:
5870                         rmx_type = RMX_ASPECT;
5871                         break;
5872                 case DRM_MODE_SCALE_FULLSCREEN:
5873                         rmx_type = RMX_FULL;
5874                         break;
5875                 case DRM_MODE_SCALE_NONE:
5876                 default:
5877                         rmx_type = RMX_OFF;
5878                         break;
5879                 }
5880
5881                 if (dm_old_state->scaling == rmx_type)
5882                         return 0;
5883
5884                 dm_new_state->scaling = rmx_type;
5885                 ret = 0;
5886         } else if (property == adev->mode_info.underscan_hborder_property) {
5887                 dm_new_state->underscan_hborder = val;
5888                 ret = 0;
5889         } else if (property == adev->mode_info.underscan_vborder_property) {
5890                 dm_new_state->underscan_vborder = val;
5891                 ret = 0;
5892         } else if (property == adev->mode_info.underscan_property) {
5893                 dm_new_state->underscan_enable = val;
5894                 ret = 0;
5895         } else if (property == adev->mode_info.abm_level_property) {
5896                 dm_new_state->abm_level = val;
5897                 ret = 0;
5898         }
5899
5900         return ret;
5901 }
5902
5903 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5904                                             const struct drm_connector_state *state,
5905                                             struct drm_property *property,
5906                                             uint64_t *val)
5907 {
5908         struct drm_device *dev = connector->dev;
5909         struct amdgpu_device *adev = drm_to_adev(dev);
5910         struct dm_connector_state *dm_state =
5911                 to_dm_connector_state(state);
5912         int ret = -EINVAL;
5913
5914         if (property == dev->mode_config.scaling_mode_property) {
5915                 switch (dm_state->scaling) {
5916                 case RMX_CENTER:
5917                         *val = DRM_MODE_SCALE_CENTER;
5918                         break;
5919                 case RMX_ASPECT:
5920                         *val = DRM_MODE_SCALE_ASPECT;
5921                         break;
5922                 case RMX_FULL:
5923                         *val = DRM_MODE_SCALE_FULLSCREEN;
5924                         break;
5925                 case RMX_OFF:
5926                 default:
5927                         *val = DRM_MODE_SCALE_NONE;
5928                         break;
5929                 }
5930                 ret = 0;
5931         } else if (property == adev->mode_info.underscan_hborder_property) {
5932                 *val = dm_state->underscan_hborder;
5933                 ret = 0;
5934         } else if (property == adev->mode_info.underscan_vborder_property) {
5935                 *val = dm_state->underscan_vborder;
5936                 ret = 0;
5937         } else if (property == adev->mode_info.underscan_property) {
5938                 *val = dm_state->underscan_enable;
5939                 ret = 0;
5940         } else if (property == adev->mode_info.abm_level_property) {
5941                 *val = dm_state->abm_level;
5942                 ret = 0;
5943         }
5944
5945         return ret;
5946 }
5947
5948 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5949 {
5950         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5951
5952         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5953 }
5954
5955 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5956 {
5957         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5958         const struct dc_link *link = aconnector->dc_link;
5959         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5960         struct amdgpu_display_manager *dm = &adev->dm;
5961
5962         /*
5963          * Only call this if mst_mgr was initialized earlier, since it's not
5964          * done for all connector types.
5965          */
5966         if (aconnector->mst_mgr.dev)
5967                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5968
5969 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5970         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5971
5972         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5973             link->type != dc_connection_none &&
5974             dm->backlight_dev) {
5975                 backlight_device_unregister(dm->backlight_dev);
5976                 dm->backlight_dev = NULL;
5977         }
5978 #endif
5979
5980         if (aconnector->dc_em_sink)
5981                 dc_sink_release(aconnector->dc_em_sink);
5982         aconnector->dc_em_sink = NULL;
5983         if (aconnector->dc_sink)
5984                 dc_sink_release(aconnector->dc_sink);
5985         aconnector->dc_sink = NULL;
5986
5987         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5988         drm_connector_unregister(connector);
5989         drm_connector_cleanup(connector);
5990         if (aconnector->i2c) {
5991                 i2c_del_adapter(&aconnector->i2c->base);
5992                 kfree(aconnector->i2c);
5993         }
5994         kfree(aconnector->dm_dp_aux.aux.name);
5995
5996         kfree(connector);
5997 }
5998
5999 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6000 {
6001         struct dm_connector_state *state =
6002                 to_dm_connector_state(connector->state);
6003
6004         if (connector->state)
6005                 __drm_atomic_helper_connector_destroy_state(connector->state);
6006
6007         kfree(state);
6008
6009         state = kzalloc(sizeof(*state), GFP_KERNEL);
6010
6011         if (state) {
6012                 state->scaling = RMX_OFF;
6013                 state->underscan_enable = false;
6014                 state->underscan_hborder = 0;
6015                 state->underscan_vborder = 0;
6016                 state->base.max_requested_bpc = 8;
6017                 state->vcpi_slots = 0;
6018                 state->pbn = 0;
6019                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6020                         state->abm_level = amdgpu_dm_abm_level;
6021
6022                 __drm_atomic_helper_connector_reset(connector, &state->base);
6023         }
6024 }
6025
6026 struct drm_connector_state *
6027 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6028 {
6029         struct dm_connector_state *state =
6030                 to_dm_connector_state(connector->state);
6031
6032         struct dm_connector_state *new_state =
6033                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6034
6035         if (!new_state)
6036                 return NULL;
6037
6038         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6039
6040         new_state->freesync_capable = state->freesync_capable;
6041         new_state->abm_level = state->abm_level;
6042         new_state->scaling = state->scaling;
6043         new_state->underscan_enable = state->underscan_enable;
6044         new_state->underscan_hborder = state->underscan_hborder;
6045         new_state->underscan_vborder = state->underscan_vborder;
6046         new_state->vcpi_slots = state->vcpi_slots;
6047         new_state->pbn = state->pbn;
6048         return &new_state->base;
6049 }
6050
6051 static int
6052 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6053 {
6054         struct amdgpu_dm_connector *amdgpu_dm_connector =
6055                 to_amdgpu_dm_connector(connector);
6056         int r;
6057
6058         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6059             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6060                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6061                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6062                 if (r)
6063                         return r;
6064         }
6065
6066 #if defined(CONFIG_DEBUG_FS)
6067         connector_debugfs_init(amdgpu_dm_connector);
6068 #endif
6069
6070         return 0;
6071 }
6072
6073 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6074         .reset = amdgpu_dm_connector_funcs_reset,
6075         .detect = amdgpu_dm_connector_detect,
6076         .fill_modes = drm_helper_probe_single_connector_modes,
6077         .destroy = amdgpu_dm_connector_destroy,
6078         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6079         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6080         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6081         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6082         .late_register = amdgpu_dm_connector_late_register,
6083         .early_unregister = amdgpu_dm_connector_unregister
6084 };
6085
6086 static int get_modes(struct drm_connector *connector)
6087 {
6088         return amdgpu_dm_connector_get_modes(connector);
6089 }
6090
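/*
 * Create an emulated (SIGNAL_TYPE_VIRTUAL) sink from the EDID property
 * blob so that forced connectors have a dc_sink to validate against; if
 * no EDID override is attached, the connector is forced off instead.
 */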
6091 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6092 {
6093         struct dc_sink_init_data init_params = {
6094                         .link = aconnector->dc_link,
6095                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6096         };
6097         struct edid *edid;
6098
6099         if (!aconnector->base.edid_blob_ptr) {
6100                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6101                                 aconnector->base.name);
6102
6103                 aconnector->base.force = DRM_FORCE_OFF;
6104                 aconnector->base.override_edid = false;
6105                 return;
6106         }
6107
6108         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6109
6110         aconnector->edid = edid;
6111
6112         aconnector->dc_em_sink = dc_link_add_remote_sink(
6113                 aconnector->dc_link,
6114                 (uint8_t *)edid,
6115                 (edid->extensions + 1) * EDID_LENGTH,
6116                 &init_params);
6117
6118         if (aconnector->base.force == DRM_FORCE_ON) {
6119                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6120                 aconnector->dc_link->local_sink :
6121                 aconnector->dc_em_sink;
6122                 dc_sink_retain(aconnector->dc_sink);
6123         }
6124 }
6125
6126 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6127 {
6128         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6129
6130         /*
6131          * In case of a headless boot with a forced-on, DP-managed connector,
6132          * these settings have to be != 0 to get an initial modeset.
6133          */
6134         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6135                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6136                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6137         }
6138
6140         aconnector->base.override_edid = true;
6141         create_eml_sink(aconnector);
6142 }
6143
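/*
 * Create a stream and validate it against DC, lowering the color depth on
 * failure: starting from max_requested_bpc (8 if no connector state), the
 * bpc drops by 2 per retry until 6, e.g. 10 -> 8 -> 6.  If the encoder
 * itself rejects the stream, one more attempt is made with YCbCr 4:2:0
 * forced on.
 */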
6144 static struct dc_stream_state *
6145 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6146                                 const struct drm_display_mode *drm_mode,
6147                                 const struct dm_connector_state *dm_state,
6148                                 const struct dc_stream_state *old_stream)
6149 {
6150         struct drm_connector *connector = &aconnector->base;
6151         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6152         struct dc_stream_state *stream;
6153         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6154         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6155         enum dc_status dc_result = DC_OK;
6156
6157         do {
6158                 stream = create_stream_for_sink(aconnector, drm_mode,
6159                                                 dm_state, old_stream,
6160                                                 requested_bpc);
6161                 if (stream == NULL) {
6162                         DRM_ERROR("Failed to create stream for sink!\n");
6163                         break;
6164                 }
6165
6166                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6167
6168                 if (dc_result != DC_OK) {
6169                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6170                                       drm_mode->hdisplay,
6171                                       drm_mode->vdisplay,
6172                                       drm_mode->clock,
6173                                       dc_result,
6174                                       dc_status_to_str(dc_result));
6175
6176                         dc_stream_release(stream);
6177                         stream = NULL;
6178                         requested_bpc -= 2; /* lower bpc to retry validation */
6179                 }
6180
6181         } while (stream == NULL && requested_bpc >= 6);
6182
6183         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6184                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6185
6186                 aconnector->force_yuv420_output = true;
6187                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6188                                                 dm_state, old_stream);
6189                 aconnector->force_yuv420_output = false;
6190         }
6191
6192         return stream;
6193 }
6194
6195 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6196                                    struct drm_display_mode *mode)
6197 {
6198         int result = MODE_ERROR;
6199         struct dc_sink *dc_sink;
6200         /* TODO: Unhardcode stream count */
6201         struct dc_stream_state *stream;
6202         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6203
6204         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6205                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6206                 return result;
6207
6208         /*
6209          * Only run this the first time mode_valid is called, to initialize
6210          * EDID management.
6211          */
6212         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6213                 !aconnector->dc_em_sink)
6214                 handle_edid_mgmt(aconnector);
6215
6216         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6217
6218         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6219                                 aconnector->base.force != DRM_FORCE_ON) {
6220                 DRM_ERROR("dc_sink is NULL!\n");
6221                 goto fail;
6222         }
6223
6224         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6225         if (stream) {
6226                 dc_stream_release(stream);
6227                 result = MODE_OK;
6228         }
6229
6230 fail:
6231         /* TODO: error handling */
6232         return result;
6233 }
6234
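/*
 * Pack the connector's HDR static metadata into a DC info packet.  The
 * packed HDMI DRM infoframe is a fixed 30 bytes: a 4-byte header followed
 * by a 26-byte payload.  For HDMI the type/version/length bytes become
 * hb0-hb2 and the checksum lands in sb[0]; for DP the same payload is
 * wrapped in an SDP header instead.
 */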
6235 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6236                                 struct dc_info_packet *out)
6237 {
6238         struct hdmi_drm_infoframe frame;
6239         unsigned char buf[30]; /* 26 + 4 */
6240         ssize_t len;
6241         int ret, i;
6242
6243         memset(out, 0, sizeof(*out));
6244
6245         if (!state->hdr_output_metadata)
6246                 return 0;
6247
6248         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6249         if (ret)
6250                 return ret;
6251
6252         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6253         if (len < 0)
6254                 return (int)len;
6255
6256         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6257         if (len != 30)
6258                 return -EINVAL;
6259
6260         /* Prepare the infopacket for DC. */
6261         switch (state->connector->connector_type) {
6262         case DRM_MODE_CONNECTOR_HDMIA:
6263                 out->hb0 = 0x87; /* type */
6264                 out->hb1 = 0x01; /* version */
6265                 out->hb2 = 0x1A; /* length */
6266                 out->sb[0] = buf[3]; /* checksum */
6267                 i = 1;
6268                 break;
6269
6270         case DRM_MODE_CONNECTOR_DisplayPort:
6271         case DRM_MODE_CONNECTOR_eDP:
6272                 out->hb0 = 0x00; /* sdp id, zero */
6273                 out->hb1 = 0x87; /* type */
6274                 out->hb2 = 0x1D; /* payload len - 1 */
6275                 out->hb3 = (0x13 << 2); /* sdp version */
6276                 out->sb[0] = 0x01; /* version */
6277                 out->sb[1] = 0x1A; /* length */
6278                 i = 2;
6279                 break;
6280
6281         default:
6282                 return -EINVAL;
6283         }
6284
6285         memcpy(&out->sb[i], &buf[4], 26);
6286         out->valid = true;
6287
6288         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6289                        sizeof(out->sb), false);
6290
6291         return 0;
6292 }
6293
6294 static bool
6295 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6296                           const struct drm_connector_state *new_state)
6297 {
6298         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6299         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6300
6301         if (old_blob != new_blob) {
6302                 if (old_blob && new_blob &&
6303                     old_blob->length == new_blob->length)
6304                         return memcmp(old_blob->data, new_blob->data,
6305                                       old_blob->length);
6306
6307                 return true;
6308         }
6309
6310         return false;
6311 }
6312
6313 static int
6314 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6315                                  struct drm_atomic_state *state)
6316 {
6317         struct drm_connector_state *new_con_state =
6318                 drm_atomic_get_new_connector_state(state, conn);
6319         struct drm_connector_state *old_con_state =
6320                 drm_atomic_get_old_connector_state(state, conn);
6321         struct drm_crtc *crtc = new_con_state->crtc;
6322         struct drm_crtc_state *new_crtc_state;
6323         int ret;
6324
6325         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6326
6327         if (!crtc)
6328                 return 0;
6329
6330         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6331                 struct dc_info_packet hdr_infopacket;
6332
6333                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6334                 if (ret)
6335                         return ret;
6336
6337                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6338                 if (IS_ERR(new_crtc_state))
6339                         return PTR_ERR(new_crtc_state);
6340
6341                 /*
6342                  * DC considers the stream backends changed if the
6343                  * static metadata changes. Forcing the modeset also
6344                  * gives a simple way for userspace to switch from
6345                  * 8bpc to 10bpc when setting the metadata to enter
6346                  * or exit HDR.
6347                  *
6348                  * Changing the static metadata after it's been
6349                  * set is permissible, however. So only force a
6350                  * modeset if we're entering or exiting HDR.
6351                  */
6352                 new_crtc_state->mode_changed =
6353                         !old_con_state->hdr_output_metadata ||
6354                         !new_con_state->hdr_output_metadata;
6355         }
6356
6357         return 0;
6358 }
6359
6360 static const struct drm_connector_helper_funcs
6361 amdgpu_dm_connector_helper_funcs = {
6362         /*
6363          * If a second, larger display is hotplugged in fbcon mode, its higher
6364          * resolution modes are filtered out by drm_mode_validate_size() and go
6365          * missing once the user starts lightdm. So the get_modes callback must
6366          * rebuild the mode list, not just return the mode count.
6367          */
6368         .get_modes = get_modes,
6369         .mode_valid = amdgpu_dm_connector_mode_valid,
6370         .atomic_check = amdgpu_dm_connector_atomic_check,
6371 };
6372
6373 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6374 {
6375 }
6376
6377 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6378 {
6379         struct drm_atomic_state *state = new_crtc_state->state;
6380         struct drm_plane *plane;
6381         int num_active = 0;
6382
6383         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6384                 struct drm_plane_state *new_plane_state;
6385
6386                 /* Cursor planes are "fake". */
6387                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6388                         continue;
6389
6390                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6391
6392                 if (!new_plane_state) {
6393                         /*
6394                          * The plane is enabled on the CRTC and hasn't changed
6395                          * state. This means that it previously passed
6396                          * validation and is therefore enabled.
6397                          */
6398                         num_active += 1;
6399                         continue;
6400                 }
6401
6402                 /* We need a framebuffer to be considered enabled. */
6403                 num_active += (new_plane_state->fb != NULL);
6404         }
6405
6406         return num_active;
6407 }
6408
6409 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6410                                          struct drm_crtc_state *new_crtc_state)
6411 {
6412         struct dm_crtc_state *dm_new_crtc_state =
6413                 to_dm_crtc_state(new_crtc_state);
6414
6415         dm_new_crtc_state->active_planes = 0;
6416
6417         if (!dm_new_crtc_state->stream)
6418                 return;
6419
6420         dm_new_crtc_state->active_planes =
6421                 count_crtc_active_planes(new_crtc_state);
6422 }
6423
6424 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6425                                        struct drm_atomic_state *state)
6426 {
6427         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6428                                                                           crtc);
6429         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6430         struct dc *dc = adev->dm.dc;
6431         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6432         int ret = -EINVAL;
6433
6434         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6435
6436         dm_update_crtc_active_planes(crtc, crtc_state);
6437
6438         if (unlikely(!dm_crtc_state->stream &&
6439                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6440                 WARN_ON(1);
6441                 return ret;
6442         }
6443
6444         /*
6445          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6446          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6447          * planes are disabled, which is not supported by the hardware. And there is legacy
6448          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6449          */
6450         if (crtc_state->enable &&
6451             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6452                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6453                 return -EINVAL;
6454         }
6455
6456         /* In some use cases, like reset, no stream is attached */
6457         if (!dm_crtc_state->stream)
6458                 return 0;
6459
6460         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6461                 return 0;
6462
6463         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6464         return ret;
6465 }
6466
6467 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6468                                       const struct drm_display_mode *mode,
6469                                       struct drm_display_mode *adjusted_mode)
6470 {
6471         return true;
6472 }
6473
6474 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6475         .disable = dm_crtc_helper_disable,
6476         .atomic_check = dm_crtc_helper_atomic_check,
6477         .mode_fixup = dm_crtc_helper_mode_fixup,
6478         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6479 };
6480
6481 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6482 {
6484 }
6485
6486 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6487 {
6488         switch (display_color_depth) {
6489         case COLOR_DEPTH_666:
6490                 return 6;
6491         case COLOR_DEPTH_888:
6492                 return 8;
6493         case COLOR_DEPTH_101010:
6494                 return 10;
6495         case COLOR_DEPTH_121212:
6496                 return 12;
6497         case COLOR_DEPTH_141414:
6498                 return 14;
6499         case COLOR_DEPTH_161616:
6500                 return 16;
6501         default:
6502                 break;
6503         }
6504         return 0;
6505 }
6506
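/*
 * For MST connectors, translate the mode into link bandwidth: bpp is the
 * negotiated color depth times three components, and the PBN follows from
 * the pixel clock.  As a rough, assumed example, 1080p60 RGB at 8 bpc
 * gives bpp = 24 and clock = 148500 kHz, for which drm_dp_calc_pbn_mode()
 * returns a PBN of about 532.  The VCPI slots are then reserved atomically.
 */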
6507 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6508                                           struct drm_crtc_state *crtc_state,
6509                                           struct drm_connector_state *conn_state)
6510 {
6511         struct drm_atomic_state *state = crtc_state->state;
6512         struct drm_connector *connector = conn_state->connector;
6513         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6514         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6515         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6516         struct drm_dp_mst_topology_mgr *mst_mgr;
6517         struct drm_dp_mst_port *mst_port;
6518         enum dc_color_depth color_depth;
6519         int clock, bpp = 0;
6520         bool is_y420 = false;
6521
6522         if (!aconnector->port || !aconnector->dc_sink)
6523                 return 0;
6524
6525         mst_port = aconnector->port;
6526         mst_mgr = &aconnector->mst_port->mst_mgr;
6527
6528         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6529                 return 0;
6530
6531         if (!state->duplicated) {
6532                 int max_bpc = conn_state->max_requested_bpc;
6533                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6534                                 aconnector->force_yuv420_output;
6535                 color_depth = convert_color_depth_from_display_info(connector,
6536                                                                     is_y420,
6537                                                                     max_bpc);
6538                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6539                 clock = adjusted_mode->clock;
6540                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6541         }
6542         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6543                                                                            mst_mgr,
6544                                                                            mst_port,
6545                                                                            dm_new_connector_state->pbn,
6546                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6547         if (dm_new_connector_state->vcpi_slots < 0) {
6548                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6549                 return dm_new_connector_state->vcpi_slots;
6550         }
6551         return 0;
6552 }
6553
6554 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6555         .disable = dm_encoder_helper_disable,
6556         .atomic_check = dm_encoder_helper_atomic_check
6557 };
6558
6559 #if defined(CONFIG_DRM_AMD_DC_DCN)
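/*
 * When DSC is enabled the payload must be sized for the compressed rate:
 * dsc_cfg.bits_per_pixel is in 1/16-bpp units (hence the dsc=true argument
 * to drm_dp_calc_pbn_mode()), and the clock is converted from 100 Hz units
 * to kHz.  Streams without DSC keep their previously computed PBN.
 */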
6560 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6561                                             struct dc_state *dc_state)
6562 {
6563         struct dc_stream_state *stream = NULL;
6564         struct drm_connector *connector;
6565         struct drm_connector_state *new_con_state, *old_con_state;
6566         struct amdgpu_dm_connector *aconnector;
6567         struct dm_connector_state *dm_conn_state;
6568         int i, j, clock, bpp;
6569         int vcpi, pbn_div, pbn = 0;
6570
6571         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6572
6573                 aconnector = to_amdgpu_dm_connector(connector);
6574
6575                 if (!aconnector->port)
6576                         continue;
6577
6578                 if (!new_con_state || !new_con_state->crtc)
6579                         continue;
6580
6581                 dm_conn_state = to_dm_connector_state(new_con_state);
6582
6583                 for (j = 0; j < dc_state->stream_count; j++) {
6584                         stream = dc_state->streams[j];
6585                         if (!stream)
6586                                 continue;
6587
6588                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6589                                 break;
6590
6591                         stream = NULL;
6592                 }
6593
6594                 if (!stream)
6595                         continue;
6596
6597                 if (stream->timing.flags.DSC != 1) {
6598                         drm_dp_mst_atomic_enable_dsc(state,
6599                                                      aconnector->port,
6600                                                      dm_conn_state->pbn,
6601                                                      0,
6602                                                      false);
6603                         continue;
6604                 }
6605
6606                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6607                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6608                 clock = stream->timing.pix_clk_100hz / 10;
6609                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6610                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6611                                                     aconnector->port,
6612                                                     pbn, pbn_div,
6613                                                     true);
6614                 if (vcpi < 0)
6615                         return vcpi;
6616
6617                 dm_conn_state->pbn = pbn;
6618                 dm_conn_state->vcpi_slots = vcpi;
6619         }
6620         return 0;
6621 }
6622 #endif
6623
6624 static void dm_drm_plane_reset(struct drm_plane *plane)
6625 {
6626         struct dm_plane_state *amdgpu_state = NULL;
6627
6628         if (plane->state)
6629                 plane->funcs->atomic_destroy_state(plane, plane->state);
6630
6631         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6632         WARN_ON(amdgpu_state == NULL);
6633
6634         if (amdgpu_state)
6635                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6636 }
6637
6638 static struct drm_plane_state *
6639 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6640 {
6641         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6642
6643         old_dm_plane_state = to_dm_plane_state(plane->state);
6644         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6645         if (!dm_plane_state)
6646                 return NULL;
6647
6648         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6649
6650         if (old_dm_plane_state->dc_state) {
6651                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6652                 dc_plane_state_retain(dm_plane_state->dc_state);
6653         }
6654
6655         return &dm_plane_state->base;
6656 }
6657
6658 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6659                                 struct drm_plane_state *state)
6660 {
6661         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6662
6663         if (dm_plane_state->dc_state)
6664                 dc_plane_state_release(dm_plane_state->dc_state);
6665
6666         drm_atomic_helper_plane_destroy_state(plane, state);
6667 }
6668
6669 static const struct drm_plane_funcs dm_plane_funcs = {
6670         .update_plane   = drm_atomic_helper_update_plane,
6671         .disable_plane  = drm_atomic_helper_disable_plane,
6672         .destroy        = drm_primary_helper_destroy,
6673         .reset = dm_drm_plane_reset,
6674         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6675         .atomic_destroy_state = dm_drm_plane_destroy_state,
6676         .format_mod_supported = dm_plane_format_mod_supported,
6677 };
6678
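/*
 * Pin the framebuffer BO so that scanout can rely on a fixed GPU address:
 * reserve it through the TTM execbuf utils, pin it in a display-capable
 * domain (cursors must live in VRAM), make sure it is bound in GART, and
 * record the resulting address in the amdgpu_framebuffer.
 */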
6679 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6680                                       struct drm_plane_state *new_state)
6681 {
6682         struct amdgpu_framebuffer *afb;
6683         struct drm_gem_object *obj;
6684         struct amdgpu_device *adev;
6685         struct amdgpu_bo *rbo;
6686         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6687         struct list_head list;
6688         struct ttm_validate_buffer tv;
6689         struct ww_acquire_ctx ticket;
6690         uint32_t domain;
6691         int r;
6692
6693         if (!new_state->fb) {
6694                 DRM_DEBUG_KMS("No FB bound\n");
6695                 return 0;
6696         }
6697
6698         afb = to_amdgpu_framebuffer(new_state->fb);
6699         obj = new_state->fb->obj[0];
6700         rbo = gem_to_amdgpu_bo(obj);
6701         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6702         INIT_LIST_HEAD(&list);
6703
6704         tv.bo = &rbo->tbo;
6705         tv.num_shared = 1;
6706         list_add(&tv.head, &list);
6707
6708         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6709         if (r) {
6710                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6711                 return r;
6712         }
6713
6714         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6715                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6716         else
6717                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6718
6719         r = amdgpu_bo_pin(rbo, domain);
6720         if (unlikely(r != 0)) {
6721                 if (r != -ERESTARTSYS)
6722                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6723                 ttm_eu_backoff_reservation(&ticket, &list);
6724                 return r;
6725         }
6726
6727         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6728         if (unlikely(r != 0)) {
6729                 amdgpu_bo_unpin(rbo);
6730                 ttm_eu_backoff_reservation(&ticket, &list);
6731                 DRM_ERROR("%p bind failed\n", rbo);
6732                 return r;
6733         }
6734
6735         ttm_eu_backoff_reservation(&ticket, &list);
6736
6737         afb->address = amdgpu_bo_gpu_offset(rbo);
6738
6739         amdgpu_bo_ref(rbo);
6740
6741         /*
6742          * We don't do surface updates on planes that have been newly created,
6743          * but we also don't have the afb->address during atomic check.
6744          *
6745          * Fill in buffer attributes depending on the address here, but only on
6746          * newly created planes since they're not being used by DC yet and this
6747          * won't modify global state.
6748          */
6749         dm_plane_state_old = to_dm_plane_state(plane->state);
6750         dm_plane_state_new = to_dm_plane_state(new_state);
6751
6752         if (dm_plane_state_new->dc_state &&
6753             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6754                 struct dc_plane_state *plane_state =
6755                         dm_plane_state_new->dc_state;
6756                 bool force_disable_dcc = !plane_state->dcc.enable;
6757
6758                 fill_plane_buffer_attributes(
6759                         adev, afb, plane_state->format, plane_state->rotation,
6760                         afb->tiling_flags,
6761                         &plane_state->tiling_info, &plane_state->plane_size,
6762                         &plane_state->dcc, &plane_state->address,
6763                         afb->tmz_surface, force_disable_dcc);
6764         }
6765
6766         return 0;
6767 }
6768
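/*
 * Undo dm_plane_helper_prepare_fb(): unpin the BO and drop the extra
 * reference taken there.
 */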
6769 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6770                                        struct drm_plane_state *old_state)
6771 {
6772         struct amdgpu_bo *rbo;
6773         int r;
6774
6775         if (!old_state->fb)
6776                 return;
6777
6778         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6779         r = amdgpu_bo_reserve(rbo, false);
6780         if (unlikely(r)) {
6781                 DRM_ERROR("failed to reserve rbo before unpin\n");
6782                 return;
6783         }
6784
6785         amdgpu_bo_unpin(rbo);
6786         amdgpu_bo_unreserve(rbo);
6787         amdgpu_bo_unref(&rbo);
6788 }
6789
6790 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6791                                        struct drm_crtc_state *new_crtc_state)
6792 {
6793         struct drm_framebuffer *fb = state->fb;
6794         int min_downscale, max_upscale;
6795         int min_scale = 0;
6796         int max_scale = INT_MAX;
6797
6798         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6799         if (fb && state->crtc) {
6800                 /* Validate viewport to cover the case when only the position changes */
6801                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6802                         int viewport_width = state->crtc_w;
6803                         int viewport_height = state->crtc_h;
6804
6805                         if (state->crtc_x < 0)
6806                                 viewport_width += state->crtc_x;
6807                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6808                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6809
6810                         if (state->crtc_y < 0)
6811                                 viewport_height += state->crtc_y;
6812                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6813                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6814
6815                         if (viewport_width < 0 || viewport_height < 0) {
6816                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6817                                 return -EINVAL;
6818                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* Width is doubled to account for pipe split. */
6819                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6820                                 return -EINVAL;
6821                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6822                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6823                                 return -EINVAL;
6824                         }
6825
6826                 }
6827
6828                 /* Get min/max allowed scaling factors from plane caps. */
6829                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6830                                              &min_downscale, &max_upscale);
6831                 /*
6832                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6833                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6834                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6835                  */
6836                 min_scale = (1000 << 16) / max_upscale;
6837                 max_scale = (1000 << 16) / min_downscale;
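                /*
                 * Worked example with hypothetical caps: max_upscale = 16000
                 * (16x in dc's 1.0 == 1000 convention) gives min_scale =
                 * (1000 << 16) / 16000 = 0x1000, i.e. 1/16 in 16.16 fixed
                 * point; min_downscale = 250 (1/4x) gives max_scale =
                 * (1000 << 16) / 250 = 0x40000, i.e. 4.0.
                 */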
6838         }
6839
6840         return drm_atomic_helper_check_plane_state(
6841                 state, new_crtc_state, min_scale, max_scale, true, true);
6842 }
6843
6844 static int dm_plane_atomic_check(struct drm_plane *plane,
6845                                  struct drm_atomic_state *state)
6846 {
6847         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6848                                                                                  plane);
6849         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6850         struct dc *dc = adev->dm.dc;
6851         struct dm_plane_state *dm_plane_state;
6852         struct dc_scaling_info scaling_info;
6853         struct drm_crtc_state *new_crtc_state;
6854         int ret;
6855
6856         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6857
6858         dm_plane_state = to_dm_plane_state(new_plane_state);
6859
6860         if (!dm_plane_state->dc_state)
6861                 return 0;
6862
6863         new_crtc_state =
6864                 drm_atomic_get_new_crtc_state(state,
6865                                               new_plane_state->crtc);
6866         if (!new_crtc_state)
6867                 return -EINVAL;
6868
6869         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6870         if (ret)
6871                 return ret;
6872
6873         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6874         if (ret)
6875                 return ret;
6876
6877         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6878                 return 0;
6879
6880         return -EINVAL;
6881 }
6882
6883 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6884                                        struct drm_atomic_state *state)
6885 {
6886         /* Only support async updates on cursor planes. */
6887         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6888                 return -EINVAL;
6889
6890         return 0;
6891 }
6892
6893 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6894                                          struct drm_atomic_state *state)
6895 {
6896         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6897                                                                            plane);
6898         struct drm_plane_state *old_state =
6899                 drm_atomic_get_old_plane_state(state, plane);
6900
6901         trace_amdgpu_dm_atomic_update_cursor(new_state);
6902
6903         swap(plane->state->fb, new_state->fb);
6904
6905         plane->state->src_x = new_state->src_x;
6906         plane->state->src_y = new_state->src_y;
6907         plane->state->src_w = new_state->src_w;
6908         plane->state->src_h = new_state->src_h;
6909         plane->state->crtc_x = new_state->crtc_x;
6910         plane->state->crtc_y = new_state->crtc_y;
6911         plane->state->crtc_w = new_state->crtc_w;
6912         plane->state->crtc_h = new_state->crtc_h;
6913
6914         handle_cursor_update(plane, old_state);
6915 }
6916
6917 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6918         .prepare_fb = dm_plane_helper_prepare_fb,
6919         .cleanup_fb = dm_plane_helper_cleanup_fb,
6920         .atomic_check = dm_plane_atomic_check,
6921         .atomic_async_check = dm_plane_atomic_async_check,
6922         .atomic_async_update = dm_plane_atomic_async_update
6923 };
6924
6925 /*
6926  * TODO: these are currently initialized to RGB formats only.
6927  * For future use cases we should either initialize them dynamically based on
6928  * plane capabilities, or initialize this array to all formats so that the
6929  * internal drm check will succeed, and let DC implement the proper checks.
6930  */
6931 static const uint32_t rgb_formats[] = {
6932         DRM_FORMAT_XRGB8888,
6933         DRM_FORMAT_ARGB8888,
6934         DRM_FORMAT_RGBA8888,
6935         DRM_FORMAT_XRGB2101010,
6936         DRM_FORMAT_XBGR2101010,
6937         DRM_FORMAT_ARGB2101010,
6938         DRM_FORMAT_ABGR2101010,
6939         DRM_FORMAT_XBGR8888,
6940         DRM_FORMAT_ABGR8888,
6941         DRM_FORMAT_RGB565,
6942 };
6943
6944 static const uint32_t overlay_formats[] = {
6945         DRM_FORMAT_XRGB8888,
6946         DRM_FORMAT_ARGB8888,
6947         DRM_FORMAT_RGBA8888,
6948         DRM_FORMAT_XBGR8888,
6949         DRM_FORMAT_ABGR8888,
6950         DRM_FORMAT_RGB565
6951 };
6952
6953 static const u32 cursor_formats[] = {
6954         DRM_FORMAT_ARGB8888
6955 };
6956
6957 static int get_plane_formats(const struct drm_plane *plane,
6958                              const struct dc_plane_cap *plane_cap,
6959                              uint32_t *formats, int max_formats)
6960 {
6961         int i, num_formats = 0;
6962
6963         /*
6964          * TODO: Query support for each group of formats directly from
6965          * DC plane caps. This will require adding more formats to the
6966          * caps list.
6967          */
6968
6969         switch (plane->type) {
6970         case DRM_PLANE_TYPE_PRIMARY:
6971                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6972                         if (num_formats >= max_formats)
6973                                 break;
6974
6975                         formats[num_formats++] = rgb_formats[i];
6976                 }
6977
6978                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6979                         formats[num_formats++] = DRM_FORMAT_NV12;
6980                 if (plane_cap && plane_cap->pixel_format_support.p010)
6981                         formats[num_formats++] = DRM_FORMAT_P010;
6982                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6983                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6984                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6985                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6986                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6987                 }
6988                 break;
6989
6990         case DRM_PLANE_TYPE_OVERLAY:
6991                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6992                         if (num_formats >= max_formats)
6993                                 break;
6994
6995                         formats[num_formats++] = overlay_formats[i];
6996                 }
6997                 break;
6998
6999         case DRM_PLANE_TYPE_CURSOR:
7000                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7001                         if (num_formats >= max_formats)
7002                                 break;
7003
7004                         formats[num_formats++] = cursor_formats[i];
7005                 }
7006                 break;
7007         }
7008
7009         return num_formats;
7010 }
7011
7012 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7013                                 struct drm_plane *plane,
7014                                 unsigned long possible_crtcs,
7015                                 const struct dc_plane_cap *plane_cap)
7016 {
7017         uint32_t formats[32];
7018         int num_formats;
7019         int res = -EPERM;
7020         unsigned int supported_rotations;
7021         uint64_t *modifiers = NULL;
7022
7023         num_formats = get_plane_formats(plane, plane_cap, formats,
7024                                         ARRAY_SIZE(formats));
7025
7026         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7027         if (res)
7028                 return res;
7029
7030         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7031                                        &dm_plane_funcs, formats, num_formats,
7032                                        modifiers, plane->type, NULL);
7033         kfree(modifiers);
7034         if (res)
7035                 return res;
7036
7037         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7038             plane_cap && plane_cap->per_pixel_alpha) {
7039                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7040                                           BIT(DRM_MODE_BLEND_PREMULTI);
7041
7042                 drm_plane_create_alpha_property(plane);
7043                 drm_plane_create_blend_mode_property(plane, blend_caps);
7044         }
7045
7046         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7047             plane_cap &&
7048             (plane_cap->pixel_format_support.nv12 ||
7049              plane_cap->pixel_format_support.p010)) {
7050                 /* This only affects YUV formats. */
7051                 drm_plane_create_color_properties(
7052                         plane,
7053                         BIT(DRM_COLOR_YCBCR_BT601) |
7054                         BIT(DRM_COLOR_YCBCR_BT709) |
7055                         BIT(DRM_COLOR_YCBCR_BT2020),
7056                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7057                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7058                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7059         }
7060
7061         supported_rotations =
7062                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7063                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7064
7065         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7066             plane->type != DRM_PLANE_TYPE_CURSOR)
7067                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7068                                                    supported_rotations);
7069
7070         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7071
7072         /* Create (reset) the plane state */
7073         if (plane->funcs->reset)
7074                 plane->funcs->reset(plane);
7075
7076         return 0;
7077 }
7078
7079 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7080                                struct drm_plane *plane,
7081                                uint32_t crtc_index)
7082 {
7083         struct amdgpu_crtc *acrtc = NULL;
7084         struct drm_plane *cursor_plane;
7085
7086         int res = -ENOMEM;
7087
7088         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7089         if (!cursor_plane)
7090                 goto fail;
7091
7092         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7093         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;
7094
7095         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7096         if (!acrtc)
7097                 goto fail;
7098
7099         res = drm_crtc_init_with_planes(
7100                         dm->ddev,
7101                         &acrtc->base,
7102                         plane,
7103                         cursor_plane,
7104                         &amdgpu_dm_crtc_funcs, NULL);
7105
7106         if (res)
7107                 goto fail;
7108
7109         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7110
7111         /* Create (reset) the CRTC state */
7112         if (acrtc->base.funcs->reset)
7113                 acrtc->base.funcs->reset(&acrtc->base);
7114
7115         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7116         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7117
7118         acrtc->crtc_id = crtc_index;
7119         acrtc->base.enabled = false;
7120         acrtc->otg_inst = -1;
7121
7122         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7123         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7124                                    true, MAX_COLOR_LUT_ENTRIES);
7125         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7126
7127         return 0;
7128
7129 fail:
7130         kfree(acrtc);
7131         kfree(cursor_plane);
7132         return res;
7133 }
7134
7135
7136 static int to_drm_connector_type(enum signal_type st)
7137 {
7138         switch (st) {
7139         case SIGNAL_TYPE_HDMI_TYPE_A:
7140                 return DRM_MODE_CONNECTOR_HDMIA;
7141         case SIGNAL_TYPE_EDP:
7142                 return DRM_MODE_CONNECTOR_eDP;
7143         case SIGNAL_TYPE_LVDS:
7144                 return DRM_MODE_CONNECTOR_LVDS;
7145         case SIGNAL_TYPE_RGB:
7146                 return DRM_MODE_CONNECTOR_VGA;
7147         case SIGNAL_TYPE_DISPLAY_PORT:
7148         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7149                 return DRM_MODE_CONNECTOR_DisplayPort;
7150         case SIGNAL_TYPE_DVI_DUAL_LINK:
7151         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7152                 return DRM_MODE_CONNECTOR_DVID;
7153         case SIGNAL_TYPE_VIRTUAL:
7154                 return DRM_MODE_CONNECTOR_VIRTUAL;
7155
7156         default:
7157                 return DRM_MODE_CONNECTOR_Unknown;
7158         }
7159 }
7160
7161 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7162 {
7163         struct drm_encoder *encoder;
7164
7165         /* There is only one encoder per connector */
7166         drm_connector_for_each_possible_encoder(connector, encoder)
7167                 return encoder;
7168
7169         return NULL;
7170 }
7171
7172 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7173 {
7174         struct drm_encoder *encoder;
7175         struct amdgpu_encoder *amdgpu_encoder;
7176
7177         encoder = amdgpu_dm_connector_to_encoder(connector);
7178
7179         if (encoder == NULL)
7180                 return;
7181
7182         amdgpu_encoder = to_amdgpu_encoder(encoder);
7183
7184         amdgpu_encoder->native_mode.clock = 0;
7185
7186         if (!list_empty(&connector->probed_modes)) {
7187                 struct drm_display_mode *preferred_mode = NULL;
7188
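                /*
                 * probed_modes was sorted by drm_mode_sort(), which puts
                 * preferred modes first, so only the head of the list needs
                 * to be checked; hence the unconditional break below.
                 */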
7189                 list_for_each_entry(preferred_mode,
7190                                     &connector->probed_modes,
7191                                     head) {
7192                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7193                                 amdgpu_encoder->native_mode = *preferred_mode;
7194
7195                         break;
7196                 }
7197
7198         }
7199 }
7200
7201 static struct drm_display_mode *
7202 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7203                              char *name,
7204                              int hdisplay, int vdisplay)
7205 {
7206         struct drm_device *dev = encoder->dev;
7207         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7208         struct drm_display_mode *mode = NULL;
7209         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7210
7211         mode = drm_mode_duplicate(dev, native_mode);
7212
7213         if (mode == NULL)
7214                 return NULL;
7215
7216         mode->hdisplay = hdisplay;
7217         mode->vdisplay = vdisplay;
7218         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7219         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7220
7221         return mode;
7222
7223 }
7224
7225 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7226                                                  struct drm_connector *connector)
7227 {
7228         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7229         struct drm_display_mode *mode = NULL;
7230         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7231         struct amdgpu_dm_connector *amdgpu_dm_connector =
7232                                 to_amdgpu_dm_connector(connector);
7233         int i;
7234         int n;
7235         struct mode_size {
7236                 char name[DRM_DISPLAY_MODE_LEN];
7237                 int w;
7238                 int h;
7239         } common_modes[] = {
7240                 {  "640x480",  640,  480},
7241                 {  "800x600",  800,  600},
7242                 { "1024x768", 1024,  768},
7243                 { "1280x720", 1280,  720},
7244                 { "1280x800", 1280,  800},
7245                 {"1280x1024", 1280, 1024},
7246                 { "1440x900", 1440,  900},
7247                 {"1680x1050", 1680, 1050},
7248                 {"1600x1200", 1600, 1200},
7249                 {"1920x1080", 1920, 1080},
7250                 {"1920x1200", 1920, 1200}
7251         };
7252
7253         n = ARRAY_SIZE(common_modes);
7254
7255         for (i = 0; i < n; i++) {
7256                 struct drm_display_mode *curmode = NULL;
7257                 bool mode_existed = false;
7258
7259                 if (common_modes[i].w > native_mode->hdisplay ||
7260                     common_modes[i].h > native_mode->vdisplay ||
7261                    (common_modes[i].w == native_mode->hdisplay &&
7262                     common_modes[i].h == native_mode->vdisplay))
7263                         continue;
7264
7265                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7266                         if (common_modes[i].w == curmode->hdisplay &&
7267                             common_modes[i].h == curmode->vdisplay) {
7268                                 mode_existed = true;
7269                                 break;
7270                         }
7271                 }
7272
7273                 if (mode_existed)
7274                         continue;
7275
7276                 mode = amdgpu_dm_create_common_mode(encoder,
7277                                 common_modes[i].name, common_modes[i].w,
7278                                 common_modes[i].h);
7279                 drm_mode_probed_add(connector, mode);
7280                 amdgpu_dm_connector->num_modes++;
7281         }
7282 }
7283
7284 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7285                                               struct edid *edid)
7286 {
7287         struct amdgpu_dm_connector *amdgpu_dm_connector =
7288                         to_amdgpu_dm_connector(connector);
7289
7290         if (edid) {
7291                 /* empty probed_modes */
7292                 INIT_LIST_HEAD(&connector->probed_modes);
7293                 amdgpu_dm_connector->num_modes =
7294                                 drm_add_edid_modes(connector, edid);
7295
7296                 /* Sort the probed modes before calling
7297                  * amdgpu_dm_get_native_mode(), since an EDID can
7298                  * contain more than one preferred mode. A preferred
7299                  * mode later in the probed list may have a higher
7300                  * resolution; for example, 3840x2160 in the base
7301                  * EDID preferred timing and 4096x2160 in a
7302                  * DisplayID extension block.
7303                  */
7304                 drm_mode_sort(&connector->probed_modes);
7305                 amdgpu_dm_get_native_mode(connector);
7306
7307                 /* Freesync capabilities are reset by calling
7308                  * drm_add_edid_modes() and need to be
7309                  * restored here.
7310                  */
7311                 amdgpu_dm_update_freesync_caps(connector, edid);
7312         } else {
7313                 amdgpu_dm_connector->num_modes = 0;
7314         }
7315 }
7316
7317 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7318                               struct drm_display_mode *mode)
7319 {
7320         struct drm_display_mode *m;
7321
7322         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7323                 if (drm_mode_equal(m, mode))
7324                         return true;
7325         }
7326
7327         return false;
7328 }
7329
7330 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7331 {
7332         const struct drm_display_mode *m;
7333         struct drm_display_mode *new_mode;
7334         uint i;
7335         uint32_t new_modes_count = 0;
7336
7337         /* Standard FPS values
7338          *
7339          * 23.976   - TV/NTSC
7340          * 24       - Cinema
7341          * 25       - TV/PAL
7342          * 29.97    - TV/NTSC
7343          * 30       - TV/NTSC
7344          * 48       - Cinema HFR
7345          * 50       - TV/PAL
7346          * 60       - Commonly used
7347          * 48,72,96 - Multiples of 24
7348          */
7349         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7350                                          48000, 50000, 60000, 72000, 96000 };
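        /*
         * Each synthesized mode keeps the base mode's pixel clock and
         * htotal and stretches the vertical front porch to hit the target
         * rate: vtotal = clock / (rate * htotal). Worked example with
         * hypothetical numbers: a 148.5 MHz, htotal = 2200 mode retargeted
         * to 48 Hz needs vtotal = 148500 * 1000 * 1000 / (48000 * 2200),
         * i.e. roughly 1406 lines.
         */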
7351
7352         /*
7353          * Find the mode with the highest refresh rate at the same resolution
7354          * as the preferred mode. Some monitors report a preferred mode whose
7355          * refresh rate is lower than the highest rate they support.
7356          */
7357
7358         m = get_highest_refresh_rate_mode(aconnector, true);
7359         if (!m)
7360                 return 0;
7361
7362         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7363                 uint64_t target_vtotal, target_vtotal_diff;
7364                 uint64_t num, den;
7365
7366                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7367                         continue;
7368
7369                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7370                     common_rates[i] > aconnector->max_vfreq * 1000)
7371                         continue;
7372
7373                 num = (unsigned long long)m->clock * 1000 * 1000;
7374                 den = common_rates[i] * (unsigned long long)m->htotal;
7375                 target_vtotal = div_u64(num, den);
7376                 target_vtotal_diff = target_vtotal - m->vtotal;
7377
7378                 /* Check for illegal modes */
7379                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7380                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7381                     m->vtotal + target_vtotal_diff < m->vsync_end)
7382                         continue;
7383
7384                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7385                 if (!new_mode)
7386                         goto out;
7387
7388                 new_mode->vtotal += (u16)target_vtotal_diff;
7389                 new_mode->vsync_start += (u16)target_vtotal_diff;
7390                 new_mode->vsync_end += (u16)target_vtotal_diff;
7391                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7392                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7393
7394                 if (!is_duplicate_mode(aconnector, new_mode)) {
7395                         drm_mode_probed_add(&aconnector->base, new_mode);
7396                         new_modes_count += 1;
7397                 } else {
7398                         drm_mode_destroy(aconnector->base.dev, new_mode);
                }
7399         }
7400  out:
7401         return new_modes_count;
7402 }
7403
7404 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7405                                                    struct edid *edid)
7406 {
7407         struct amdgpu_dm_connector *amdgpu_dm_connector =
7408                 to_amdgpu_dm_connector(connector);
7409
7410         if (!(amdgpu_freesync_vid_mode && edid))
7411                 return;
7412
7413         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7414                 amdgpu_dm_connector->num_modes +=
7415                         add_fs_modes(amdgpu_dm_connector);
7416 }
7417
7418 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7419 {
7420         struct amdgpu_dm_connector *amdgpu_dm_connector =
7421                         to_amdgpu_dm_connector(connector);
7422         struct drm_encoder *encoder;
7423         struct edid *edid = amdgpu_dm_connector->edid;
7424
7425         encoder = amdgpu_dm_connector_to_encoder(connector);
7426
7427         if (!drm_edid_is_valid(edid)) {
7428                 amdgpu_dm_connector->num_modes =
7429                                 drm_add_modes_noedid(connector, 640, 480);
7430         } else {
7431                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7432                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7433                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7434         }
7435         amdgpu_dm_fbc_init(connector);
7436
7437         return amdgpu_dm_connector->num_modes;
7438 }
7439
7440 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7441                                      struct amdgpu_dm_connector *aconnector,
7442                                      int connector_type,
7443                                      struct dc_link *link,
7444                                      int link_index)
7445 {
7446         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7447
7448         /*
7449          * Some of the properties below require access to state, like bpc.
7450          * Allocate some default initial connector state with our reset helper.
7451          */
7452         if (aconnector->base.funcs->reset)
7453                 aconnector->base.funcs->reset(&aconnector->base);
7454
7455         aconnector->connector_id = link_index;
7456         aconnector->dc_link = link;
7457         aconnector->base.interlace_allowed = false;
7458         aconnector->base.doublescan_allowed = false;
7459         aconnector->base.stereo_allowed = false;
7460         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7461         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7462         aconnector->audio_inst = -1;
7463         mutex_init(&aconnector->hpd_lock);
7464
7465         /*
7466          * Configure HPD hot-plug support: connector->polled defaults to 0,
7467          * which means HPD hot plug is not supported.
7468          */
7469         switch (connector_type) {
7470         case DRM_MODE_CONNECTOR_HDMIA:
7471                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7472                 aconnector->base.ycbcr_420_allowed =
7473                         link->link_enc->features.hdmi_ycbcr420_supported;
7474                 break;
7475         case DRM_MODE_CONNECTOR_DisplayPort:
7476                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7477                 aconnector->base.ycbcr_420_allowed =
7478                         link->link_enc->features.dp_ycbcr420_supported;
7479                 break;
7480         case DRM_MODE_CONNECTOR_DVID:
7481                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7482                 break;
7483         default:
7484                 break;
7485         }
7486
7487         drm_object_attach_property(&aconnector->base.base,
7488                                 dm->ddev->mode_config.scaling_mode_property,
7489                                 DRM_MODE_SCALE_NONE);
7490
7491         drm_object_attach_property(&aconnector->base.base,
7492                                 adev->mode_info.underscan_property,
7493                                 UNDERSCAN_OFF);
7494         drm_object_attach_property(&aconnector->base.base,
7495                                 adev->mode_info.underscan_hborder_property,
7496                                 0);
7497         drm_object_attach_property(&aconnector->base.base,
7498                                 adev->mode_info.underscan_vborder_property,
7499                                 0);
7500
7501         if (!aconnector->mst_port)
7502                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7503
7504         /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7505         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7506         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7507
7508         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7509             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7510                 drm_object_attach_property(&aconnector->base.base,
7511                                 adev->mode_info.abm_level_property, 0);
7512         }
7513
7514         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7515             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7516             connector_type == DRM_MODE_CONNECTOR_eDP) {
7517                 drm_object_attach_property(
7518                         &aconnector->base.base,
7519                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7520
7521                 if (!aconnector->mst_port)
7522                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7523
7524 #ifdef CONFIG_DRM_AMD_DC_HDCP
7525                 if (adev->dm.hdcp_workqueue)
7526                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7527 #endif
7528         }
7529 }
7530
7531 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7532                               struct i2c_msg *msgs, int num)
7533 {
7534         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7535         struct ddc_service *ddc_service = i2c->ddc_service;
7536         struct i2c_command cmd;
7537         int i;
7538         int result = -EIO;
7539
7540         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7541
7542         if (!cmd.payloads)
7543                 return result;
7544
7545         cmd.number_of_payloads = num;
7546         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7547         cmd.speed = 100;
7548
7549         for (i = 0; i < num; i++) {
7550                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7551                 cmd.payloads[i].address = msgs[i].addr;
7552                 cmd.payloads[i].length = msgs[i].len;
7553                 cmd.payloads[i].data = msgs[i].buf;
7554         }
7555
7556         if (dc_submit_i2c(
7557                         ddc_service->ctx->dc,
7558                         ddc_service->ddc_pin->hw_info.ddc_channel,
7559                         &cmd))
7560                 result = num;
7561
7562         kfree(cmd.payloads);
7563         return result;
7564 }
7565
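/*
 * Reporting I2C_FUNC_SMBUS_EMUL lets the I2C core emulate SMBus
 * transactions on top of the master_xfer implementation above.
 */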
7566 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7567 {
7568         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7569 }
7570
7571 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7572         .master_xfer = amdgpu_dm_i2c_xfer,
7573         .functionality = amdgpu_dm_i2c_func,
7574 };
7575
7576 static struct amdgpu_i2c_adapter *
7577 create_i2c(struct ddc_service *ddc_service,
7578            int link_index,
7579            int *res)
7580 {
7581         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7582         struct amdgpu_i2c_adapter *i2c;
7583
7584         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7585         if (!i2c)
7586                 return NULL;
7587         i2c->base.owner = THIS_MODULE;
7588         i2c->base.class = I2C_CLASS_DDC;
7589         i2c->base.dev.parent = &adev->pdev->dev;
7590         i2c->base.algo = &amdgpu_dm_i2c_algo;
7591         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7592         i2c_set_adapdata(&i2c->base, i2c);
7593         i2c->ddc_service = ddc_service;
7594         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7595
7596         return i2c;
7597 }
7598
7599
7600 /*
7601  * Note: this function assumes that dc_link_detect() was called for the
7602  * dc_link which will be represented by this aconnector.
7603  */
7604 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7605                                     struct amdgpu_dm_connector *aconnector,
7606                                     uint32_t link_index,
7607                                     struct amdgpu_encoder *aencoder)
7608 {
7609         int res = 0;
7610         int connector_type;
7611         struct dc *dc = dm->dc;
7612         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7613         struct amdgpu_i2c_adapter *i2c;
7614
7615         link->priv = aconnector;
7616
7617         DRM_DEBUG_DRIVER("%s()\n", __func__);
7618
7619         i2c = create_i2c(link->ddc, link->link_index, &res);
7620         if (!i2c) {
7621                 DRM_ERROR("Failed to create i2c adapter data\n");
7622                 return -ENOMEM;
7623         }
7624
7625         aconnector->i2c = i2c;
7626         res = i2c_add_adapter(&i2c->base);
7627
7628         if (res) {
7629                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7630                 goto out_free;
7631         }
7632
7633         connector_type = to_drm_connector_type(link->connector_signal);
7634
7635         res = drm_connector_init_with_ddc(
7636                         dm->ddev,
7637                         &aconnector->base,
7638                         &amdgpu_dm_connector_funcs,
7639                         connector_type,
7640                         &i2c->base);
7641
7642         if (res) {
7643                 DRM_ERROR("connector_init failed\n");
7644                 aconnector->connector_id = -1;
7645                 goto out_free;
7646         }
7647
7648         drm_connector_helper_add(
7649                         &aconnector->base,
7650                         &amdgpu_dm_connector_helper_funcs);
7651
7652         amdgpu_dm_connector_init_helper(
7653                 dm,
7654                 aconnector,
7655                 connector_type,
7656                 link,
7657                 link_index);
7658
7659         drm_connector_attach_encoder(
7660                 &aconnector->base, &aencoder->base);
7661
7662         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7663                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7664                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7665
7666 out_free:
7667         if (res) {
7668                 kfree(i2c);
7669                 aconnector->i2c = NULL;
7670         }
7671         return res;
7672 }
7673
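/*
 * Any encoder can drive any CRTC, so the possible_crtcs mask is simply
 * the low num_crtc bits set, i.e. (1 << num_crtc) - 1, capped at six
 * CRTCs.
 */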
7674 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7675 {
7676         switch (adev->mode_info.num_crtc) {
7677         case 1:
7678                 return 0x1;
7679         case 2:
7680                 return 0x3;
7681         case 3:
7682                 return 0x7;
7683         case 4:
7684                 return 0xf;
7685         case 5:
7686                 return 0x1f;
7687         case 6:
7688         default:
7689                 return 0x3f;
7690         }
7691 }
7692
7693 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7694                                   struct amdgpu_encoder *aencoder,
7695                                   uint32_t link_index)
7696 {
7697         struct amdgpu_device *adev = drm_to_adev(dev);
7698
7699         int res = drm_encoder_init(dev,
7700                                    &aencoder->base,
7701                                    &amdgpu_dm_encoder_funcs,
7702                                    DRM_MODE_ENCODER_TMDS,
7703                                    NULL);
7704
7705         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7706
7707         if (!res)
7708                 aencoder->encoder_id = link_index;
7709         else
7710                 aencoder->encoder_id = -1;
7711
7712         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7713
7714         return res;
7715 }
7716
7717 static void manage_dm_interrupts(struct amdgpu_device *adev,
7718                                  struct amdgpu_crtc *acrtc,
7719                                  bool enable)
7720 {
7721         /*
7722          * We have no guarantee that the frontend index maps to the same
7723          * backend index - some even map to more than one.
7724          *
7725          * TODO: Use a different interrupt or check DC itself for the mapping.
7726          */
7727         int irq_type =
7728                 amdgpu_display_crtc_idx_to_irq_type(
7729                         adev,
7730                         acrtc->crtc_id);
7731
7732         if (enable) {
7733                 drm_crtc_vblank_on(&acrtc->base);
7734                 amdgpu_irq_get(
7735                         adev,
7736                         &adev->pageflip_irq,
7737                         irq_type);
7738 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7739                 amdgpu_irq_get(
7740                         adev,
7741                         &adev->vline0_irq,
7742                         irq_type);
7743 #endif
7744         } else {
7745 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7746                 amdgpu_irq_put(
7747                         adev,
7748                         &adev->vline0_irq,
7749                         irq_type);
7750 #endif
7751                 amdgpu_irq_put(
7752                         adev,
7753                         &adev->pageflip_irq,
7754                         irq_type);
7755                 drm_crtc_vblank_off(&acrtc->base);
7756         }
7757 }
7758
7759 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7760                                       struct amdgpu_crtc *acrtc)
7761 {
7762         int irq_type =
7763                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7764
7765         /*
7766          * This reads the current state for the IRQ and force-reapplies
7767          * the setting to hardware.
7768          */
7769         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7770 }
7771
7772 static bool
7773 is_scaling_state_different(const struct dm_connector_state *dm_state,
7774                            const struct dm_connector_state *old_dm_state)
7775 {
7776         if (dm_state->scaling != old_dm_state->scaling)
7777                 return true;
7778         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7779                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7780                         return true;
7781         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7782                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7783                         return true;
7784         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7785                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7786                 return true;
7787         return false;
7788 }
7789
7790 #ifdef CONFIG_DRM_AMD_DC_HDCP
7791 static bool is_content_protection_different(struct drm_connector_state *state,
7792                                             const struct drm_connector_state *old_state,
7793                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7794 {
7795         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7796         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7797
7798         /* Handle: Type0/1 change */
7799         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7800             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7801                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7802                 return true;
7803         }
7804
7805         /* CP is being re-enabled; ignore this.
7806          *
7807          * Handles:     ENABLED -> DESIRED
7808          */
7809         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7810             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7811                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7812                 return false;
7813         }
7814
7815         /* S3 resume case: old state will always be UNDESIRED (0) and the restored state will be ENABLED.
7816          *
7817          * Handles:     UNDESIRED -> ENABLED
7818          */
7819         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7820             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7821                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7822
7823         /* Check if something is connected or enabled; otherwise we would
7824          * start HDCP with nothing connected (hot plug, headless S3, DPMS).
7825          *
7826          * Handles:     DESIRED -> DESIRED (Special case)
7827          */
7828         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7829             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7830                 dm_con_state->update_hdcp = false;
7831                 return true;
7832         }
7833
7834         /*
7835          * Handles:     UNDESIRED -> UNDESIRED
7836          *              DESIRED -> DESIRED
7837          *              ENABLED -> ENABLED
7838          */
7839         if (old_state->content_protection == state->content_protection)
7840                 return false;
7841
7842         /*
7843          * Handles:     UNDESIRED -> DESIRED
7844          *              DESIRED -> UNDESIRED
7845          *              ENABLED -> UNDESIRED
7846          */
7847         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7848                 return true;
7849
7850         /*
7851          * Handles:     DESIRED -> ENABLED
7852          */
7853         return false;
7854 }
7855
7856 #endif
7857 static void remove_stream(struct amdgpu_device *adev,
7858                           struct amdgpu_crtc *acrtc,
7859                           struct dc_stream_state *stream)
7860 {
7861         /* This is the update-mode case: mark the CRTC as disabled. */
7862
7863         acrtc->otg_inst = -1;
7864         acrtc->enabled = false;
7865 }
7866
7867 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7868                                struct dc_cursor_position *position)
7869 {
7870         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7871         int x, y;
7872         int xorigin = 0, yorigin = 0;
7873
7874         if (!crtc || !plane->state->fb)
7875                 return 0;
7876
7877         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7878             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7879                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7880                           __func__,
7881                           plane->state->crtc_w,
7882                           plane->state->crtc_h);
7883                 return -EINVAL;
7884         }
7885
7886         x = plane->state->crtc_x;
7887         y = plane->state->crtc_y;
7888
7889         if (x <= -amdgpu_crtc->max_cursor_width ||
7890             y <= -amdgpu_crtc->max_cursor_height)
7891                 return 0;
7892
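        /*
         * For a cursor partially off the top/left edge, keep the on-screen
         * position at 0 and shift the hotspot instead, e.g. crtc_x = -10
         * becomes x = 0, x_hotspot = 10.
         */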
7893         if (x < 0) {
7894                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7895                 x = 0;
7896         }
7897         if (y < 0) {
7898                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7899                 y = 0;
7900         }
7901         position->enable = true;
7902         position->translate_by_source = true;
7903         position->x = x;
7904         position->y = y;
7905         position->x_hotspot = xorigin;
7906         position->y_hotspot = yorigin;
7907
7908         return 0;
7909 }
7910
7911 static void handle_cursor_update(struct drm_plane *plane,
7912                                  struct drm_plane_state *old_plane_state)
7913 {
7914         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7915         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7916         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7917         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7918         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7919         uint64_t address = afb ? afb->address : 0;
7920         struct dc_cursor_position position = {0};
7921         struct dc_cursor_attributes attributes;
7922         int ret;
7923
7924         if (!plane->state->fb && !old_plane_state->fb)
7925                 return;
7926
7927         DC_LOG_CURSOR("%s: crtc_id=%d with size %d x %d\n",
7928                       __func__,
7929                       amdgpu_crtc->crtc_id,
7930                       plane->state->crtc_w,
7931                       plane->state->crtc_h);
7932
7933         ret = get_cursor_position(plane, crtc, &position);
7934         if (ret)
7935                 return;
7936
7937         if (!position.enable) {
7938                 /* turn off cursor */
7939                 if (crtc_state && crtc_state->stream) {
7940                         mutex_lock(&adev->dm.dc_lock);
7941                         dc_stream_set_cursor_position(crtc_state->stream,
7942                                                       &position);
7943                         mutex_unlock(&adev->dm.dc_lock);
7944                 }
7945                 return;
7946         }
7947
7948         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7949         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7950
7951         memset(&attributes, 0, sizeof(attributes));
7952         attributes.address.high_part = upper_32_bits(address);
7953         attributes.address.low_part  = lower_32_bits(address);
7954         attributes.width             = plane->state->crtc_w;
7955         attributes.height            = plane->state->crtc_h;
7956         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7957         attributes.rotation_angle    = 0;
7958         attributes.attribute_flags.value = 0;
7959
7960         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7961
7962         if (crtc_state->stream) {
7963                 mutex_lock(&adev->dm.dc_lock);
7964                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7965                                                          &attributes))
7966                         DRM_ERROR("DC failed to set cursor attributes\n");
7967
7968                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7969                                                    &position))
7970                         DRM_ERROR("DC failed to set cursor position\n");
7971                 mutex_unlock(&adev->dm.dc_lock);
7972         }
7973 }
7974
7975 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7976 {
7978         assert_spin_locked(&acrtc->base.dev->event_lock);
7979         WARN_ON(acrtc->event);
7980
7981         acrtc->event = acrtc->base.state->event;
7982
7983         /* Set the flip status */
7984         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7985
7986         /* Mark this event as consumed */
7987         acrtc->base.state->event = NULL;
7988
7989         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7990                      acrtc->crtc_id);
7991 }
7992
7993 static void update_freesync_state_on_stream(
7994         struct amdgpu_display_manager *dm,
7995         struct dm_crtc_state *new_crtc_state,
7996         struct dc_stream_state *new_stream,
7997         struct dc_plane_state *surface,
7998         u32 flip_timestamp_in_us)
7999 {
8000         struct mod_vrr_params vrr_params;
8001         struct dc_info_packet vrr_infopacket = {0};
8002         struct amdgpu_device *adev = dm->adev;
8003         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8004         unsigned long flags;
8005         bool pack_sdp_v1_3 = false;
8006
8007         if (!new_stream)
8008                 return;
8009
8010         /*
8011          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8012          * For now it's sufficient to just guard against these conditions.
8013          */
8014
8015         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8016                 return;
8017
8018         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8019         vrr_params = acrtc->dm_irq_params.vrr_params;
8020
8021         if (surface) {
8022                 mod_freesync_handle_preflip(
8023                         dm->freesync_module,
8024                         surface,
8025                         new_stream,
8026                         flip_timestamp_in_us,
8027                         &vrr_params);
8028
8029                 if (adev->family < AMDGPU_FAMILY_AI &&
8030                     amdgpu_dm_vrr_active(new_crtc_state)) {
8031                         mod_freesync_handle_v_update(dm->freesync_module,
8032                                                      new_stream, &vrr_params);
8033
8034                         /* Need to call this before the frame ends. */
8035                         dc_stream_adjust_vmin_vmax(dm->dc,
8036                                                    new_crtc_state->stream,
8037                                                    &vrr_params.adjust);
8038                 }
8039         }
8040
8041         mod_freesync_build_vrr_infopacket(
8042                 dm->freesync_module,
8043                 new_stream,
8044                 &vrr_params,
8045                 PACKET_TYPE_VRR,
8046                 TRANSFER_FUNC_UNKNOWN,
8047                 &vrr_infopacket,
8048                 pack_sdp_v1_3);
8049
8050         new_crtc_state->freesync_timing_changed |=
8051                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8052                         &vrr_params.adjust,
8053                         sizeof(vrr_params.adjust)) != 0);
8054
8055         new_crtc_state->freesync_vrr_info_changed |=
8056                 (memcmp(&new_crtc_state->vrr_infopacket,
8057                         &vrr_infopacket,
8058                         sizeof(vrr_infopacket)) != 0);
8059
8060         acrtc->dm_irq_params.vrr_params = vrr_params;
8061         new_crtc_state->vrr_infopacket = vrr_infopacket;
8062
8063         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8064         new_stream->vrr_infopacket = vrr_infopacket;
8065
8066         if (new_crtc_state->freesync_vrr_info_changed)
8067                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8068                               new_crtc_state->base.crtc->base.id,
8069                               (int)new_crtc_state->base.vrr_enabled,
8070                               (int)vrr_params.state);
8071
8072         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8073 }
8074
8075 static void update_stream_irq_parameters(
8076         struct amdgpu_display_manager *dm,
8077         struct dm_crtc_state *new_crtc_state)
8078 {
8079         struct dc_stream_state *new_stream = new_crtc_state->stream;
8080         struct mod_vrr_params vrr_params;
8081         struct mod_freesync_config config = new_crtc_state->freesync_config;
8082         struct amdgpu_device *adev = dm->adev;
8083         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8084         unsigned long flags;
8085
8086         if (!new_stream)
8087                 return;
8088
8089         /*
8090          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8091          * For now it's sufficient to just guard against these conditions.
8092          */
8093         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8094                 return;
8095
8096         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8097         vrr_params = acrtc->dm_irq_params.vrr_params;
8098
8099         if (new_crtc_state->vrr_supported &&
8100             config.min_refresh_in_uhz &&
8101             config.max_refresh_in_uhz) {
8102                 /*
8103                  * If a freesync-compatible mode was set, config.state will
8104                  * already have been set in atomic check.
8105                  */
8106                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8107                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8108                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8109                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8110                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8111                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8112                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8113                 } else {
8114                         config.state = new_crtc_state->base.vrr_enabled ?
8115                                                      VRR_STATE_ACTIVE_VARIABLE :
8116                                                      VRR_STATE_INACTIVE;
8117                 }
8118         } else {
8119                 config.state = VRR_STATE_UNSUPPORTED;
8120         }
8121
8122         mod_freesync_build_vrr_params(dm->freesync_module,
8123                                       new_stream,
8124                                       &config, &vrr_params);
8125
8126         new_crtc_state->freesync_timing_changed |=
8127                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8128                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8129
8130         new_crtc_state->freesync_config = config;
8131         /* Copy state for access from DM IRQ handler */
8132         acrtc->dm_irq_params.freesync_config = config;
8133         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8134         acrtc->dm_irq_params.vrr_params = vrr_params;
8135         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8136 }
8137
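/*
 * Take or drop a vblank reference across VRR on/off transitions, so the
 * vblank irq is guaranteed to stay enabled for as long as VRR is active.
 */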
8138 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8139                                             struct dm_crtc_state *new_state)
8140 {
8141         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8142         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8143
8144         if (!old_vrr_active && new_vrr_active) {
8145                 /* Transition VRR inactive -> active:
8146                  * While VRR is active, we must not disable the vblank irq, as a
8147                  * reenable after a disable would compute bogus vblank/pflip
8148                  * timestamps if the reenable happens inside the display front porch.
8149                  *
8150                  * We also need the vupdate irq for the actual core vblank handling
8151                  * at the end of vblank.
8152                  */
8153                 dm_set_vupdate_irq(new_state->base.crtc, true);
8154                 drm_crtc_vblank_get(new_state->base.crtc);
8155                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8156                                  __func__, new_state->base.crtc->base.id);
8157         } else if (old_vrr_active && !new_vrr_active) {
8158                 /* Transition VRR active -> inactive:
8159                  * Allow vblank irq disable again for fixed refresh rate.
8160                  */
8161                 dm_set_vupdate_irq(new_state->base.crtc, false);
8162                 drm_crtc_vblank_put(new_state->base.crtc);
8163                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8164                                  __func__, new_state->base.crtc->base.id);
8165         }
8166 }
8167
8168 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8169 {
8170         struct drm_plane *plane;
8171         struct drm_plane_state *old_plane_state, *new_plane_state;
8172         int i;
8173
8174         /*
8175          * TODO: Make this per-stream so we don't issue redundant updates for
8176          * commits with multiple streams.
8177          */
8178         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8179                                        new_plane_state, i)
8180                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8181                         handle_cursor_update(plane, old_plane_state);
8182 }
8183
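/*
 * Program all plane updates for one CRTC as a single DC update bundle:
 * gather surface, scaling and flip data, throttle flips against the
 * target vblank, then hand everything to dc_commit_updates_for_stream()
 * under dc_lock.
 */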
8184 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8185                                     struct dc_state *dc_state,
8186                                     struct drm_device *dev,
8187                                     struct amdgpu_display_manager *dm,
8188                                     struct drm_crtc *pcrtc,
8189                                     bool wait_for_vblank)
8190 {
8191         uint32_t i;
8192         uint64_t timestamp_ns;
8193         struct drm_plane *plane;
8194         struct drm_plane_state *old_plane_state, *new_plane_state;
8195         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8196         struct drm_crtc_state *new_pcrtc_state =
8197                         drm_atomic_get_new_crtc_state(state, pcrtc);
8198         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8199         struct dm_crtc_state *dm_old_crtc_state =
8200                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8201         int planes_count = 0, vpos, hpos;
8202         long r;
8203         unsigned long flags;
8204         struct amdgpu_bo *abo;
8205         uint32_t target_vblank, last_flip_vblank;
8206         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8207         bool pflip_present = false;
8208         struct {
8209                 struct dc_surface_update surface_updates[MAX_SURFACES];
8210                 struct dc_plane_info plane_infos[MAX_SURFACES];
8211                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8212                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8213                 struct dc_stream_update stream_update;
8214         } *bundle;
8215
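        /*
         * The bundle holds several MAX_SURFACES-sized arrays of DC structs,
         * which is too large to place on the kernel stack, so it is
         * allocated on the heap for the duration of this commit.
         */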
8216         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8217
8218         if (!bundle) {
8219                 dm_error("Failed to allocate update bundle\n");
8220                 goto cleanup;
8221         }
8222
8223         /*
8224          * Disable the cursor first if we're disabling all the planes.
8225          * It'll remain on the screen after the planes are re-enabled
8226          * if we don't.
8227          */
8228         if (acrtc_state->active_planes == 0)
8229                 amdgpu_dm_commit_cursors(state);
8230
8231         /* update planes when needed */
8232         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8233                 struct drm_crtc *crtc = new_plane_state->crtc;
8234                 struct drm_crtc_state *new_crtc_state;
8235                 struct drm_framebuffer *fb = new_plane_state->fb;
8236                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8237                 bool plane_needs_flip;
8238                 struct dc_plane_state *dc_plane;
8239                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8240
8241                 /* Cursor plane is handled after stream updates */
8242                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8243                         continue;
8244
8245                 if (!fb || !crtc || pcrtc != crtc)
8246                         continue;
8247
8248                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8249                 if (!new_crtc_state->active)
8250                         continue;
8251
8252                 dc_plane = dm_new_plane_state->dc_state;
8253
8254                 bundle->surface_updates[planes_count].surface = dc_plane;
8255                 if (new_pcrtc_state->color_mgmt_changed) {
8256                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8257                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8258                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8259                 }
8260
8261                 fill_dc_scaling_info(new_plane_state,
8262                                      &bundle->scaling_infos[planes_count]);
8263
8264                 bundle->surface_updates[planes_count].scaling_info =
8265                         &bundle->scaling_infos[planes_count];
8266
8267                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8268
8269                 pflip_present = pflip_present || plane_needs_flip;
8270
8271                 if (!plane_needs_flip) {
8272                         planes_count += 1;
8273                         continue;
8274                 }
8275
8276                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8277
8278                 /*
8279                  * Wait for all fences on this FB. Do a limited wait to avoid
8280                  * deadlock during GPU reset, when this fence will not signal
8281                  * but we hold the reservation lock for the BO.
8282                  */
8283                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8284                                                         false,
8285                                                         msecs_to_jiffies(5000));
8286                 if (unlikely(r <= 0))
8287                         DRM_ERROR("Waiting for fences timed out!");
8288
8289                 fill_dc_plane_info_and_addr(
8290                         dm->adev, new_plane_state,
8291                         afb->tiling_flags,
8292                         &bundle->plane_infos[planes_count],
8293                         &bundle->flip_addrs[planes_count].address,
8294                         afb->tmz_surface, false);
8295
8296                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8297                                  new_plane_state->plane->index,
8298                                  bundle->plane_infos[planes_count].dcc.enable);
8299
8300                 bundle->surface_updates[planes_count].plane_info =
8301                         &bundle->plane_infos[planes_count];
8302
8303                 /*
8304                  * Only allow immediate flips for fast updates that don't
8305                  * change FB pitch, DCC state, rotation or mirroring.
8306                  */
8307                 bundle->flip_addrs[planes_count].flip_immediate =
8308                         crtc->state->async_flip &&
8309                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8310
8311                 timestamp_ns = ktime_get_ns();
8312                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8313                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8314                 bundle->surface_updates[planes_count].surface = dc_plane;
8315
8316                 if (!bundle->surface_updates[planes_count].surface) {
8317                         DRM_ERROR("No surface for CRTC: id=%d\n",
8318                                         acrtc_attach->crtc_id);
8319                         continue;
8320                 }
8321
8322                 if (plane == pcrtc->primary)
8323                         update_freesync_state_on_stream(
8324                                 dm,
8325                                 acrtc_state,
8326                                 acrtc_state->stream,
8327                                 dc_plane,
8328                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8329
8330                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8331                                  __func__,
8332                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8333                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8334
8335                 planes_count += 1;
8336
8337         }
8338
8339         if (pflip_present) {
8340                 if (!vrr_active) {
8341                         /* Use old throttling in non-vrr fixed refresh rate mode
8342                          * to keep flip scheduling based on target vblank counts
8343                          * working in a backwards compatible way, e.g., for
8344                          * clients using the GLX_OML_sync_control extension or
8345                          * DRI3/Present extension with defined target_msc.
8346                          */
8347                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8348                 } else {
8350                         /* For variable refresh rate mode only:
8351                          * Get vblank of last completed flip to avoid > 1 vrr
8352                          * flips per video frame by use of throttling, but allow
8353                          * flip programming anywhere in the possibly large
8354                          * variable vrr vblank interval for fine-grained flip
8355                          * timing control and more opportunity to avoid stutter
8356                          * on late submission of flips.
8357                          */
8358                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8359                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8360                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8361                 }
8362
8363                 target_vblank = last_flip_vblank + wait_for_vblank;
8364
8365                 /*
8366                  * Wait until we're out of the vertical blank period before the one
8367                  * targeted by the flip
8368                  */
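                /* The signed delta keeps the comparison below wrap-safe across vblank counter rollover. */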
8369                 while ((acrtc_attach->enabled &&
8370                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8371                                                             0, &vpos, &hpos, NULL,
8372                                                             NULL, &pcrtc->hwmode)
8373                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8374                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8375                         (int)(target_vblank -
8376                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8377                         usleep_range(1000, 1100);
8378                 }
8379
8380                 /*
8381                  * Prepare the flip event for the pageflip interrupt to handle.
8382                  *
8383                  * This only works in the case where we've already turned on the
8384                  * appropriate hardware blocks (e.g. HUBP), so in the transition
8385                  * from 0 -> n planes we have to skip the hardware-generated event
8386                  * and rely on sending it from software.
8387                  */
8388                 if (acrtc_attach->base.state->event &&
8389                     acrtc_state->active_planes > 0) {
8390                         drm_crtc_vblank_get(pcrtc);
8391
8392                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8393
8394                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8395                         prepare_flip_isr(acrtc_attach);
8396
8397                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8398                 }
8399
8400                 if (acrtc_state->stream) {
8401                         if (acrtc_state->freesync_vrr_info_changed)
8402                                 bundle->stream_update.vrr_infopacket =
8403                                         &acrtc_state->stream->vrr_infopacket;
8404                 }
8405         }
8406
8407         /* Update the planes if changed or disable if we don't have any. */
8408         if ((planes_count || acrtc_state->active_planes == 0) &&
8409                 acrtc_state->stream) {
8410                 bundle->stream_update.stream = acrtc_state->stream;
8411                 if (new_pcrtc_state->mode_changed) {
8412                         bundle->stream_update.src = acrtc_state->stream->src;
8413                         bundle->stream_update.dst = acrtc_state->stream->dst;
8414                 }
8415
8416                 if (new_pcrtc_state->color_mgmt_changed) {
8417                         /*
8418                          * TODO: This isn't fully correct since we've actually
8419                          * already modified the stream in place.
8420                          */
8421                         bundle->stream_update.gamut_remap =
8422                                 &acrtc_state->stream->gamut_remap_matrix;
8423                         bundle->stream_update.output_csc_transform =
8424                                 &acrtc_state->stream->csc_color_matrix;
8425                         bundle->stream_update.out_transfer_func =
8426                                 acrtc_state->stream->out_transfer_func;
8427                 }
8428
8429                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8430                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8431                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8432
8433                 /*
8434                  * If FreeSync state on the stream has changed then we need to
8435                  * re-adjust the min/max bounds now that DC doesn't handle this
8436                  * as part of commit.
8437                  */
8438                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8439                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8440                         dc_stream_adjust_vmin_vmax(
8441                                 dm->dc, acrtc_state->stream,
8442                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8443                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8444                 }
8445                 mutex_lock(&dm->dc_lock);
8446                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8447                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8448                         amdgpu_dm_psr_disable(acrtc_state->stream);
8449
8450                 dc_commit_updates_for_stream(dm->dc,
8451                                                      bundle->surface_updates,
8452                                                      planes_count,
8453                                                      acrtc_state->stream,
8454                                                      &bundle->stream_update,
8455                                                      dc_state);
8456
8457                 /*
8458                  * Enable or disable the interrupts on the backend.
8459                  *
8460                  * Most pipes are put into power gating when unused.
8461                  *
8462                  * When power gating is enabled on a pipe, its interrupt
8463                  * enablement state is lost by the time it is disabled again.
8464                  *
8465                  * So we need to update the IRQ control state in hardware
8466                  * whenever the pipe turns on (since it could previously have
8467                  * been power gated) or off (since some pipes can't be power
8468                  * gated on some ASICs).
8469                  */
8470                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8471                         dm_update_pflip_irq_state(drm_to_adev(dev),
8472                                                   acrtc_attach);
8473
8474                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8475                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8476                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8477                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8478                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8479                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8480                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8481                         amdgpu_dm_psr_enable(acrtc_state->stream);
8482                 }
8483
8484                 mutex_unlock(&dm->dc_lock);
8485         }
8486
8487         /*
8488          * Update cursor state *after* programming all the planes.
8489          * This avoids redundant programming in the case where we're
8490          * disabling a single plane, since those pipes are being torn
8491          * down anyway.
8492          */
8492         if (acrtc_state->active_planes)
8493                 amdgpu_dm_commit_cursors(state);
8494
8495 cleanup:
8496         kfree(bundle);
8497 }
8498
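/*
 * Notify the audio driver of ELD changes: first tear down the audio
 * instance for connectors that lost or changed their CRTC, then register
 * the instance reported by DC for each newly modeset stream.
 */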
8499 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8500                                    struct drm_atomic_state *state)
8501 {
8502         struct amdgpu_device *adev = drm_to_adev(dev);
8503         struct amdgpu_dm_connector *aconnector;
8504         struct drm_connector *connector;
8505         struct drm_connector_state *old_con_state, *new_con_state;
8506         struct drm_crtc_state *new_crtc_state;
8507         struct dm_crtc_state *new_dm_crtc_state;
8508         const struct dc_stream_status *status;
8509         int i, inst;
8510
8511         /* Notify audio device removals. */
8512         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8513                 if (old_con_state->crtc != new_con_state->crtc) {
8514                         /* CRTC changes require notification. */
8515                         goto notify;
8516                 }
8517
8518                 if (!new_con_state->crtc)
8519                         continue;
8520
8521                 new_crtc_state = drm_atomic_get_new_crtc_state(
8522                         state, new_con_state->crtc);
8523
8524                 if (!new_crtc_state)
8525                         continue;
8526
8527                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8528                         continue;
8529
8530         notify:
8531                 aconnector = to_amdgpu_dm_connector(connector);
8532
8533                 mutex_lock(&adev->dm.audio_lock);
8534                 inst = aconnector->audio_inst;
8535                 aconnector->audio_inst = -1;
8536                 mutex_unlock(&adev->dm.audio_lock);
8537
8538                 amdgpu_dm_audio_eld_notify(adev, inst);
8539         }
8540
8541         /* Notify audio device additions. */
8542         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8543                 if (!new_con_state->crtc)
8544                         continue;
8545
8546                 new_crtc_state = drm_atomic_get_new_crtc_state(
8547                         state, new_con_state->crtc);
8548
8549                 if (!new_crtc_state)
8550                         continue;
8551
8552                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8553                         continue;
8554
8555                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8556                 if (!new_dm_crtc_state->stream)
8557                         continue;
8558
8559                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8560                 if (!status)
8561                         continue;
8562
8563                 aconnector = to_amdgpu_dm_connector(connector);
8564
8565                 mutex_lock(&adev->dm.audio_lock);
8566                 inst = status->audio_inst;
8567                 aconnector->audio_inst = inst;
8568                 mutex_unlock(&adev->dm.audio_lock);
8569
8570                 amdgpu_dm_audio_eld_notify(adev, inst);
8571         }
8572 }
8573
8574 /**
8575  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8576  * @crtc_state: the DRM CRTC state
8577  * @stream_state: the DC stream state
8578  *
8579  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8580  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8581  */
8582 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8583                                                 struct dc_stream_state *stream_state)
8584 {
8585         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8586 }
8587
8588 /**
8589  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8590  * @state: The atomic state to commit
8591  *
8592  * This will tell DC to commit the constructed DC state from atomic_check,
8593  * programming the hardware. Any failures here imply a hardware failure, since
8594  * atomic check should have filtered anything non-kosher.
8595  */
8596 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8597 {
8598         struct drm_device *dev = state->dev;
8599         struct amdgpu_device *adev = drm_to_adev(dev);
8600         struct amdgpu_display_manager *dm = &adev->dm;
8601         struct dm_atomic_state *dm_state;
8602         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8603         uint32_t i, j;
8604         struct drm_crtc *crtc;
8605         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8606         unsigned long flags;
8607         bool wait_for_vblank = true;
8608         struct drm_connector *connector;
8609         struct drm_connector_state *old_con_state, *new_con_state;
8610         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8611         int crtc_disable_count = 0;
8612         bool mode_set_reset_required = false;
8613
8614         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8615
8616         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8617
8618         dm_state = dm_atomic_get_new_state(state);
8619         if (dm_state && dm_state->context) {
8620                 dc_state = dm_state->context;
8621         } else {
8622                 /* No state changes, retain current state. */
8623                 dc_state_temp = dc_create_state(dm->dc);
8624                 ASSERT(dc_state_temp);
8625                 dc_state = dc_state_temp;
8626                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8627         }
8628
8629         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8630                                        new_crtc_state, i) {
8631                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8632
8633                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8634
8635                 if (old_crtc_state->active &&
8636                     (!new_crtc_state->active ||
8637                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8638                         manage_dm_interrupts(adev, acrtc, false);
8639                         dc_stream_release(dm_old_crtc_state->stream);
8640                 }
8641         }
8642
8643         drm_atomic_helper_calc_timestamping_constants(state);
8644
8645         /* update changed items */
8646         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8647                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8648
8649                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8650                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8651
8652                 DRM_DEBUG_ATOMIC(
8653                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8654                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8655                         "connectors_changed:%d\n",
8656                         acrtc->crtc_id,
8657                         new_crtc_state->enable,
8658                         new_crtc_state->active,
8659                         new_crtc_state->planes_changed,
8660                         new_crtc_state->mode_changed,
8661                         new_crtc_state->active_changed,
8662                         new_crtc_state->connectors_changed);
8663
8664                 /* Disable cursor if disabling crtc */
8665                 if (old_crtc_state->active && !new_crtc_state->active) {
8666                         struct dc_cursor_position position;
8667
8668                         memset(&position, 0, sizeof(position));
8669                         mutex_lock(&dm->dc_lock);
8670                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8671                         mutex_unlock(&dm->dc_lock);
8672                 }
8673
8674                 /* Copy all transient state flags into dc state */
8675                 if (dm_new_crtc_state->stream) {
8676                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8677                                                             dm_new_crtc_state->stream);
8678                 }
8679
8680                 /* handles headless hotplug case, updating new_state and
8681                  * aconnector as needed
8682                  */
8683
8684                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8685
8686                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8687
8688                         if (!dm_new_crtc_state->stream) {
8689                                 /*
8690                                  * This could happen because of issues with
8691                                  * userspace notification delivery: userspace
8692                                  * tries to set a mode on a display that is in
8693                                  * fact disconnected, so dc_sink is NULL on the
8694                                  * aconnector. We expect a mode reset to come
8695                                  * soon.
8696                                  *
8697                                  * This can also happen when an unplug occurs
8698                                  * during the resume sequence.
8699                                  *
8700                                  * In this case, we want to pretend we still
8701                                  * have a sink to keep the pipe running, so that
8702                                  * hw state is consistent with the sw state.
8703                                  */
8704                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8705                                                 __func__, acrtc->base.base.id);
8706                                 continue;
8707                         }
8708
8709                         if (dm_old_crtc_state->stream)
8710                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8711
8712                         pm_runtime_get_noresume(dev->dev);
8713
8714                         acrtc->enabled = true;
8715                         acrtc->hw_mode = new_crtc_state->mode;
8716                         crtc->hwmode = new_crtc_state->mode;
8717                         mode_set_reset_required = true;
8718                 } else if (modereset_required(new_crtc_state)) {
8719                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8720                         /* i.e. reset mode */
8721                         if (dm_old_crtc_state->stream)
8722                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8723
8724                         mode_set_reset_required = true;
8725                 }
8726         } /* for_each_crtc_in_state() */
8727
8728         if (dc_state) {
8729                 /* If there is a mode set or reset, disable eDP PSR */
8730                 if (mode_set_reset_required)
8731                         amdgpu_dm_psr_disable_all(dm);
8732
8733                 dm_enable_per_frame_crtc_master_sync(dc_state);
8734                 mutex_lock(&dm->dc_lock);
8735                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8736 #if defined(CONFIG_DRM_AMD_DC_DCN)
8737                 /* Allow idle optimization when vblank count is 0 for display off */
8738                 if (dm->active_vblank_irq_count == 0)
8739                         dc_allow_idle_optimizations(dm->dc, true);
8740 #endif
8741                 mutex_unlock(&dm->dc_lock);
8742         }
8743
8744         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8745                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8746
8747                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8748
8749                 if (dm_new_crtc_state->stream != NULL) {
8750                         const struct dc_stream_status *status =
8751                                         dc_stream_get_status(dm_new_crtc_state->stream);
8752
8753                         if (!status)
8754                                 status = dc_stream_get_status_from_state(dc_state,
8755                                                                          dm_new_crtc_state->stream);
8756                         if (!status)
8757                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8758                         else
8759                                 acrtc->otg_inst = status->primary_otg_inst;
8760                 }
8761         }
8762 #ifdef CONFIG_DRM_AMD_DC_HDCP
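        /*
         * Re-evaluate content protection: if a stream went away while CP was
         * ENABLED, fall back to DESIRED so HDCP is renegotiated once the
         * display returns; otherwise push any CP property changes to the
         * HDCP workqueue.
         */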
8763         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8764                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8765                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8766                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8767
8768                 new_crtc_state = NULL;
8769
8770                 if (acrtc)
8771                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8772
8773                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8774
8775                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8776                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8777                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8778                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8779                         dm_new_con_state->update_hdcp = true;
8780                         continue;
8781                 }
8782
8783                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8784                         hdcp_update_display(
8785                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8786                                 new_con_state->hdcp_content_type,
8787                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8788         }
8789 #endif
8790
8791         /* Handle connector state changes */
8792         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8793                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8794                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8795                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8796                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8797                 struct dc_stream_update stream_update;
8798                 struct dc_info_packet hdr_packet;
8799                 struct dc_stream_status *status = NULL;
8800                 bool abm_changed, hdr_changed, scaling_changed;
8801
8802                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8803                 memset(&stream_update, 0, sizeof(stream_update));
8804
8805                 if (acrtc) {
8806                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8807                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8808                 }
8809
8810                 /* Skip any modesets/resets */
8811                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8812                         continue;
8813
8814                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8815                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8816
8817                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8818                                                              dm_old_con_state);
8819
8820                 abm_changed = dm_new_crtc_state->abm_level !=
8821                               dm_old_crtc_state->abm_level;
8822
8823                 hdr_changed =
8824                         is_hdr_metadata_different(old_con_state, new_con_state);
8825
8826                 if (!scaling_changed && !abm_changed && !hdr_changed)
8827                         continue;
8828
8829                 stream_update.stream = dm_new_crtc_state->stream;
8830                 if (scaling_changed) {
8831                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8832                                         dm_new_con_state, dm_new_crtc_state->stream);
8833
8834                         stream_update.src = dm_new_crtc_state->stream->src;
8835                         stream_update.dst = dm_new_crtc_state->stream->dst;
8836                 }
8837
8838                 if (abm_changed) {
8839                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8840
8841                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8842                 }
8843
8844                 if (hdr_changed) {
8845                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8846                         stream_update.hdr_static_metadata = &hdr_packet;
8847                 }
8848
8849                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8850                 if (WARN_ON(!status) || WARN_ON(!status->plane_count))
8851                         continue;
8852
8853                 /*
8854                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8855                  * Here we create an empty update on each plane.
8856                  * To fix this, DC should permit updating only stream properties.
8857                  */
8858                 for (j = 0; j < status->plane_count; j++)
8859                         dummy_updates[j].surface = status->plane_states[0];
8860
8862                 mutex_lock(&dm->dc_lock);
8863                 dc_commit_updates_for_stream(dm->dc,
8864                                                      dummy_updates,
8865                                                      status->plane_count,
8866                                                      dm_new_crtc_state->stream,
8867                                                      &stream_update,
8868                                                      dc_state);
8869                 mutex_unlock(&dm->dc_lock);
8870         }
8871
8872         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8873         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8874                                       new_crtc_state, i) {
8875                 if (old_crtc_state->active && !new_crtc_state->active)
8876                         crtc_disable_count++;
8877
8878                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8879                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8880
8881                 /* Update the freesync config on the crtc state and the params used by the irq handler */
8882                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8883
8884                 /* Handle vrr on->off / off->on transitions */
8885                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8886                                                 dm_new_crtc_state);
8887         }
8888
8889         /*
8890          * Enable interrupts for CRTCs that are newly enabled or went through
8891          * a modeset. This is intentionally deferred until after the front-end
8892          * state has been modified, so that the OTG is already on and the IRQ
8893          * handlers don't access stale or invalid state.
8894          */
8895         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8896                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8897 #ifdef CONFIG_DEBUG_FS
8898                 bool configure_crc = false;
8899                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8900 #endif
8901                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8902
8903                 if (new_crtc_state->active &&
8904                     (!old_crtc_state->active ||
8905                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8906                         dc_stream_retain(dm_new_crtc_state->stream);
8907                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8908                         manage_dm_interrupts(adev, acrtc, true);
8909
8910 #ifdef CONFIG_DEBUG_FS
8911                         /*
8912                          * The frontend may have changed, so reapply the CRC
8913                          * capture settings for the stream.
8914                          */
8915                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8916                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8917                         cur_crc_src = acrtc->dm_irq_params.crc_src;
8918                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8919
8920                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8921                                 configure_crc = true;
8922 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8923                                 if (amdgpu_dm_crc_window_is_activated(crtc))
8924                                         configure_crc = false;
8925 #endif
8926                         }
8927
8928                         if (configure_crc)
8929                                 amdgpu_dm_crtc_configure_crc_source(
8930                                         crtc, dm_new_crtc_state, cur_crc_src);
8931 #endif
8932                 }
8933         }
8934
8935         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8936                 if (new_crtc_state->async_flip)
8937                         wait_for_vblank = false;
8938
8939         /* Update planes when needed, per CRTC */
8940         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8941                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8942
8943                 if (dm_new_crtc_state->stream)
8944                         amdgpu_dm_commit_planes(state, dc_state, dev,
8945                                                 dm, crtc, wait_for_vblank);
8946         }
8947
8948         /* Update audio instances for each connector. */
8949         amdgpu_dm_commit_audio(dev, state);
8950
8951         /*
8952          * Send a vblank event for every event not handled in the flip path,
8953          * and mark each event consumed for drm_atomic_helper_commit_hw_done().
8954          */
8955         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8956         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8957
8958                 if (new_crtc_state->event)
8959                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8960
8961                 new_crtc_state->event = NULL;
8962         }
8963         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8964
8965         /* Signal HW programming completion */
8966         drm_atomic_helper_commit_hw_done(state);
8967
8968         if (wait_for_vblank)
8969                 drm_atomic_helper_wait_for_flip_done(dev, state);
8970
8971         drm_atomic_helper_cleanup_planes(dev, state);
8972
8973         /* Return the stolen VGA memory back to VRAM */
8974         if (!adev->mman.keep_stolen_vga_memory)
8975                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8976         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8977
8978         /*
8979          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8980          * so we can put the GPU into runtime suspend if we're not driving any
8981          * displays anymore
8982          */
8983         for (i = 0; i < crtc_disable_count; i++)
8984                 pm_runtime_put_autosuspend(dev->dev);
8985         pm_runtime_mark_last_busy(dev->dev);
8986
8987         if (dc_state_temp)
8988                 dc_release_state(dc_state_temp);
8989 }
8990
8991
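/*
 * Build and commit a minimal atomic state (connector, CRTC and primary
 * plane) with mode_changed forced, restoring the previous display
 * configuration without any userspace involvement.
 */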
8992 static int dm_force_atomic_commit(struct drm_connector *connector)
8993 {
8994         int ret = 0;
8995         struct drm_device *ddev = connector->dev;
8996         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8997         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8998         struct drm_plane *plane = disconnected_acrtc->base.primary;
8999         struct drm_connector_state *conn_state;
9000         struct drm_crtc_state *crtc_state;
9001         struct drm_plane_state *plane_state;
9002
9003         if (!state)
9004                 return -ENOMEM;
9005
9006         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9007
9008         /* Construct an atomic state to restore the previous display setting */
9009
9010         /*
9011          * Attach the connector to drm_atomic_state
9012          */
9013         conn_state = drm_atomic_get_connector_state(state, connector);
9014
9015         ret = PTR_ERR_OR_ZERO(conn_state);
9016         if (ret)
9017                 goto out;
9018
9019         /* Attach the CRTC to drm_atomic_state */
9020         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9021
9022         ret = PTR_ERR_OR_ZERO(crtc_state);
9023         if (ret)
9024                 goto out;
9025
9026         /* force a restore */
9027         crtc_state->mode_changed = true;
9028
9029         /* Attach plane to drm_atomic_state */
9030         plane_state = drm_atomic_get_plane_state(state, plane);
9031
9032         ret = PTR_ERR_OR_ZERO(plane_state);
9033         if (ret)
9034                 goto out;
9035
9036         /* Call commit internally with the state we just constructed */
9037         ret = drm_atomic_commit(state);
9038
9039 out:
9040         drm_atomic_state_put(state);
9041         if (ret)
9042                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9043
9044         return ret;
9045 }
9046
9047 /*
9048  * This function handles all the cases where a set mode does not arrive upon
9049  * hotplug: when a display is unplugged and then plugged back into the same
9050  * port, and when running without usermode desktop manager support.
9051  */
9052 void dm_restore_drm_connector_state(struct drm_device *dev,
9053                                     struct drm_connector *connector)
9054 {
9055         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9056         struct amdgpu_crtc *disconnected_acrtc;
9057         struct dm_crtc_state *acrtc_state;
9058
9059         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9060                 return;
9061
9062         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9063         if (!disconnected_acrtc)
9064                 return;
9065
9066         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9067         if (!acrtc_state->stream)
9068                 return;
9069
9070         /*
9071          * If the previous sink was not released and differs from the current
9072          * one, we deduce that we cannot rely on a usermode call to turn on
9073          * the display, so we do it here.
9074          */
9075         if (acrtc_state->stream->sink != aconnector->dc_sink)
9076                 dm_force_atomic_commit(&aconnector->base);
9077 }
9078
9079 /*
9080  * Grabs all modesetting locks to serialize against any blocking commits,
9081  * and waits for completion of all non-blocking commits.
9082  */
9083 static int do_aquire_global_lock(struct drm_device *dev,
9084                                  struct drm_atomic_state *state)
9085 {
9086         struct drm_crtc *crtc;
9087         struct drm_crtc_commit *commit;
9088         long ret;
9089
9090         /*
9091          * Adding all modeset locks to acquire_ctx ensures that when the
9092          * framework releases it, the extra locks we are taking here will
9093          * get released too.
9094          */
9095         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9096         if (ret)
9097                 return ret;
9098
9099         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9100                 spin_lock(&crtc->commit_lock);
9101                 commit = list_first_entry_or_null(&crtc->commit_list,
9102                                 struct drm_crtc_commit, commit_entry);
9103                 if (commit)
9104                         drm_crtc_commit_get(commit);
9105                 spin_unlock(&crtc->commit_lock);
9106
9107                 if (!commit)
9108                         continue;
9109
9110                 /*
9111                  * Make sure all pending HW programming has completed and
9112                  * all page flips are done.
9113                  */
9114                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9115
9116                 if (ret > 0)
9117                         ret = wait_for_completion_interruptible_timeout(
9118                                         &commit->flip_done, 10*HZ);
9119
9120                 if (ret == 0)
9121                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9122                                   crtc->base.id, crtc->name);
9123
9124                 drm_crtc_commit_put(commit);
9125         }
9126
9127         return ret < 0 ? ret : 0;
9128 }
9129
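/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector's
 * advertised refresh range. Rates are stored in uHz, so e.g. a 48-144 Hz
 * panel yields min_refresh_in_uhz = 48000000 and max_refresh_in_uhz =
 * 144000000.
 */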
9130 static void get_freesync_config_for_crtc(
9131         struct dm_crtc_state *new_crtc_state,
9132         struct dm_connector_state *new_con_state)
9133 {
9134         struct mod_freesync_config config = {0};
9135         struct amdgpu_dm_connector *aconnector =
9136                         to_amdgpu_dm_connector(new_con_state->base.connector);
9137         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9138         int vrefresh = drm_mode_vrefresh(mode);
9139         bool fs_vid_mode = false;
9140
9141         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9142                                         vrefresh >= aconnector->min_vfreq &&
9143                                         vrefresh <= aconnector->max_vfreq;
9144
9145         if (new_crtc_state->vrr_supported) {
9146                 new_crtc_state->stream->ignore_msa_timing_param = true;
9147                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9148
9149                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9150                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9151                 config.vsif_supported = true;
9152                 config.btr = true;
9153
9154                 if (fs_vid_mode) {
9155                         config.state = VRR_STATE_ACTIVE_FIXED;
9156                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9157                         goto out;
9158                 } else if (new_crtc_state->base.vrr_enabled) {
9159                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9160                 } else {
9161                         config.state = VRR_STATE_INACTIVE;
9162                 }
9163         }
9164 out:
9165         new_crtc_state->freesync_config = config;
9166 }
9167
9168 static void reset_freesync_config_for_crtc(
9169         struct dm_crtc_state *new_crtc_state)
9170 {
9171         new_crtc_state->vrr_supported = false;
9172
9173         memset(&new_crtc_state->vrr_infopacket, 0,
9174                sizeof(new_crtc_state->vrr_infopacket));
9175 }
9176
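/*
 * FreeSync video modes are expected to differ from the base mode only in
 * their vertical timing (vtotal, vsync_start, vsync_end) while keeping the
 * vsync pulse width the same, which is why the vertical fields below are
 * compared with '!=' while all horizontal fields must match exactly.
 */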
9177 static bool
9178 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9179                                  struct drm_crtc_state *new_crtc_state)
9180 {
9181         struct drm_display_mode old_mode, new_mode;
9182
9183         if (!old_crtc_state || !new_crtc_state)
9184                 return false;
9185
9186         old_mode = old_crtc_state->mode;
9187         new_mode = new_crtc_state->mode;
9188
9189         if (old_mode.clock       == new_mode.clock &&
9190             old_mode.hdisplay    == new_mode.hdisplay &&
9191             old_mode.vdisplay    == new_mode.vdisplay &&
9192             old_mode.htotal      == new_mode.htotal &&
9193             old_mode.vtotal      != new_mode.vtotal &&
9194             old_mode.hsync_start == new_mode.hsync_start &&
9195             old_mode.vsync_start != new_mode.vsync_start &&
9196             old_mode.hsync_end   == new_mode.hsync_end &&
9197             old_mode.vsync_end   != new_mode.vsync_end &&
9198             old_mode.hskew       == new_mode.hskew &&
9199             old_mode.vscan       == new_mode.vscan &&
9200             (old_mode.vsync_end - old_mode.vsync_start) ==
9201             (new_mode.vsync_end - new_mode.vsync_start))
9202                 return true;
9203
9204         return false;
9205 }
9206
9207 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9208         uint64_t num, den, res;
9209         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9210
9211         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9212
9213         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9214         den = (unsigned long long)new_crtc_state->mode.htotal *
9215               (unsigned long long)new_crtc_state->mode.vtotal;
9216
9217         res = div_u64(num, den);
9218         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9219 }
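
/*
 * Worked example for the conversion above, with illustrative values: a
 * mode with clock = 148500 kHz, htotal = 2200 and vtotal = 1125 gives
 * 148500 * 1000 * 1000000 / (2200 * 1125) = 60000000 uHz, i.e. 60 Hz.
 */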
9220
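/*
 * Create and validate the new DC stream for a CRTC being enabled, decide
 * whether a full modeset is really required, and flag (via
 * lock_and_validation_needed) when DC-level validation must run at the
 * end of atomic check.
 */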
9221 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9222                                 struct drm_atomic_state *state,
9223                                 struct drm_crtc *crtc,
9224                                 struct drm_crtc_state *old_crtc_state,
9225                                 struct drm_crtc_state *new_crtc_state,
9226                                 bool enable,
9227                                 bool *lock_and_validation_needed)
9228 {
9229         struct dm_atomic_state *dm_state = NULL;
9230         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9231         struct dc_stream_state *new_stream;
9232         int ret = 0;
9233
9234         /*
9235          * TODO: Move this code into dm_crtc_atomic_check once we get rid
9236          * of dc_validation_set; update changed items.
9237          */
9238         struct amdgpu_crtc *acrtc = NULL;
9239         struct amdgpu_dm_connector *aconnector = NULL;
9240         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9241         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9242
9243         new_stream = NULL;
9244
9245         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9246         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9247         acrtc = to_amdgpu_crtc(crtc);
9248         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9249
9250         /* TODO This hack should go away */
9251         if (aconnector && enable) {
9252                 /* Make sure a fake sink is created in the plug-in scenario */
9253                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9254                                                             &aconnector->base);
9255                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9256                                                             &aconnector->base);
9257
9258                 if (IS_ERR(drm_new_conn_state)) {
9259                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9260                         goto fail;
9261                 }
9262
9263                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9264                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9265
9266                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9267                         goto skip_modeset;
9268
9269                 new_stream = create_validate_stream_for_sink(aconnector,
9270                                                              &new_crtc_state->mode,
9271                                                              dm_new_conn_state,
9272                                                              dm_old_crtc_state->stream);
9273
9274                 /*
9275                  * We can have no stream on ACTION_SET if a display was
9276                  * disconnected during suspend (S3); in that case it is not an
9277                  * error. The OS will be updated after detection and will do
9278                  * the right thing on the next atomic commit.
9279                  */
9280
9281                 if (!new_stream) {
9282                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9283                                         __func__, acrtc->base.base.id);
9284                         ret = -ENOMEM;
9285                         goto fail;
9286                 }
9287
9288                 /*
9289                  * TODO: Check VSDB bits to decide whether this should
9290                  * be enabled or not.
9291                  */
9292                 new_stream->triggered_crtc_reset.enabled =
9293                         dm->force_timing_sync;
9294
9295                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9296
9297                 ret = fill_hdr_info_packet(drm_new_conn_state,
9298                                            &new_stream->hdr_static_metadata);
9299                 if (ret)
9300                         goto fail;
9301
9302                 /*
9303                  * If we already removed the old stream from the context
9304                  * (and set the new stream to NULL) then we can't reuse
9305                  * the old stream even if the stream and scaling are unchanged.
9306                  * We'll hit the BUG_ON and black screen.
9307                  *
9308                  * TODO: Refactor this function to allow this check to work
9309                  * in all conditions.
9310                  */
9311                 if (amdgpu_freesync_vid_mode &&
9312                     dm_new_crtc_state->stream &&
9313                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9314                         goto skip_modeset;
9315
9316                 if (dm_new_crtc_state->stream &&
9317                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9318                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9319                         new_crtc_state->mode_changed = false;
9320                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9321                                          new_crtc_state->mode_changed);
9322                 }
9323         }
9324
9325         /* mode_changed flag may get updated above, need to check again */
9326         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9327                 goto skip_modeset;
9328
9329         DRM_DEBUG_ATOMIC(
9330                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9331                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9332                 "connectors_changed:%d\n",
9333                 acrtc->crtc_id,
9334                 new_crtc_state->enable,
9335                 new_crtc_state->active,
9336                 new_crtc_state->planes_changed,
9337                 new_crtc_state->mode_changed,
9338                 new_crtc_state->active_changed,
9339                 new_crtc_state->connectors_changed);
9340
9341         /* Remove stream for any changed/disabled CRTC */
9342         if (!enable) {
9344                 if (!dm_old_crtc_state->stream)
9345                         goto skip_modeset;
9346
9347                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9348                     is_timing_unchanged_for_freesync(new_crtc_state,
9349                                                      old_crtc_state)) {
9350                         new_crtc_state->mode_changed = false;
9351                         DRM_DEBUG_DRIVER(
9352                                 "Mode change not required for front porch change, "
9353                                 "setting mode_changed to %d\n",
9354                                 new_crtc_state->mode_changed);
9355
9356                         set_freesync_fixed_config(dm_new_crtc_state);
9357
9358                         goto skip_modeset;
9359                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9360                            is_freesync_video_mode(&new_crtc_state->mode,
9361                                                   aconnector)) {
9362                         set_freesync_fixed_config(dm_new_crtc_state);
9363                 }
9364
9365                 ret = dm_atomic_get_state(state, &dm_state);
9366                 if (ret)
9367                         goto fail;
9368
9369                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9370                                 crtc->base.id);
9371
9372                 /* i.e. reset mode */
9373                 if (dc_remove_stream_from_ctx(
9374                                 dm->dc,
9375                                 dm_state->context,
9376                                 dm_old_crtc_state->stream) != DC_OK) {
9377                         ret = -EINVAL;
9378                         goto fail;
9379                 }
9380
9381                 dc_stream_release(dm_old_crtc_state->stream);
9382                 dm_new_crtc_state->stream = NULL;
9383
9384                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9385
9386                 *lock_and_validation_needed = true;
9387
9388         } else { /* Add stream for any updated/enabled CRTC */
9389                 /*
9390                  * Quick fix to prevent a NULL pointer dereference on new_stream
9391                  * when newly added MST connectors are not found in the existing
9392                  * crtc_state in chained mode. TODO: dig out the root cause.
9393                  */
9394                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9395                         goto skip_modeset;
9396
9397                 if (modereset_required(new_crtc_state))
9398                         goto skip_modeset;
9399
9400                 if (modeset_required(new_crtc_state, new_stream,
9401                                      dm_old_crtc_state->stream)) {
9403                         WARN_ON(dm_new_crtc_state->stream);
9404
9405                         ret = dm_atomic_get_state(state, &dm_state);
9406                         if (ret)
9407                                 goto fail;
9408
9409                         dm_new_crtc_state->stream = new_stream;
9410
9411                         dc_stream_retain(new_stream);
9412
9413                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9414                                          crtc->base.id);
9415
9416                         if (dc_add_stream_to_ctx(
9417                                         dm->dc,
9418                                         dm_state->context,
9419                                         dm_new_crtc_state->stream) != DC_OK) {
9420                                 ret = -EINVAL;
9421                                 goto fail;
9422                         }
9423
9424                         *lock_and_validation_needed = true;
9425                 }
9426         }
9427
9428 skip_modeset:
9429         /* Release extra reference */
9430         if (new_stream)
9431                 dc_stream_release(new_stream);
9432
9433         /*
9434          * We want to do dc stream updates that do not require a
9435          * full modeset below.
9436          */
9437         if (!(enable && aconnector && new_crtc_state->active))
9438                 return 0;
9439         /*
9440          * Given above conditions, the dc state cannot be NULL because:
9441          * 1. We're in the process of enabling CRTCs (just been added
9442          *    to the dc context, or already is on the context)
9443          * 2. Has a valid connector attached, and
9444          * 3. Is currently active and enabled.
9445          * => The dc stream state currently exists.
9446          */
9447         BUG_ON(dm_new_crtc_state->stream == NULL);
9448
9449         /* Scaling or underscan settings */
9450         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9451                 update_stream_scaling_settings(
9452                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9453
9454         /* ABM settings */
9455         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9456
9457         /*
9458          * Color management settings. We also update color properties
9459          * when a modeset is needed, to ensure it gets reprogrammed.
9460          */
9461         if (dm_new_crtc_state->base.color_mgmt_changed ||
9462             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9463                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9464                 if (ret)
9465                         goto fail;
9466         }
9467
9468         /* Update Freesync settings. */
9469         get_freesync_config_for_crtc(dm_new_crtc_state,
9470                                      dm_new_conn_state);
9471
9472         return ret;
9473
9474 fail:
9475         if (new_stream)
9476                 dc_stream_release(new_stream);
9477         return ret;
9478 }
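
/*
 * Note: dm_update_crtc_state() is called twice from amdgpu_dm_atomic_check()
 * below: once with enable == false to remove streams for changed/disabled
 * CRTCs, and once with enable == true to (re)create and add streams for
 * updated/enabled CRTCs. dm_update_plane_state() follows the same two-pass
 * pattern.
 */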
9479
9480 static bool should_reset_plane(struct drm_atomic_state *state,
9481                                struct drm_plane *plane,
9482                                struct drm_plane_state *old_plane_state,
9483                                struct drm_plane_state *new_plane_state)
9484 {
9485         struct drm_plane *other;
9486         struct drm_plane_state *old_other_state, *new_other_state;
9487         struct drm_crtc_state *new_crtc_state;
9488         int i;
9489
9490         /*
9491          * TODO: Remove this hack once the checks below are sufficient
9492          * to determine when we need to reset all the planes on
9493          * the stream.
9494          */
9495         if (state->allow_modeset)
9496                 return true;
9497
9498         /* Exit early if we know that we're adding or removing the plane. */
9499         if (old_plane_state->crtc != new_plane_state->crtc)
9500                 return true;
9501
9502         /* old crtc == new_crtc == NULL, plane not in context. */
9503         if (!new_plane_state->crtc)
9504                 return false;
9505
9506         new_crtc_state =
9507                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9508
9509         if (!new_crtc_state)
9510                 return true;
9511
9512         /* CRTC Degamma changes currently require us to recreate planes. */
9513         if (new_crtc_state->color_mgmt_changed)
9514                 return true;
9515
9516         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9517                 return true;
9518
9519         /*
9520          * If there are any new primary or overlay planes being added or
9521          * removed then the z-order can potentially change. To ensure
9522          * correct z-order and pipe acquisition the current DC architecture
9523          * requires us to remove and recreate all existing planes.
9524          *
9525          * TODO: Come up with a more elegant solution for this.
9526          */
9527         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9528                 struct amdgpu_framebuffer *old_afb, *new_afb;
9529                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9530                         continue;
9531
9532                 if (old_other_state->crtc != new_plane_state->crtc &&
9533                     new_other_state->crtc != new_plane_state->crtc)
9534                         continue;
9535
9536                 if (old_other_state->crtc != new_other_state->crtc)
9537                         return true;
9538
9539                 /* Src/dst size and scaling updates. */
9540                 if (old_other_state->src_w != new_other_state->src_w ||
9541                     old_other_state->src_h != new_other_state->src_h ||
9542                     old_other_state->crtc_w != new_other_state->crtc_w ||
9543                     old_other_state->crtc_h != new_other_state->crtc_h)
9544                         return true;
9545
9546                 /* Rotation / mirroring updates. */
9547                 if (old_other_state->rotation != new_other_state->rotation)
9548                         return true;
9549
9550                 /* Blending updates. */
9551                 if (old_other_state->pixel_blend_mode !=
9552                     new_other_state->pixel_blend_mode)
9553                         return true;
9554
9555                 /* Alpha updates. */
9556                 if (old_other_state->alpha != new_other_state->alpha)
9557                         return true;
9558
9559                 /* Colorspace changes. */
9560                 if (old_other_state->color_range != new_other_state->color_range ||
9561                     old_other_state->color_encoding != new_other_state->color_encoding)
9562                         return true;
9563
9564                 /* Framebuffer checks fall at the end. */
9565                 if (!old_other_state->fb || !new_other_state->fb)
9566                         continue;
9567
9568                 /* Pixel format changes can require bandwidth updates. */
9569                 if (old_other_state->fb->format != new_other_state->fb->format)
9570                         return true;
9571
9572                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9573                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9574
9575                 /* Tiling and DCC changes also require bandwidth updates. */
9576                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9577                     old_afb->base.modifier != new_afb->base.modifier)
9578                         return true;
9579         }
9580
9581         return false;
9582 }
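
/*
 * When this returns true, the enable == false pass of dm_update_plane_state()
 * removes the plane's dc_plane_state from the DC context and the
 * enable == true pass creates and adds a fresh one, i.e. the plane is fully
 * recreated rather than updated in place.
 */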
9583
9584 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9585                               struct drm_plane_state *new_plane_state,
9586                               struct drm_framebuffer *fb)
9587 {
9588         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9589         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9590         unsigned int pitch;
9591         bool linear;
9592
9593         if (fb->width > new_acrtc->max_cursor_width ||
9594             fb->height > new_acrtc->max_cursor_height) {
9595                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9596                                  new_plane_state->fb->width,
9597                                  new_plane_state->fb->height);
9598                 return -EINVAL;
9599         }
9600         if (new_plane_state->src_w != fb->width << 16 ||
9601             new_plane_state->src_h != fb->height << 16) {
9602                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9603                 return -EINVAL;
9604         }
9605
9606         /* Pitch in pixels */
9607         pitch = fb->pitches[0] / fb->format->cpp[0];
9608
9609         if (fb->width != pitch) {
9610                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9611                                  fb->width, pitch);
9612                 return -EINVAL;
9613         }
9614
9615         switch (pitch) {
9616         case 64:
9617         case 128:
9618         case 256:
9619                 /* FB pitch is supported by cursor plane */
9620                 break;
9621         default:
9622                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9623                 return -EINVAL;
9624         }
9625
9626         /* Core DRM takes care of checking FB modifiers, so we only need to
9627          * check tiling flags when the FB doesn't have a modifier. */
9628         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9629                 if (adev->family < AMDGPU_FAMILY_AI) {
9630                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9631                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9632                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9633                 } else {
9634                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9635                 }
9636                 if (!linear) {
9637                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9638                         return -EINVAL;
9639                 }
9640         }
9641
9642         return 0;
9643 }
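
/*
 * Example (illustrative values): a 64x64 ARGB8888 cursor FB has
 * pitches[0] = 64 * 4 = 256 bytes and cpp[0] = 4, so the pitch is
 * 256 / 4 = 64 pixels. That matches fb->width and is one of the supported
 * 64/128/256 pixel pitches, so the checks above pass.
 */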
9644
9645 static int dm_update_plane_state(struct dc *dc,
9646                                  struct drm_atomic_state *state,
9647                                  struct drm_plane *plane,
9648                                  struct drm_plane_state *old_plane_state,
9649                                  struct drm_plane_state *new_plane_state,
9650                                  bool enable,
9651                                  bool *lock_and_validation_needed)
9652 {
9654         struct dm_atomic_state *dm_state = NULL;
9655         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9656         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9657         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9658         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9659         struct amdgpu_crtc *new_acrtc;
9660         bool needs_reset;
9661         int ret = 0;
9662
9664         new_plane_crtc = new_plane_state->crtc;
9665         old_plane_crtc = old_plane_state->crtc;
9666         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9667         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9668
9669         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9670                 if (!enable || !new_plane_crtc ||
9671                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9672                         return 0;
9673
9674                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9675
9676                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9677                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9678                         return -EINVAL;
9679                 }
9680
9681                 if (new_plane_state->fb) {
9682                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9683                                                  new_plane_state->fb);
9684                         if (ret)
9685                                 return ret;
9686                 }
9687
9688                 return 0;
9689         }
9690
9691         needs_reset = should_reset_plane(state, plane, old_plane_state,
9692                                          new_plane_state);
9693
9694         /* Remove any changed/removed planes */
9695         if (!enable) {
9696                 if (!needs_reset)
9697                         return 0;
9698
9699                 if (!old_plane_crtc)
9700                         return 0;
9701
9702                 old_crtc_state = drm_atomic_get_old_crtc_state(
9703                                 state, old_plane_crtc);
9704                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9705
9706                 if (!dm_old_crtc_state->stream)
9707                         return 0;
9708
9709                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9710                                 plane->base.id, old_plane_crtc->base.id);
9711
9712                 ret = dm_atomic_get_state(state, &dm_state);
9713                 if (ret)
9714                         return ret;
9715
9716                 if (!dc_remove_plane_from_context(
9717                                 dc,
9718                                 dm_old_crtc_state->stream,
9719                                 dm_old_plane_state->dc_state,
9720                                 dm_state->context)) {
9722                         return -EINVAL;
9723                 }
9724
9726                 dc_plane_state_release(dm_old_plane_state->dc_state);
9727                 dm_new_plane_state->dc_state = NULL;
9728
9729                 *lock_and_validation_needed = true;
9730
9731         } else { /* Add new planes */
9732                 struct dc_plane_state *dc_new_plane_state;
9733
9734                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9735                         return 0;
9736
9737                 if (!new_plane_crtc)
9738                         return 0;
9739
9740                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9741                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9742
9743                 if (!dm_new_crtc_state->stream)
9744                         return 0;
9745
9746                 if (!needs_reset)
9747                         return 0;
9748
9749                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9750                 if (ret)
9751                         return ret;
9752
9753                 WARN_ON(dm_new_plane_state->dc_state);
9754
9755                 dc_new_plane_state = dc_create_plane_state(dc);
9756                 if (!dc_new_plane_state)
9757                         return -ENOMEM;
9758
9759                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9760                                  plane->base.id, new_plane_crtc->base.id);
9761
9762                 ret = fill_dc_plane_attributes(
9763                         drm_to_adev(new_plane_crtc->dev),
9764                         dc_new_plane_state,
9765                         new_plane_state,
9766                         new_crtc_state);
9767                 if (ret) {
9768                         dc_plane_state_release(dc_new_plane_state);
9769                         return ret;
9770                 }
9771
9772                 ret = dm_atomic_get_state(state, &dm_state);
9773                 if (ret) {
9774                         dc_plane_state_release(dc_new_plane_state);
9775                         return ret;
9776                 }
9777
9778                 /*
9779                  * Any atomic check errors that occur after this will
9780                  * not need a release. The plane state will be attached
9781                  * to the stream, and therefore part of the atomic
9782                  * state. It'll be released when the atomic state is
9783                  * cleaned.
9784                  */
9785                 if (!dc_add_plane_to_context(
9786                                 dc,
9787                                 dm_new_crtc_state->stream,
9788                                 dc_new_plane_state,
9789                                 dm_state->context)) {
9791                         dc_plane_state_release(dc_new_plane_state);
9792                         return -EINVAL;
9793                 }
9794
9795                 dm_new_plane_state->dc_state = dc_new_plane_state;
9796
9797                 /* Tell DC to do a full surface update every time there
9798                  * is a plane change. Inefficient, but works for now.
9799                  */
9800                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9801
9802                 *lock_and_validation_needed = true;
9803         }
9804
9806         return ret;
9807 }
9808
9809 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9810                                 struct drm_crtc *crtc,
9811                                 struct drm_crtc_state *new_crtc_state)
9812 {
9813         struct drm_plane_state *new_cursor_state, *new_primary_state;
9814         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9815
9816         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9817          * cursor per pipe but it's going to inherit the scaling and
9818          * positioning from the underlying pipe. Check the cursor plane's
9819          * blending properties match the primary plane's. */
9820
9821         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9822         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9823         if (!new_cursor_state || !new_primary_state ||
9824             !new_cursor_state->fb || !new_primary_state->fb) {
9825                 return 0;
9826         }
9827
9828         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9829                          (new_cursor_state->src_w >> 16);
9830         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9831                          (new_cursor_state->src_h >> 16);
9832
9833         primary_scale_w = new_primary_state->crtc_w * 1000 /
9834                          (new_primary_state->src_w >> 16);
9835         primary_scale_h = new_primary_state->crtc_h * 1000 /
9836                          (new_primary_state->src_h >> 16);
9837
9838         if (cursor_scale_w != primary_scale_w ||
9839             cursor_scale_h != primary_scale_h) {
9840                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9841                 return -EINVAL;
9842         }
9843
9844         return 0;
9845 }
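
/*
 * Example (illustrative values): src_w/src_h are 16.16 fixed point, so a
 * primary plane scanning out a 1920-wide source (src_w = 1920 << 16) onto a
 * 3840-wide CRTC rectangle gives
 * primary_scale_w = 3840 * 1000 / 1920 = 2000, i.e. a 2.000x scale. The
 * cursor plane must use the same factor or -EINVAL is returned.
 */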
9846
9847 #if defined(CONFIG_DRM_AMD_DC_DCN)
9848 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9849 {
9850         struct drm_connector *connector;
9851         struct drm_connector_state *conn_state;
9852         struct amdgpu_dm_connector *aconnector = NULL;
9853         int i;
9854         for_each_new_connector_in_state(state, connector, conn_state, i) {
9855                 if (conn_state->crtc != crtc)
9856                         continue;
9857
9858                 aconnector = to_amdgpu_dm_connector(connector);
9859                 if (!aconnector->port || !aconnector->mst_port)
9860                         aconnector = NULL;
9861                 else
9862                         break;
9863         }
9864
9865         if (!aconnector)
9866                 return 0;
9867
9868         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9869 }
9870 #endif
9871
9872 /**
9873  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9874  * @dev: The DRM device
9875  * @state: The atomic state to validate
9876  *
9877  * Validate that the given atomic state is programmable by DC into hardware.
9878  * This involves constructing a &struct dc_state reflecting the new hardware
9879  * state we wish to commit, then querying DC to see if it is programmable. It's
9880  * important not to modify the existing DC state. Otherwise, atomic_check
9881  * may unexpectedly commit hardware changes.
9882  *
9883  * When validating the DC state, it's important that the right locks are
9884  * acquired. For the full-update case, which removes/adds/updates streams on
9885  * one CRTC while flipping on another, acquiring the global lock guarantees
9886  * that any such full-update commit will wait for completion of any
9887  * outstanding flip using DRM's synchronization events.
9888  *
9889  * Note that DM adds the affected connectors for all CRTCs in state even when that
9890  * might not seem necessary. This is because DC stream creation requires the
9891  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9892  * be possible but non-trivial - a possible TODO item.
9893  *
9894  * Return: 0 on success, negative error code on failure.
9895  */
9896 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9897                                   struct drm_atomic_state *state)
9898 {
9899         struct amdgpu_device *adev = drm_to_adev(dev);
9900         struct dm_atomic_state *dm_state = NULL;
9901         struct dc *dc = adev->dm.dc;
9902         struct drm_connector *connector;
9903         struct drm_connector_state *old_con_state, *new_con_state;
9904         struct drm_crtc *crtc;
9905         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9906         struct drm_plane *plane;
9907         struct drm_plane_state *old_plane_state, *new_plane_state;
9908         enum dc_status status;
9909         int ret, i;
9910         bool lock_and_validation_needed = false;
9911         struct dm_crtc_state *dm_old_crtc_state;
9912
9913         trace_amdgpu_dm_atomic_check_begin(state);
9914
9915         ret = drm_atomic_helper_check_modeset(dev, state);
9916         if (ret)
9917                 goto fail;
9918
9919         /* Check connector changes */
9920         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9921                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9922                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9923
9924                 /* Skip connectors that are disabled or part of modeset already. */
9925                 if (!old_con_state->crtc && !new_con_state->crtc)
9926                         continue;
9927
9928                 if (!new_con_state->crtc)
9929                         continue;
9930
9931                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9932                 if (IS_ERR(new_crtc_state)) {
9933                         ret = PTR_ERR(new_crtc_state);
9934                         goto fail;
9935                 }
9936
9937                 if (dm_old_con_state->abm_level !=
9938                     dm_new_con_state->abm_level)
9939                         new_crtc_state->connectors_changed = true;
9940         }
9941
9942 #if defined(CONFIG_DRM_AMD_DC_DCN)
9943         if (dc_resource_is_dsc_encoding_supported(dc)) {
9944                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9945                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9946                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9947                                 if (ret)
9948                                         goto fail;
9949                         }
9950                 }
9951         }
9952 #endif
9953         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9954                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9955
9956                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9957                     !new_crtc_state->color_mgmt_changed &&
9958                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9959                     !dm_old_crtc_state->dsc_force_changed)
9960                         continue;
9961
9962                 if (!new_crtc_state->enable)
9963                         continue;
9964
9965                 ret = drm_atomic_add_affected_connectors(state, crtc);
9966                 if (ret)
9967                         goto fail;
9968
9969                 ret = drm_atomic_add_affected_planes(state, crtc);
9970                 if (ret)
9971                         goto fail;
9972
9973                 if (dm_old_crtc_state->dsc_force_changed)
9974                         new_crtc_state->mode_changed = true;
9975         }
9976
9977         /*
9978          * Add all primary and overlay planes on the CRTC to the state
9979          * whenever a plane is enabled to maintain correct z-ordering
9980          * and to enable fast surface updates.
9981          */
9982         drm_for_each_crtc(crtc, dev) {
9983                 bool modified = false;
9984
9985                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9986                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9987                                 continue;
9988
9989                         if (new_plane_state->crtc == crtc ||
9990                             old_plane_state->crtc == crtc) {
9991                                 modified = true;
9992                                 break;
9993                         }
9994                 }
9995
9996                 if (!modified)
9997                         continue;
9998
9999                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10000                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10001                                 continue;
10002
10003                         new_plane_state =
10004                                 drm_atomic_get_plane_state(state, plane);
10005
10006                         if (IS_ERR(new_plane_state)) {
10007                                 ret = PTR_ERR(new_plane_state);
10008                                 goto fail;
10009                         }
10010                 }
10011         }
10012
10013         /* Remove existing planes if they are modified */
10014         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10015                 ret = dm_update_plane_state(dc, state, plane,
10016                                             old_plane_state,
10017                                             new_plane_state,
10018                                             false,
10019                                             &lock_and_validation_needed);
10020                 if (ret)
10021                         goto fail;
10022         }
10023
10024         /* Disable all crtcs which require disable */
10025         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10026                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10027                                            old_crtc_state,
10028                                            new_crtc_state,
10029                                            false,
10030                                            &lock_and_validation_needed);
10031                 if (ret)
10032                         goto fail;
10033         }
10034
10035         /* Enable all crtcs which require enable */
10036         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10037                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10038                                            old_crtc_state,
10039                                            new_crtc_state,
10040                                            true,
10041                                            &lock_and_validation_needed);
10042                 if (ret)
10043                         goto fail;
10044         }
10045
10046         /* Add new/modified planes */
10047         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10048                 ret = dm_update_plane_state(dc, state, plane,
10049                                             old_plane_state,
10050                                             new_plane_state,
10051                                             true,
10052                                             &lock_and_validation_needed);
10053                 if (ret)
10054                         goto fail;
10055         }
10056
10057         /* Run this here since we want to validate the streams we created */
10058         ret = drm_atomic_helper_check_planes(dev, state);
10059         if (ret)
10060                 goto fail;
10061
10062         /* Check cursor planes scaling */
10063         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10064                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10065                 if (ret)
10066                         goto fail;
10067         }
10068
10069         if (state->legacy_cursor_update) {
10070                 /*
10071                  * This is a fast cursor update coming from the plane update
10072                  * helper, check if it can be done asynchronously for better
10073                  * performance.
10074                  */
10075                 state->async_update =
10076                         !drm_atomic_helper_async_check(dev, state);
10077
10078                 /*
10079                  * Skip the remaining global validation if this is an async
10080                  * update. Cursor updates can be done without affecting
10081                  * state or bandwidth calcs and this avoids the performance
10082                  * penalty of locking the private state object and
10083                  * allocating a new dc_state.
10084                  */
10085                 if (state->async_update)
10086                         return 0;
10087         }
10088
10089         /* Check scaling and underscan changes */
10090         /* TODO: Removed scaling-changes validation due to inability to commit
10091          * a new stream into the context w/o causing a full reset. Need to
10092          * decide how to handle this.
10093          */
10094         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10095                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10096                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10097                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10098
10099                 /* Skip any modesets/resets */
10100                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10101                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10102                         continue;
10103
10104                 /* Skip anything that is not a scaling or underscan change */
10105                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10106                         continue;
10107
10108                 lock_and_validation_needed = true;
10109         }
10110
10111         /*
10112          * Streams and planes are reset when there are changes that affect
10113          * bandwidth. Anything that affects bandwidth needs to go through
10114          * DC global validation to ensure that the configuration can be applied
10115          * to hardware.
10116          *
10117          * We have to currently stall out here in atomic_check for outstanding
10118          * commits to finish in this case because our IRQ handlers reference
10119          * DRM state directly - we can end up disabling interrupts too early
10120          * if we don't.
10121          *
10122          * TODO: Remove this stall and drop DM state private objects.
10123          */
10124         if (lock_and_validation_needed) {
10125                 ret = dm_atomic_get_state(state, &dm_state);
10126                 if (ret)
10127                         goto fail;
10128
10129                 ret = do_aquire_global_lock(dev, state);
10130                 if (ret)
10131                         goto fail;
10132
10133 #if defined(CONFIG_DRM_AMD_DC_DCN)
10134                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10135                         goto fail;
10136
10137                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10138                 if (ret)
10139                         goto fail;
10140 #endif
10141
10142                 /*
10143                  * Perform validation of MST topology in the state:
10144                  * We need to perform MST atomic check before calling
10145                  * dc_validate_global_state(), or there is a chance
10146                  * to get stuck in an infinite loop and hang eventually.
10147                  */
10148                 ret = drm_dp_mst_atomic_check(state);
10149                 if (ret)
10150                         goto fail;
10151                 status = dc_validate_global_state(dc, dm_state->context, false);
10152                 if (status != DC_OK) {
10153                         DC_LOG_WARNING("DC global validation failure: %s (%d)\n",
10154                                        dc_status_to_str(status), status);
10155                         ret = -EINVAL;
10156                         goto fail;
10157                 }
10158         } else {
10159                 /*
10160                  * The commit is a fast update. Fast updates shouldn't change
10161                  * the DC context, affect global validation, and can have their
10162                  * commit work done in parallel with other commits not touching
10163                  * the same resource. If we have a new DC context as part of
10164                  * the DM atomic state from validation we need to free it and
10165                  * retain the existing one instead.
10166                  *
10167                  * Furthermore, since the DM atomic state only contains the DC
10168                  * context and can safely be annulled, we can free the state
10169                  * and clear the associated private object now to free
10170                  * some memory and avoid a possible use-after-free later.
10171                  */
10172
10173                 for (i = 0; i < state->num_private_objs; i++) {
10174                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10175
10176                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10177                                 int j = state->num_private_objs - 1;
10178
10179                                 dm_atomic_destroy_state(obj,
10180                                                 state->private_objs[i].state);
10181
10182                                 /* If i is not at the end of the array then the
10183                                  * last element needs to be moved to where i was
10184                                  * before the array can safely be truncated.
10185                                  */
10186                                 if (i != j)
10187                                         state->private_objs[i] =
10188                                                 state->private_objs[j];
10189
10190                                 state->private_objs[j].ptr = NULL;
10191                                 state->private_objs[j].state = NULL;
10192                                 state->private_objs[j].old_state = NULL;
10193                                 state->private_objs[j].new_state = NULL;
10194
10195                                 state->num_private_objs = j;
10196                                 break;
10197                         }
10198                 }
10199         }
10200
10201         /* Store the overall update type for use later in atomic check. */
10202         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10203                 struct dm_crtc_state *dm_new_crtc_state =
10204                         to_dm_crtc_state(new_crtc_state);
10205
10206                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10207                                                          UPDATE_TYPE_FULL :
10208                                                          UPDATE_TYPE_FAST;
10209         }
10210
10211         /* Must be success */
10212         WARN_ON(ret);
10213
10214         trace_amdgpu_dm_atomic_check_finish(state, ret);
10215
10216         return ret;
10217
10218 fail:
10219         if (ret == -EDEADLK)
10220                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10221         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10222                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10223         else
10224                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10225
10226         trace_amdgpu_dm_atomic_check_finish(state, ret);
10227
10228         return ret;
10229 }
10230
10231 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10232                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10233 {
10234         uint8_t dpcd_data;
10235         bool capable = false;
10236
10237         if (amdgpu_dm_connector->dc_link &&
10238                 dm_helpers_dp_read_dpcd(
10239                                 NULL,
10240                                 amdgpu_dm_connector->dc_link,
10241                                 DP_DOWN_STREAM_PORT_COUNT,
10242                                 &dpcd_data,
10243                                 sizeof(dpcd_data))) {
10244                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10245         }
10246
10247         return capable;
10248 }
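
/*
 * Note: DP_MSA_TIMING_PAR_IGNORED is bit 6 of the DOWN_STREAM_PORT_COUNT
 * register (DPCD address 0x0007). A sink that sets it can ignore the MSA
 * timing parameters, which is what allows the source to vary the vertical
 * timing for FreeSync without a modeset.
 */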
10249
10250 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10251                 uint8_t *edid_ext, int len,
10252                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10253 {
10254         int i;
10255         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10256         struct dc *dc = adev->dm.dc;
10257
10258         /* send extension block to DMCU for parsing */
10259         for (i = 0; i < len; i += 8) {
10260                 bool res;
10261                 int offset;
10262
10263                 /* send 8 bytes at a time */
10264                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10265                         return false;
10266
10267                 if (i + 8 == len) {
10268                         /* EDID block sent completely, expect result */
10269                         int version, min_rate, max_rate;
10270
10271                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10272                         if (res) {
10273                                 /* amd vsdb found */
10274                                 vsdb_info->freesync_supported = 1;
10275                                 vsdb_info->amd_vsdb_version = version;
10276                                 vsdb_info->min_refresh_rate_hz = min_rate;
10277                                 vsdb_info->max_refresh_rate_hz = max_rate;
10278                                 return true;
10279                         }
10280                         /* not amd vsdb */
10281                         return false;
10282                 }
10283
10284                 /* Check for ack */
10285                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10286                 if (!res)
10287                         return false;
10288         }
10289
10290         return false;
10291 }
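
/*
 * The loop above implements a simple handshake with the DMCU EDID parser:
 * each 8-byte chunk of the CEA extension block is pushed with
 * dc_edid_parser_send_cea() and acknowledged via
 * dc_edid_parser_recv_cea_ack(); after the final chunk,
 * dc_edid_parser_recv_amd_vsdb() reports whether an AMD VSDB with FreeSync
 * range information was found.
 */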
10292
10293 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10294                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10295 {
10296         uint8_t *edid_ext = NULL;
10297         int i;
10298         bool valid_vsdb_found = false;
10299
10300         /*----- drm_find_cea_extension() -----*/
10301         /* No EDID or EDID extensions */
10302         if (edid == NULL || edid->extensions == 0)
10303                 return -ENODEV;
10304
10305         /* Find CEA extension */
10306         for (i = 0; i < edid->extensions; i++) {
10307                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10308                 if (edid_ext[0] == CEA_EXT)
10309                         break;
10310         }
10311
10312         if (i == edid->extensions)
10313                 return -ENODEV;
10314
10315         /*----- cea_db_offsets() -----*/
10316         if (edid_ext[0] != CEA_EXT)
10317                 return -ENODEV;
10318
10319         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10320
10321         return valid_vsdb_found ? i : -ENODEV;
10322 }
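
/*
 * Note: only the first CEA extension block is considered, and exactly
 * EDID_LENGTH (128) bytes of it are handed to parse_edid_cea(). On success
 * the index of that extension block is returned.
 */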
10323
10324 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10325                                         struct edid *edid)
10326 {
10327         int i = 0;
10328         struct detailed_timing *timing;
10329         struct detailed_non_pixel *data;
10330         struct detailed_data_monitor_range *range;
10331         struct amdgpu_dm_connector *amdgpu_dm_connector =
10332                         to_amdgpu_dm_connector(connector);
10333         struct dm_connector_state *dm_con_state = NULL;
10334
10335         struct drm_device *dev = connector->dev;
10336         struct amdgpu_device *adev = drm_to_adev(dev);
10337         bool freesync_capable = false;
10338         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10339
10340         if (!connector->state) {
10341                 DRM_ERROR("%s - Connector has no state", __func__);
10342                 goto update;
10343         }
10344
10345         if (!edid) {
10346                 dm_con_state = to_dm_connector_state(connector->state);
10347
10348                 amdgpu_dm_connector->min_vfreq = 0;
10349                 amdgpu_dm_connector->max_vfreq = 0;
10350                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10351
10352                 goto update;
10353         }
10354
10355         dm_con_state = to_dm_connector_state(connector->state);
10356
10357         if (!amdgpu_dm_connector->dc_sink) {
10358                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10359                 goto update;
10360         }
10361         if (!adev->dm.freesync_module)
10362                 goto update;
10363
10365         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10366                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10367                 bool edid_check_required = false;
10368
10369                 if (edid) {
10370                         edid_check_required = is_dp_capable_without_timing_msa(
10371                                                 adev->dm.dc,
10372                                                 amdgpu_dm_connector);
10373                 }
10374
10375                 if (edid_check_required && (edid->version > 1 ||
10376                    (edid->version == 1 && edid->revision > 1))) {
10377                         for (i = 0; i < 4; i++) {
10378
10379                                 timing  = &edid->detailed_timings[i];
10380                                 data    = &timing->data.other_data;
10381                                 range   = &data->data.range;
10382                                 /*
10383                                  * Check if monitor has continuous frequency mode
10384                                  */
10385                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10386                                         continue;
10387                                 /*
10388                                  * Check for the range-limits-only flag. If flags == 1,
10389                                  * no additional timing information is provided.
10390                                  * Default GTF, GTF secondary curve and CVT are not
10391                                  * supported.
10392                                  */
10393                                 if (range->flags != 1)
10394                                         continue;
10395
10396                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10397                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10398                                 amdgpu_dm_connector->pixel_clock_mhz =
10399                                         range->pixel_clock_mhz * 10;
10400
10401                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10402                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10403
10404                                 break;
10405                         }
10406
10407                         if (amdgpu_dm_connector->max_vfreq -
10408                             amdgpu_dm_connector->min_vfreq > 10) {
10409
10410                                 freesync_capable = true;
10411                         }
10412                 }
10413         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10414                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10415                 if (i >= 0 && vsdb_info.freesync_supported) {
10416                         timing  = &edid->detailed_timings[i];
10417                         data    = &timing->data.other_data;
10418
10419                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10420                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10421                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10422                                 freesync_capable = true;
10423
10424                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10425                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10426                 }
10427         }
10428
10429 update:
10430         if (dm_con_state)
10431                 dm_con_state->freesync_capable = freesync_capable;
10432
10433         if (connector->vrr_capable_property)
10434                 drm_connector_set_vrr_capable_property(connector,
10435                                                        freesync_capable);
10436 }
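
/*
 * Note: in both the DP/eDP and HDMI paths above, the connector is only
 * reported as FreeSync capable when the advertised refresh range spans more
 * than 10 Hz (max_vfreq - min_vfreq > 10); narrower ranges leave
 * freesync_capable false.
 */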
10437
10438 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10439 {
10440         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10441
10442         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10443                 return;
10444         if (link->type == dc_connection_none)
10445                 return;
10446         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10447                                         dpcd_data, sizeof(dpcd_data))) {
10448                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10449
10450                 if (dpcd_data[0] == 0) {
10451                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10452                         link->psr_settings.psr_feature_enabled = false;
10453                 } else {
10454                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10455                         link->psr_settings.psr_feature_enabled = true;
10456                 }
10457
10458                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10459         }
10460 }
10461
10462 /*
10463  * amdgpu_dm_link_setup_psr() - configure psr link
10464  * @stream: stream state
10465  *
10466  * Return: true on success
10467  */
10468 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10469 {
10470         struct dc_link *link = NULL;
10471         struct psr_config psr_config = {0};
10472         struct psr_context psr_context = {0};
10473         bool ret = false;
10474
10475         if (stream == NULL)
10476                 return false;
10477
10478         link = stream->link;
10479
10480         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10481
10482         if (psr_config.psr_version > 0) {
10483                 psr_config.psr_exit_link_training_required = 0x1;
10484                 psr_config.psr_frame_capture_indication_req = 0;
10485                 psr_config.psr_rfb_setup_time = 0x37;
10486                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10487                 psr_config.allow_smu_optimizations = 0x0;
10488
10489                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10491         }
10492         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10493
10494         return ret;
10495 }
10496
10497 /*
10498  * amdgpu_dm_psr_enable() - enable psr f/w
10499  * @stream: stream state
10500  *
10501  * Return: true on success
10502  */
10503 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10504 {
10505         struct dc_link *link = stream->link;
10506         unsigned int vsync_rate_hz = 0;
10507         struct dc_static_screen_params params = {0};
10508         /*
10509          * Calculate the number of static frames before generating an
10510          * interrupt to enter PSR; init a fail-safe of 2 static frames.
10511          */
10512         unsigned int num_frames_static = 2;
10513
10514         DRM_DEBUG_DRIVER("Enabling psr...\n");
10515
10516         vsync_rate_hz = div64_u64(div64_u64(
10517                         stream->timing.pix_clk_100hz * 100,
10518                         stream->timing.v_total),
10519                         stream->timing.h_total);
10520
10521         /* Round up:
10522          * calculate the number of frames such that at least 30 ms of
10523          * time has passed.
10524          */
10525         if (vsync_rate_hz != 0) {
10526                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10527                 num_frames_static = (30000 / frame_time_microsec) + 1;
10528         }
10529
10530         params.triggers.cursor_update = true;
10531         params.triggers.overlay_update = true;
10532         params.triggers.surface_update = true;
10533         params.num_frames = num_frames_static;
10534
10535         dc_stream_set_static_screen_params(link->ctx->dc,
10536                                            &stream, 1,
10537                                            &params);
10538
10539         return dc_link_set_psr_allow_active(link, true, false, false);
10540 }
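
/*
 * Illustration only (hypothetical helper, not used by the driver): the
 * static-frame computation above, factored out. At 60 Hz the frame time
 * is 16666 us, so 30000 / 16666 + 1 = 2 frames; at 144 Hz it is 6944 us,
 * giving 30000 / 6944 + 1 = 5 frames.
 */
static inline unsigned int dm_psr_num_static_frames(unsigned int vsync_rate_hz)
{
        unsigned int frame_time_microsec;

        if (vsync_rate_hz == 0)
                return 2; /* fail-safe default, as above */

        frame_time_microsec = 1000000 / vsync_rate_hz;
        return (30000 / frame_time_microsec) + 1;
}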
10541
10542 /**
10543  * amdgpu_dm_psr_disable() - disable the PSR firmware
10544  * @stream: stream state
10545  *
10546  * Return: true on success
10547  */
10548 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10549 {
10551         DRM_DEBUG_DRIVER("Disabling psr...\n");
10552
10553         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10554 }
10555
10556 /**
10557  * amdgpu_dm_psr_disable_all() - disable PSR f/w if PSR is enabled on any stream
10558  * @dm: display manager state
10559  *
10560  * Return: true on success
10561  */
10562 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10563 {
10564         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10565         return dc_set_psr_allow_active(dm->dc, false);
10566 }
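
/*
 * Illustration only: the intended call order of the PSR helpers above,
 * sketched as a hypothetical flow. In the real driver these calls are
 * spread across link detection, atomic commit and page-flip paths.
 */
static void __maybe_unused dm_psr_example_flow(struct dc_stream_state *stream)
{
        /* 1. Read sink PSR capabilities from DPCD (detection time). */
        amdgpu_dm_set_psr_caps(stream->link);

        /* 2. Configure the PSR link once a stream is active. */
        if (amdgpu_dm_link_setup_psr(stream))
                /* 3. Arm the firmware; it enters PSR after the static
                 * frame count computed in amdgpu_dm_psr_enable().
                 */
                amdgpu_dm_psr_enable(stream);

        /* 4. Drop out of PSR before the next flip touches the surface. */
        amdgpu_dm_psr_disable(stream);
}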
10567
10568 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10569 {
10570         struct amdgpu_device *adev = drm_to_adev(dev);
10571         struct dc *dc = adev->dm.dc;
10572         int i;
10573
10574         mutex_lock(&adev->dm.dc_lock);
10575         if (dc->current_state) {
10576                 for (i = 0; i < dc->current_state->stream_count; ++i)
10577                         dc->current_state->streams[i]
10578                                 ->triggered_crtc_reset.enabled =
10579                                 adev->dm.force_timing_sync;
10580
10581                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10582                 dc_trigger_sync(dc, dc->current_state);
10583         }
10584         mutex_unlock(&adev->dm.dc_lock);
10585 }
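
/*
 * Illustration only: a hypothetical setter showing how
 * amdgpu_dm_trigger_timing_sync() is meant to be driven. In the real
 * driver the force_timing_sync flag is expected to be toggled elsewhere
 * (e.g. via a debugfs knob); this helper is made up.
 */
static void __maybe_unused
dm_example_set_force_timing_sync(struct amdgpu_device *adev, bool enable)
{
        adev->dm.force_timing_sync = enable;

        /* Re-evaluate every stream's CRTC-reset trigger and re-sync. */
        amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
}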
10586
10587 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10588                        uint32_t value, const char *func_name)
10589 {
10590 #ifdef DM_CHECK_ADDR_0
10591         if (address == 0) {
10592                 DC_ERR("invalid register write; address = 0\n");
10593                 return;
10594         }
10595 #endif
10596         cgs_write_register(ctx->cgs_device, address, value);
10597         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10598 }
10599
10600 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10601                           const char *func_name)
10602 {
10603         uint32_t value;
10604 #ifdef DM_CHECK_ADDR_0
10605         if (address == 0) {
10606                 DC_ERR("invalid register read; address = 0\n");
10607                 return 0;
10608         }
10609 #endif
10610
10611         if (ctx->dmub_srv &&
10612             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10613             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10614                 ASSERT(false);
10615                 return 0;
10616         }
10617
10618         value = cgs_read_register(ctx->cgs_device, address);
10619
10620         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10621
10622         return value;
10623 }
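
/*
 * Illustration only: a hypothetical read-modify-write sequence built on
 * the traced register helpers above; the offset and mask are made up.
 * Real callers normally reach these functions through the dm_read_reg()/
 * dm_write_reg() wrappers, which supply __func__ automatically.
 */
static void __maybe_unused dm_example_rmw(const struct dc_context *ctx)
{
        const uint32_t address = 0x1234;        /* demonstration offset only */
        uint32_t value;

        value = dm_read_reg_func(ctx, address, __func__);
        value |= 0x1;                           /* set an arbitrary bit */
        dm_write_reg_func(ctx, address, value, __func__);
}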