/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
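
/*
 * Rough sketch of the DMCUB firmware image layout as consumed by
 * dm_dmub_hw_init() below (inferred from the offset math there, not an
 * authoritative spec):
 *
 *   ucode_array_offset_bytes
 *   v
 *   +------------+--------------------+------------+----------+
 *   | PSP header | inst_const payload | PSP footer | bss/data |
 *   |   0x100    |                    |   0x100    |          |
 *   +------------+--------------------+------------+----------+
 *   |<-------------- inst_const_bytes ------------>|
 */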

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter - Get counter for number of vertical blanks
 * @adev: amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the vertical blank counter for the given CRTC, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

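/*
 * The helper below packs two 16-bit fields into each 32-bit output,
 * mirroring the legacy register format. An illustrative decode on the
 * caller's side (a sketch, not code from this driver):
 *
 *   u32 hpos      = position >> 16;
 *   u32 vpos      = position & 0xffff;
 *   u32 vbl_start = vbl & 0xffff;
 *   u32 vbl_end   = vbl >> 16;
 */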
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

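/*
 * Note: VRR is considered active below in both the variable and the fixed
 * state; in the fixed state the display still runs from the VRR timing
 * generator, just at a pinned refresh rate, so both states need the
 * front-porch aware vblank handling in the IRQ handlers that follow.
 */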
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, used to look up the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
							frame_duration_ns,
							ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
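/*
 * Sizing note (inferred from the allocation below, not stated explicitly):
 * the buffer covers the largest listed mode at 4 bytes per pixel, i.e. the
 * worst case where FBC achieves no compression at all.
 */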
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

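/*
 * Overview of the framebuffer regions programmed by dm_dmub_hw_init() below:
 * window 0 receives inst_const (copied by the driver only for non-PSP "back
 * door" loading), window 2 receives bss/data, window 3 a copy of the VBIOS,
 * and windows 4-6 (mailbox, trace buffer, firmware state) are zero-filled
 * before the DMUB service is started.
 */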
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_trace_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else {
			break;
		}

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}

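/*
 * Granularity notes for the conversions below (inferred from the shifts):
 * system aperture bounds are kept in 256 KiB units (>> 18), the AGP window
 * in 16 MiB units (>> 24) and GART page table addresses in 4 KiB pages
 * (>> 12). The left shifts on the way into pa_config restore byte addresses.
 */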
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that makes it unable to use vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
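/*
 * Vblank/MALL interaction: the work item below keeps a count of CRTCs with
 * an active vblank interrupt and only re-enables DC idle optimizations
 * (e.g. MALL stutter) once that count drops back to zero, via
 * dc_allow_idle_optimizations().
 */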
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

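/*
 * Tear-down counterpart to amdgpu_dm_init(). Note that this is also invoked
 * on the init error path above, so each step below is expected to tolerate
 * state that was never fully set up.
 */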
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
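/*
 * The DMCU image carries two parts that are registered as separate ucode
 * entries for PSP loading below: the ERAM payload (ucode_size_bytes minus
 * intv_size_bytes) and the interrupt vectors (intv_size_bytes).
 */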
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425         struct amdgpu_device *adev = ctx;
1426
1427         return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431                                      uint32_t value)
1432 {
1433         struct amdgpu_device *adev = ctx;
1434
1435         dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437
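/*
 * dm_dmub_sw_init() - Software-side setup for the DMUB service.
 *
 * Loads and validates the per-ASIC DMUB firmware, creates the DMUB
 * service, sizes its memory regions from the firmware header, backs
 * them with a VRAM buffer object, and records the resulting region
 * layout in adev->dm.dmub_fb_info for later hardware initialization.
 */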
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440         struct dmub_srv_create_params create_params;
1441         struct dmub_srv_region_params region_params;
1442         struct dmub_srv_region_info region_info;
1443         struct dmub_srv_fb_params fb_params;
1444         struct dmub_srv_fb_info *fb_info;
1445         struct dmub_srv *dmub_srv;
1446         const struct dmcub_firmware_header_v1_0 *hdr;
1447         const char *fw_name_dmub;
1448         enum dmub_asic dmub_asic;
1449         enum dmub_status status;
1450         int r;
1451
1452         switch (adev->asic_type) {
1453         case CHIP_RENOIR:
1454                 dmub_asic = DMUB_ASIC_DCN21;
1455                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458                 break;
1459         case CHIP_SIENNA_CICHLID:
1460                 dmub_asic = DMUB_ASIC_DCN30;
1461                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462                 break;
1463         case CHIP_NAVY_FLOUNDER:
1464                 dmub_asic = DMUB_ASIC_DCN30;
1465                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466                 break;
1467         case CHIP_VANGOGH:
1468                 dmub_asic = DMUB_ASIC_DCN301;
1469                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470                 break;
1471         case CHIP_DIMGREY_CAVEFISH:
1472                 dmub_asic = DMUB_ASIC_DCN302;
1473                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474                 break;
1475
1476         default:
1477                 /* ASIC doesn't support DMUB. */
1478                 return 0;
1479         }
1480
1481         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482         if (r) {
1483                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484                 return 0;
1485         }
1486
1487         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488         if (r) {
1489                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490                 return 0;
1491         }
1492
1493         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1494
1495         /* Read the firmware version up front so the log below reports it. */
1496         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1497
1498         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1499                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1500                         AMDGPU_UCODE_ID_DMCUB;
1501                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1502                         adev->dm.dmub_fw;
1503                 adev->firmware.fw_size +=
1504                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1505
1506                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1507                          adev->dm.dmcub_fw_version);
1508         }
1509         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510         dmub_srv = adev->dm.dmub_srv;
1511
1512         if (!dmub_srv) {
1513                 DRM_ERROR("Failed to allocate DMUB service!\n");
1514                 return -ENOMEM;
1515         }
1516
1517         memset(&create_params, 0, sizeof(create_params));
1518         create_params.user_ctx = adev;
1519         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521         create_params.asic = dmub_asic;
1522
1523         /* Create the DMUB service. */
1524         status = dmub_srv_create(dmub_srv, &create_params);
1525         if (status != DMUB_STATUS_OK) {
1526                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1527                 return -EINVAL;
1528         }
1529
1530         /* Calculate the size of all the regions for the DMUB service. */
1531         memset(&region_params, 0, sizeof(region_params));
1532
1533         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536         region_params.vbios_size = adev->bios_size;
1537         region_params.fw_bss_data = region_params.bss_data_size ?
1538                 adev->dm.dmub_fw->data +
1539                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541         region_params.fw_inst_const =
1542                 adev->dm.dmub_fw->data +
1543                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544                 PSP_HEADER_BYTES;
1545
1546         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547                                            &region_info);
1548
1549         if (status != DMUB_STATUS_OK) {
1550                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551                 return -EINVAL;
1552         }
1553
1554         /*
1555          * Allocate a framebuffer based on the total size of all the regions.
1556          * TODO: Move this into GART.
1557          */
1558         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560                                     &adev->dm.dmub_bo_gpu_addr,
1561                                     &adev->dm.dmub_bo_cpu_addr);
1562         if (r)
1563                 return r;
1564
1565         /* Rebase the regions on the framebuffer address. */
1566         memset(&fb_params, 0, sizeof(fb_params));
1567         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569         fb_params.region_info = &region_info;
1570
1571         adev->dm.dmub_fb_info =
1572                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573         fb_info = adev->dm.dmub_fb_info;
1574
1575         if (!fb_info) {
1576                 DRM_ERROR(
1577                         "Failed to allocate framebuffer info for DMUB service!\n");
1578                 return -ENOMEM;
1579         }
1580
1581         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582         if (status != DMUB_STATUS_OK) {
1583                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584                 return -EINVAL;
1585         }
1586
1587         return 0;
1588 }
1589
1590 static int dm_sw_init(void *handle)
1591 {
1592         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593         int r;
1594
1595         r = dm_dmub_sw_init(adev);
1596         if (r)
1597                 return r;
1598
1599         return load_dmcu_fw(adev);
1600 }
1601
1602 static int dm_sw_fini(void *handle)
1603 {
1604         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605
1606         kfree(adev->dm.dmub_fb_info);
1607         adev->dm.dmub_fb_info = NULL;
1608
1609         if (adev->dm.dmub_srv) {
1610                 dmub_srv_destroy(adev->dm.dmub_srv);
1611                 adev->dm.dmub_srv = NULL;
1612         }
1613
1614         release_firmware(adev->dm.dmub_fw);
1615         adev->dm.dmub_fw = NULL;
1616
1617         release_firmware(adev->dm.fw_dmcu);
1618         adev->dm.fw_dmcu = NULL;
1619
1620         return 0;
1621 }
1622
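/*
 * Walk all connectors and start MST topology management on every DP MST
 * branch link that has an AUX channel; a link that fails to start is
 * downgraded to a single (non-MST) connection.
 */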
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625         struct amdgpu_dm_connector *aconnector;
1626         struct drm_connector *connector;
1627         struct drm_connector_list_iter iter;
1628         int ret = 0;
1629
1630         drm_connector_list_iter_begin(dev, &iter);
1631         drm_for_each_connector_iter(connector, &iter) {
1632                 aconnector = to_amdgpu_dm_connector(connector);
1633                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634                     aconnector->mst_mgr.aux) {
1635                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636                                          aconnector,
1637                                          aconnector->base.base.id);
1638
1639                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640                         if (ret < 0) {
1641                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1642                                 aconnector->dc_link->type =
1643                                         dc_connection_single;
1644                                 break;
1645                         }
1646                 }
1647         }
1648         drm_connector_list_iter_end(&iter);
1649
1650         return ret;
1651 }
1652
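/*
 * dm_late_init() - Late DM initialization.
 *
 * Programs the backlight ramping/ABM parameters into the DMCU IRAM (or,
 * when ABM lives on DMCUB, into the DMUB ABM config), then kicks off MST
 * link detection for all connectors.
 */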
1653 static int dm_late_init(void *handle)
1654 {
1655         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656
1657         struct dmcu_iram_parameters params;
1658         unsigned int linear_lut[16];
1659         int i;
1660         struct dmcu *dmcu = NULL;
1661         bool ret = true;
1662
1663         dmcu = adev->dm.dc->res_pool->dmcu;
1664
1665         for (i = 0; i < 16; i++)
1666                 linear_lut[i] = 0xFFFF * i / 15;
1667
1668         params.set = 0;
1669         params.backlight_ramping_start = 0xCCCC;
1670         params.backlight_ramping_reduction = 0xCCCCCCCC;
1671         params.backlight_lut_array_size = 16;
1672         params.backlight_lut_array = linear_lut;
1673
1674         /* Minimum backlight level after ABM reduction; don't allow it to go
1675          * below 1%: 0xFFFF * 0.01 = 0x28F.
1676          */
1677         params.min_abm_backlight = 0x28F;
1678
1679         /* When ABM is implemented on DMCUB (ABM 2.4 and up), the dmcu
1680          * object will be NULL and the IRAM configuration is programmed
1681          * through the DMUB ABM config instead.
1682          */
1683         if (dmcu)
1684                 ret = dmcu_load_iram(dmcu, params);
1685         else if (adev->dm.dc->ctx->dmub_srv)
1686                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1687
1688         if (!ret)
1689                 return -EINVAL;
1690
1691         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1692 }
1693
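/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * On resume, a manager that fails to come back is torn down and a hotplug
 * event is sent so userspace can re-probe.
 */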
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696         struct amdgpu_dm_connector *aconnector;
1697         struct drm_connector *connector;
1698         struct drm_connector_list_iter iter;
1699         struct drm_dp_mst_topology_mgr *mgr;
1700         int ret;
1701         bool need_hotplug = false;
1702
1703         drm_connector_list_iter_begin(dev, &iter);
1704         drm_for_each_connector_iter(connector, &iter) {
1705                 aconnector = to_amdgpu_dm_connector(connector);
1706                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707                     aconnector->mst_port)
1708                         continue;
1709
1710                 mgr = &aconnector->mst_mgr;
1711
1712                 if (suspend) {
1713                         drm_dp_mst_topology_mgr_suspend(mgr);
1714                 } else {
1715                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716                         if (ret < 0) {
1717                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718                                 need_hotplug = true;
1719                         }
1720                 }
1721         }
1722         drm_connector_list_iter_end(&iter);
1723
1724         if (need_hotplug)
1725                 drm_kms_helper_hotplug_event(dev);
1726 }
1727
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730         struct smu_context *smu = &adev->smu;
1731         int ret = 0;
1732
1733         if (!is_support_sw_smu(adev))
1734                 return 0;
1735
1736         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1737          * depends on the Windows driver's dc implementation.
1738          *
1739          * For Navi1x, the clock settings of the dcn watermarks are fixed and
1740          * should be passed to smu during boot up and on resume from S3.
1741          * Boot up: dc calculates the dcn watermark clock settings within
1742          * dc_create and dcn20_resource_construct, then calls the pplib
1743          * functions below to pass the settings to smu:
1744          * smu_set_watermarks_for_clock_ranges
1745          * smu_set_watermarks_table
1746          * navi10_set_watermarks_table
1747          * smu_write_watermarks_table
1748          *
1749          * For Renoir, the clock settings of the dcn watermarks are also fixed
1750          * values. dc has implemented a different flow for the Windows driver:
1751          * dc_hardware_init / dc_set_power_state
1752          * dcn10_init_hw
1753          * notify_wm_ranges
1754          * set_wm_ranges
1755          * -- Linux
1756          * smu_set_watermarks_for_clock_ranges
1757          * renoir_set_watermarks_table
1758          * smu_write_watermarks_table
1759          *
1760          * For Linux,
1761          * dc_hardware_init -> amdgpu_dm_init
1762          * dc_set_power_state --> dm_resume
1763          *
1764          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1765          */
1766         switch (adev->asic_type) {
1767         case CHIP_NAVI10:
1768         case CHIP_NAVI14:
1769         case CHIP_NAVI12:
1770                 break;
1771         default:
1772                 return 0;
1773         }
1774
1775         ret = smu_write_watermarks_table(smu);
1776         if (ret) {
1777                 DRM_ERROR("Failed to update WMTABLE!\n");
1778                 return ret;
1779         }
1780
1781         return 0;
1782 }
1783
1784 /**
1785  * dm_hw_init() - Initialize DC device
1786  * @handle: The base driver device containing the amdgpu_dm device.
1787  *
1788  * Initialize the &struct amdgpu_display_manager device. This involves calling
1789  * the initializers of each DM component, then populating the struct with them.
1790  *
1791  * Although the function implies hardware initialization, both hardware and
1792  * software are initialized here. Splitting them out to their relevant init
1793  * hooks is a future TODO item.
1794  *
1795  * Some notable things that are initialized here:
1796  *
1797  * - Display Core, both software and hardware
1798  * - DC modules that we need (freesync and color management)
1799  * - DRM software states
1800  * - Interrupt sources and handlers
1801  * - Vblank support
1802  * - Debug FS entries, if enabled
1803  */
1804 static int dm_hw_init(void *handle)
1805 {
1806         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807         /* Create DAL display manager */
1808         amdgpu_dm_init(adev);
1809         amdgpu_dm_hpd_init(adev);
1810
1811         return 0;
1812 }
1813
1814 /**
1815  * dm_hw_fini() - Teardown DC device
1816  * @handle: The base driver device containing the amdgpu_dm device.
1817  *
1818  * Teardown components within &struct amdgpu_display_manager that require
1819  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820  * were loaded. Also flush IRQ workqueues and disable them.
1821  */
1822 static int dm_hw_fini(void *handle)
1823 {
1824         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825
1826         amdgpu_dm_hpd_fini(adev);
1827
1828         amdgpu_dm_irq_fini(adev);
1829         amdgpu_dm_fini(adev);
1830         return 0;
1831 }
1832
1833
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836
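/*
 * Enable or disable the pflip and vblank interrupts for every CRTC that
 * has an active stream; used to quiesce and restore display interrupts
 * around a GPU reset.
 */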
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838                                  struct dc_state *state, bool enable)
1839 {
1840         enum dc_irq_source irq_source;
1841         struct amdgpu_crtc *acrtc;
1842         int rc = -EBUSY;
1843         int i = 0;
1844
1845         for (i = 0; i < state->stream_count; i++) {
1846                 acrtc = get_crtc_by_otg_inst(
1847                                 adev, state->stream_status[i].primary_otg_inst);
1848
1849                 if (acrtc && state->stream_status[i].plane_count != 0) {
1850                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1853                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1854                         if (rc)
1855                                 DRM_WARN("Failed to %s pflip interrupts\n",
1856                                          enable ? "enable" : "disable");
1857
1858                         if (enable) {
1859                                 rc = dm_enable_vblank(&acrtc->base);
1860                                 if (rc)
1861                                         DRM_WARN("Failed to enable vblank interrupts\n");
1862                         } else {
1863                                 dm_disable_vblank(&acrtc->base);
1864                         }
1865
1866                 }
1867         }
1868
1869 }
1870
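/*
 * Commit a copy of the current DC state with every stream (and its planes)
 * removed, effectively blanking all displays. Used on the GPU reset path
 * before the hardware goes down.
 */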
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1872 {
1873         struct dc_state *context = NULL;
1874         enum dc_status res = DC_ERROR_UNEXPECTED;
1875         int i;
1876         struct dc_stream_state *del_streams[MAX_PIPES];
1877         int del_streams_count = 0;
1878
1879         memset(del_streams, 0, sizeof(del_streams));
1880
1881         context = dc_create_state(dc);
1882         if (context == NULL)
1883                 goto context_alloc_fail;
1884
1885         dc_resource_state_copy_construct_current(dc, context);
1886
1887         /* First, remove all streams from the context. */
1888         for (i = 0; i < context->stream_count; i++) {
1889                 struct dc_stream_state *stream = context->streams[i];
1890
1891                 del_streams[del_streams_count++] = stream;
1892         }
1893
1894         /* Remove all planes for removed streams and then remove the streams */
1895         for (i = 0; i < del_streams_count; i++) {
1896                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897                         res = DC_FAIL_DETACH_SURFACES;
1898                         goto fail;
1899                 }
1900
1901                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902                 if (res != DC_OK)
1903                         goto fail;
1904         }
1905
1906
1907         res = dc_validate_global_state(dc, context, false);
1908
1909         if (res != DC_OK) {
1910                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1911                 goto fail;
1912         }
1913
1914         res = dc_commit_state(dc, context);
1915
1916 fail:
1917         dc_release_state(context);
1918
1919 context_alloc_fail:
1920         return res;
1921 }
1922
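/*
 * dm_suspend() - Suspend hook.
 *
 * On the GPU reset path, this caches the current DC state, disables display
 * interrupts, and commits zero streams while holding dc_lock; the lock is
 * released by dm_resume() once the reset completes. For an ordinary suspend,
 * it saves the atomic state and powers DC down to D3.
 */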
1923 static int dm_suspend(void *handle)
1924 {
1925         struct amdgpu_device *adev = handle;
1926         struct amdgpu_display_manager *dm = &adev->dm;
1927         int ret = 0;
1928
1929         if (amdgpu_in_reset(adev)) {
1930                 mutex_lock(&dm->dc_lock);
1931
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933                 dc_allow_idle_optimizations(adev->dm.dc, false);
1934 #endif
1935
1936                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937
1938                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939
1940                 amdgpu_dm_commit_zero_streams(dm->dc);
1941
1942                 amdgpu_dm_irq_suspend(adev);
1943
1944                 return ret;
1945         }
1946
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948         amdgpu_dm_crtc_secure_display_suspend(adev);
1949 #endif
1950         WARN_ON(adev->dm.cached_state);
1951         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1952
1953         s3_handle_mst(adev_to_drm(adev), true);
1954
1955         amdgpu_dm_irq_suspend(adev);
1956
1957
1958         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959
1960         return 0;
1961 }
1962
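/* Return the first connector in @state whose new state targets @crtc. */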
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965                                              struct drm_crtc *crtc)
1966 {
1967         uint32_t i;
1968         struct drm_connector_state *new_con_state;
1969         struct drm_connector *connector;
1970         struct drm_crtc *crtc_from_state;
1971
1972         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973                 crtc_from_state = new_con_state->crtc;
1974
1975                 if (crtc_from_state == crtc)
1976                         return to_amdgpu_dm_connector(connector);
1977         }
1978
1979         return NULL;
1980 }
1981
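/*
 * Fake a link detection for forced/emulated connectors: mark the link as
 * disconnected, create a sink matching the connector's signal type
 * (DisplayPort is exposed with a virtual signal), and read the local EDID.
 */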
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984         struct dc_sink_init_data sink_init_data = { 0 };
1985         struct display_sink_capability sink_caps = { 0 };
1986         enum dc_edid_status edid_status;
1987         struct dc_context *dc_ctx = link->ctx;
1988         struct dc_sink *sink = NULL;
1989         struct dc_sink *prev_sink = NULL;
1990
1991         link->type = dc_connection_none;
1992         prev_sink = link->local_sink;
1993
1994         if (prev_sink)
1995                 dc_sink_release(prev_sink);
1996
1997         switch (link->connector_signal) {
1998         case SIGNAL_TYPE_HDMI_TYPE_A: {
1999                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001                 break;
2002         }
2003
2004         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007                 break;
2008         }
2009
2010         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013                 break;
2014         }
2015
2016         case SIGNAL_TYPE_LVDS: {
2017                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2019                 break;
2020         }
2021
2022         case SIGNAL_TYPE_EDP: {
2023                 sink_caps.transaction_type =
2024                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025                 sink_caps.signal = SIGNAL_TYPE_EDP;
2026                 break;
2027         }
2028
2029         case SIGNAL_TYPE_DISPLAY_PORT: {
2030                 sink_caps.transaction_type =
2031                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033                 break;
2034         }
2035
2036         default:
2037                 DC_ERROR("Invalid connector type! signal:%d\n",
2038                         link->connector_signal);
2039                 return;
2040         }
2041
2042         sink_init_data.link = link;
2043         sink_init_data.sink_signal = sink_caps.signal;
2044
2045         sink = dc_sink_create(&sink_init_data);
2046         if (!sink) {
2047                 DC_ERROR("Failed to create sink!\n");
2048                 return;
2049         }
2050
2051         /* dc_sink_create returns a new reference */
2052         link->local_sink = sink;
2053
2054         edid_status = dm_helpers_read_local_edid(
2055                         link->ctx,
2056                         link,
2057                         sink);
2058
2059         if (edid_status != EDID_OK)
2060                 DC_ERROR("Failed to read EDID");
2061
2062 }
2063
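/*
 * Re-commit every stream and plane from the cached DC state after a GPU
 * reset, forcing a full update on each surface so the hardware is fully
 * reprogrammed.
 */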
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065                                      struct amdgpu_display_manager *dm)
2066 {
2067         struct {
2068                 struct dc_surface_update surface_updates[MAX_SURFACES];
2069                 struct dc_plane_info plane_infos[MAX_SURFACES];
2070                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072                 struct dc_stream_update stream_update;
2073         } *bundle;
2074         int k, m;
2075
2076         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077
2078         if (!bundle) {
2079                 dm_error("Failed to allocate update bundle\n");
2080                 goto cleanup;
2081         }
2082
2083         for (k = 0; k < dc_state->stream_count; k++) {
2084                 bundle->stream_update.stream = dc_state->streams[k];
2085
2086                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2087                         bundle->surface_updates[m].surface =
2088                                 dc_state->stream_status[k].plane_states[m];
2089                         bundle->surface_updates[m].surface->force_full_update =
2090                                 true;
2091                 }
2092                 dc_commit_updates_for_stream(
2093                         dm->dc, bundle->surface_updates,
2094                         dc_state->stream_status[k].plane_count,
2095                         dc_state->streams[k], &bundle->stream_update, dc_state);
2096         }
2097
2098 cleanup:
2099         kfree(bundle);
2100
2101         return;
2102 }
2103
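/*
 * Find the stream currently driven by @link and commit a stream update
 * that sets dpms_off, blanking the output without a full modeset.
 */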
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106         struct dc_stream_state *stream_state;
2107         struct amdgpu_dm_connector *aconnector = link->priv;
2108         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109         struct dc_stream_update stream_update;
2110         bool dpms_off = true;
2111
2112         memset(&stream_update, 0, sizeof(stream_update));
2113         stream_update.dpms_off = &dpms_off;
2114
2115         mutex_lock(&adev->dm.dc_lock);
2116         stream_state = dc_stream_find_from_link(link);
2117
2118         if (stream_state == NULL) {
2119                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120                 mutex_unlock(&adev->dm.dc_lock);
2121                 return;
2122         }
2123
2124         stream_update.stream = stream_state;
2125         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126                                      stream_state, &stream_update,
2127                                      stream_state->ctx->dc->current_state);
2128         mutex_unlock(&adev->dm.dc_lock);
2129 }
2130
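/*
 * dm_resume() - Resume hook.
 *
 * On the GPU reset path, re-initializes DMUB and replays the DC state
 * cached by dm_suspend(), then drops dc_lock. For an ordinary resume, it
 * rebuilds the DC state, powers DC up to D0, re-runs sink detection on
 * every connector, and restores the cached atomic state.
 */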
2131 static int dm_resume(void *handle)
2132 {
2133         struct amdgpu_device *adev = handle;
2134         struct drm_device *ddev = adev_to_drm(adev);
2135         struct amdgpu_display_manager *dm = &adev->dm;
2136         struct amdgpu_dm_connector *aconnector;
2137         struct drm_connector *connector;
2138         struct drm_connector_list_iter iter;
2139         struct drm_crtc *crtc;
2140         struct drm_crtc_state *new_crtc_state;
2141         struct dm_crtc_state *dm_new_crtc_state;
2142         struct drm_plane *plane;
2143         struct drm_plane_state *new_plane_state;
2144         struct dm_plane_state *dm_new_plane_state;
2145         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146         enum dc_connection_type new_connection_type = dc_connection_none;
2147         struct dc_state *dc_state;
2148         int i, r, j;
2149
2150         if (amdgpu_in_reset(adev)) {
2151                 dc_state = dm->cached_dc_state;
2152
2153                 r = dm_dmub_hw_init(adev);
2154                 if (r)
2155                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156
2157                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158                 dc_resume(dm->dc);
2159
2160                 amdgpu_dm_irq_resume_early(adev);
2161
2162                 for (i = 0; i < dc_state->stream_count; i++) {
2163                         dc_state->streams[i]->mode_changed = true;
2164                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2165                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2166                                         = 0xffffffff;
2167                         }
2168                 }
2169
2170                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171
2172                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173
2174                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175
2176                 dc_release_state(dm->cached_dc_state);
2177                 dm->cached_dc_state = NULL;
2178
2179                 amdgpu_dm_irq_resume_late(adev);
2180
2181                 mutex_unlock(&dm->dc_lock);
2182
2183                 return 0;
2184         }
2185         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186         dc_release_state(dm_state->context);
2187         dm_state->context = dc_create_state(dm->dc);
2188         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189         dc_resource_state_construct(dm->dc, dm_state->context);
2190
2191         /* Before powering on DC we need to re-initialize DMUB. */
2192         r = dm_dmub_hw_init(adev);
2193         if (r)
2194                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195
2196         /* power on hardware */
2197         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198
2199         /* program HPD filter */
2200         dc_resume(dm->dc);
2201
2202         /*
2203          * early enable HPD Rx IRQ, should be done before set mode as short
2204          * pulse interrupts are used for MST
2205          */
2206         amdgpu_dm_irq_resume_early(adev);
2207
2208         /* On resume we need to rewrite the MSTM control bits to enable MST */
2209         s3_handle_mst(ddev, false);
2210
2211         /* Do detection */
2212         drm_connector_list_iter_begin(ddev, &iter);
2213         drm_for_each_connector_iter(connector, &iter) {
2214                 aconnector = to_amdgpu_dm_connector(connector);
2215
2216                 /*
2217                  * Skip connectors that hang off an MST port; these were
2218                  * already created by the MST topology manager.
2219                  */
2220                 if (aconnector->mst_port)
2221                         continue;
2222
2223                 mutex_lock(&aconnector->hpd_lock);
2224                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225                         DRM_ERROR("KMS: Failed to detect connector\n");
2226
2227                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228                         emulated_link_detect(aconnector->dc_link);
2229                 else
2230                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231
2232                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233                         aconnector->fake_enable = false;
2234
2235                 if (aconnector->dc_sink)
2236                         dc_sink_release(aconnector->dc_sink);
2237                 aconnector->dc_sink = NULL;
2238                 amdgpu_dm_update_connector_after_detect(aconnector);
2239                 mutex_unlock(&aconnector->hpd_lock);
2240         }
2241         drm_connector_list_iter_end(&iter);
2242
2243         /* Force mode set in atomic commit */
2244         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245                 new_crtc_state->active_changed = true;
2246
2247         /*
2248          * atomic_check is expected to create the dc states. We need to release
2249          * them here, since they were duplicated as part of the suspend
2250          * procedure.
2251          */
2252         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254                 if (dm_new_crtc_state->stream) {
2255                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256                         dc_stream_release(dm_new_crtc_state->stream);
2257                         dm_new_crtc_state->stream = NULL;
2258                 }
2259         }
2260
2261         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263                 if (dm_new_plane_state->dc_state) {
2264                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265                         dc_plane_state_release(dm_new_plane_state->dc_state);
2266                         dm_new_plane_state->dc_state = NULL;
2267                 }
2268         }
2269
2270         drm_atomic_helper_resume(ddev, dm->cached_state);
2271
2272         dm->cached_state = NULL;
2273
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275         amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277
2278         amdgpu_dm_irq_resume_late(adev);
2279
2280         amdgpu_dm_smu_write_watermarks_table(adev);
2281
2282         return 0;
2283 }
2284
2285 /**
2286  * DOC: DM Lifecycle
2287  *
2288  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296         .name = "dm",
2297         .early_init = dm_early_init,
2298         .late_init = dm_late_init,
2299         .sw_init = dm_sw_init,
2300         .sw_fini = dm_sw_fini,
2301         .hw_init = dm_hw_init,
2302         .hw_fini = dm_hw_fini,
2303         .suspend = dm_suspend,
2304         .resume = dm_resume,
2305         .is_idle = dm_is_idle,
2306         .wait_for_idle = dm_wait_for_idle,
2307         .check_soft_reset = dm_check_soft_reset,
2308         .soft_reset = dm_soft_reset,
2309         .set_clockgating_state = dm_set_clockgating_state,
2310         .set_powergating_state = dm_set_powergating_state,
2311 };
2312
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315         .type = AMD_IP_BLOCK_TYPE_DCE,
2316         .major = 1,
2317         .minor = 0,
2318         .rev = 0,
2319         .funcs = &amdgpu_dm_funcs,
2320 };
2321
2322
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330         .fb_create = amdgpu_display_user_framebuffer_create,
2331         .get_format_info = amd_get_format_info,
2332         .output_poll_changed = drm_fb_helper_output_poll_changed,
2333         .atomic_check = amdgpu_dm_atomic_check,
2334         .atomic_commit = drm_atomic_helper_commit,
2335 };
2336
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340
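/*
 * Derive eDP backlight capabilities for the connector: decide whether
 * AUX-based brightness control is supported (from the sink's extended
 * DPCD caps, overridable via the amdgpu_backlight option) and convert
 * the sink's max/min content light levels into luminance values.
 */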
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343         u32 max_cll, min_cll, max, min, q, r;
2344         struct amdgpu_dm_backlight_caps *caps;
2345         struct amdgpu_display_manager *dm;
2346         struct drm_connector *conn_base;
2347         struct amdgpu_device *adev;
2348         struct dc_link *link = NULL;
2349         static const u8 pre_computed_values[] = {
2350                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352
2353         if (!aconnector || !aconnector->dc_link)
2354                 return;
2355
2356         link = aconnector->dc_link;
2357         if (link->connector_signal != SIGNAL_TYPE_EDP)
2358                 return;
2359
2360         conn_base = &aconnector->base;
2361         adev = drm_to_adev(conn_base->dev);
2362         dm = &adev->dm;
2363         caps = &dm->backlight_caps;
2364         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365         caps->aux_support = false;
2366         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368
2369         if (caps->ext_caps->bits.oled == 1 ||
2370             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372                 caps->aux_support = true;
2373
2374         if (amdgpu_backlight == 0)
2375                 caps->aux_support = false;
2376         else if (amdgpu_backlight == 1)
2377                 caps->aux_support = true;
2378
2379         /* From the specification (CTA-861-G), the maximum luminance is
2380          * calculated as:
2381          *      Luminance = 50 * 2**(CV/32)
2382          * where CV is a one-byte value.
2383          * Evaluating this expression directly would require floating-point
2384          * precision; to avoid that, we exploit the fact that CV is divided
2385          * by a constant. By Euclid's division algorithm, CV can be written
2386          * as CV = 32*q + r. Substituting into the Luminance expression
2387          * gives 50 * (2**q) * (2**(r/32)), so only the values of 2**(r/32)
2388          * need to be pre-computed. They were generated with the following
2389          * Ruby one-liner:
2390          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391          * The results of the above expression can be verified against
2392          * pre_computed_values.
2393          */
2394         q = max_cll >> 5;
2395         r = max_cll % 32;
2396         max = (1 << q) * pre_computed_values[r];
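        /*
         * Worked example with a hypothetical max_cll of 65 (0x41):
         * CV = 32*2 + 1, so q = 2 and r = 1, giving
         * max = (1 << 2) * 51 = 204, which matches
         * 50 * 2**(65/32.0) ~= 204.4 from the formula above.
         */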
2397
2398         // min luminance: maxLum * (CV/255)^2 / 100
2399         q = DIV_ROUND_CLOSEST(min_cll, 255);
2400         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2401
2402         caps->aux_max_input_signal = max;
2403         caps->aux_min_input_signal = min;
2404 }
2405
2406 void amdgpu_dm_update_connector_after_detect(
2407                 struct amdgpu_dm_connector *aconnector)
2408 {
2409         struct drm_connector *connector = &aconnector->base;
2410         struct drm_device *dev = connector->dev;
2411         struct dc_sink *sink;
2412
2413         /* MST handled by drm_mst framework */
2414         if (aconnector->mst_mgr.mst_state)
2415                 return;
2416
2417         sink = aconnector->dc_link->local_sink;
2418         if (sink)
2419                 dc_sink_retain(sink);
2420
2421         /*
2422          * An EDID-managed connector gets its first update only in the mode_valid
2423          * hook; the connector sink is then set to either a fake or a physical sink
2424          * depending on link status. Skip if this was already done during boot.
2425          */
2426         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427                         && aconnector->dc_em_sink) {
2428
2429                 /*
2430                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2431                  * fake a stream, because connector->sink is NULL on resume.
2432                  */
2433                 mutex_lock(&dev->mode_config.mutex);
2434
2435                 if (sink) {
2436                         if (aconnector->dc_sink) {
2437                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2438                                 /*
2439                                  * The retain/release below bump the sink's refcount:
2440                                  * the link no longer points to it after disconnect,
2441                                  * so the next crtc-to-connector reshuffle by the UMD
2442                                  * would otherwise trigger an unwanted dc_sink release.
2443                                  */
2444                                 dc_sink_release(aconnector->dc_sink);
2445                         }
2446                         aconnector->dc_sink = sink;
2447                         dc_sink_retain(aconnector->dc_sink);
2448                         amdgpu_dm_update_freesync_caps(connector,
2449                                         aconnector->edid);
2450                 } else {
2451                         amdgpu_dm_update_freesync_caps(connector, NULL);
2452                         if (!aconnector->dc_sink) {
2453                                 aconnector->dc_sink = aconnector->dc_em_sink;
2454                                 dc_sink_retain(aconnector->dc_sink);
2455                         }
2456                 }
2457
2458                 mutex_unlock(&dev->mode_config.mutex);
2459
2460                 if (sink)
2461                         dc_sink_release(sink);
2462                 return;
2463         }
2464
2465         /*
2466          * TODO: temporary guard to look for proper fix
2467          * if this sink is MST sink, we should not do anything
2468          */
2469         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470                 dc_sink_release(sink);
2471                 return;
2472         }
2473
2474         if (aconnector->dc_sink == sink) {
2475                 /*
2476                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477                  * Do nothing!!
2478                  */
2479                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480                                 aconnector->connector_id);
2481                 if (sink)
2482                         dc_sink_release(sink);
2483                 return;
2484         }
2485
2486         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487                 aconnector->connector_id, aconnector->dc_sink, sink);
2488
2489         mutex_lock(&dev->mode_config.mutex);
2490
2491         /*
2492          * 1. Update status of the drm connector
2493          * 2. Send an event and let userspace tell us what to do
2494          */
2495         if (sink) {
2496                 /*
2497                  * TODO: check if we still need the S3 mode update workaround.
2498                  * If yes, put it here.
2499                  */
2500                 if (aconnector->dc_sink) {
2501                         amdgpu_dm_update_freesync_caps(connector, NULL);
2502                         dc_sink_release(aconnector->dc_sink);
2503                 }
2504
2505                 aconnector->dc_sink = sink;
2506                 dc_sink_retain(aconnector->dc_sink);
2507                 if (sink->dc_edid.length == 0) {
2508                         aconnector->edid = NULL;
2509                         if (aconnector->dc_link->aux_mode) {
2510                                 drm_dp_cec_unset_edid(
2511                                         &aconnector->dm_dp_aux.aux);
2512                         }
2513                 } else {
2514                         aconnector->edid =
2515                                 (struct edid *)sink->dc_edid.raw_edid;
2516
2517                         drm_connector_update_edid_property(connector,
2518                                                            aconnector->edid);
2519                         if (aconnector->dc_link->aux_mode)
2520                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521                                                     aconnector->edid);
2522                 }
2523
2524                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525                 update_connector_ext_caps(aconnector);
2526         } else {
2527                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528                 amdgpu_dm_update_freesync_caps(connector, NULL);
2529                 drm_connector_update_edid_property(connector, NULL);
2530                 aconnector->num_modes = 0;
2531                 dc_sink_release(aconnector->dc_sink);
2532                 aconnector->dc_sink = NULL;
2533                 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539         }
2540
2541         mutex_unlock(&dev->mode_config.mutex);
2542
2543         update_subconnector_property(aconnector);
2544
2545         if (sink)
2546                 dc_sink_release(sink);
2547 }
2548
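/*
 * HPD long-pulse handler: re-run sink detection on the link (emulated for
 * forced connectors), update the drm connector state, and send a hotplug
 * event to userspace. HDCP state is reset for the display so content
 * protection can renegotiate after the plug event.
 */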
2549 static void handle_hpd_irq(void *param)
2550 {
2551         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552         struct drm_connector *connector = &aconnector->base;
2553         struct drm_device *dev = connector->dev;
2554         enum dc_connection_type new_connection_type = dc_connection_none;
2555         struct amdgpu_device *adev = drm_to_adev(dev);
2556 #ifdef CONFIG_DRM_AMD_DC_HDCP
2557         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559
2560         if (adev->dm.disable_hpd_irq)
2561                 return;
2562
2563         /*
2564          * On failure, or for MST, there is no need to update the connector status
2565          * or notify the OS, since MST handles this in its own context.
2566          */
2567         mutex_lock(&aconnector->hpd_lock);
2568
2569 #ifdef CONFIG_DRM_AMD_DC_HDCP
2570         if (adev->dm.hdcp_workqueue) {
2571                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2572                 dm_con_state->update_hdcp = true;
2573         }
2574 #endif
2575         if (aconnector->fake_enable)
2576                 aconnector->fake_enable = false;
2577
2578         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2579                 DRM_ERROR("KMS: Failed to detect connector\n");
2580
2581         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2582                 emulated_link_detect(aconnector->dc_link);
2583
2584
2585                 drm_modeset_lock_all(dev);
2586                 dm_restore_drm_connector_state(dev, connector);
2587                 drm_modeset_unlock_all(dev);
2588
2589                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2590                         drm_kms_helper_hotplug_event(dev);
2591
2592         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2593                 if (new_connection_type == dc_connection_none &&
2594                     aconnector->dc_link->type == dc_connection_none)
2595                         dm_set_dpms_off(aconnector->dc_link);
2596
2597                 amdgpu_dm_update_connector_after_detect(aconnector);
2598
2599                 drm_modeset_lock_all(dev);
2600                 dm_restore_drm_connector_state(dev, connector);
2601                 drm_modeset_unlock_all(dev);
2602
2603                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2604                         drm_kms_helper_hotplug_event(dev);
2605         }
2606         mutex_unlock(&aconnector->hpd_lock);
2607
2608 }
2609
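/*
 * Service MST "short pulse" downstream interrupts: read the ESI/sink-count
 * DPCD block, let the MST manager handle the IRQ, ACK it back to the sink,
 * and loop (up to 30 iterations) while new IRQs keep arriving.
 */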
2610 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2611 {
2612         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2613         uint8_t dret;
2614         bool new_irq_handled = false;
2615         int dpcd_addr;
2616         int dpcd_bytes_to_read;
2617
2618         const int max_process_count = 30;
2619         int process_count = 0;
2620
2621         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2622
2623         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2624                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2625                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2626                 dpcd_addr = DP_SINK_COUNT;
2627         } else {
2628                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2629                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2630                 dpcd_addr = DP_SINK_COUNT_ESI;
2631         }
2632
2633         dret = drm_dp_dpcd_read(
2634                 &aconnector->dm_dp_aux.aux,
2635                 dpcd_addr,
2636                 esi,
2637                 dpcd_bytes_to_read);
2638
2639         while (dret == dpcd_bytes_to_read &&
2640                 process_count < max_process_count) {
2641                 uint8_t retry;
2642                 dret = 0;
2643
2644                 process_count++;
2645
2646                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2647                 /* handle HPD short pulse irq */
2648                 if (aconnector->mst_mgr.mst_state)
2649                         drm_dp_mst_hpd_irq(
2650                                 &aconnector->mst_mgr,
2651                                 esi,
2652                                 &new_irq_handled);
2653
2654                 if (new_irq_handled) {
2655                         /* ACK at DPCD to notify downstream */
2656                         const int ack_dpcd_bytes_to_write =
2657                                 dpcd_bytes_to_read - 1;
2658
2659                         for (retry = 0; retry < 3; retry++) {
2660                                 uint8_t wret;
2661
2662                                 wret = drm_dp_dpcd_write(
2663                                         &aconnector->dm_dp_aux.aux,
2664                                         dpcd_addr + 1,
2665                                         &esi[1],
2666                                         ack_dpcd_bytes_to_write);
2667                                 if (wret == ack_dpcd_bytes_to_write)
2668                                         break;
2669                         }
2670
2671                         /* check if there is new irq to be handled */
2672                         dret = drm_dp_dpcd_read(
2673                                 &aconnector->dm_dp_aux.aux,
2674                                 dpcd_addr,
2675                                 esi,
2676                                 dpcd_bytes_to_read);
2677
2678                         new_irq_handled = false;
2679                 } else {
2680                         break;
2681                 }
2682         }
2683
2684         if (process_count == max_process_count)
2685                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2686 }
2687
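/*
 * HPD RX (short pulse) handler: dispatch MST up/down messages to
 * dm_handle_hpd_rx_irq(), let DC handle link loss and other RX IRQs, and
 * re-run sink detection when the downstream port status has changed.
 */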
2688 static void handle_hpd_rx_irq(void *param)
2689 {
2690         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2691         struct drm_connector *connector = &aconnector->base;
2692         struct drm_device *dev = connector->dev;
2693         struct dc_link *dc_link = aconnector->dc_link;
2694         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2695         bool result = false;
2696         enum dc_connection_type new_connection_type = dc_connection_none;
2697         struct amdgpu_device *adev = drm_to_adev(dev);
2698         union hpd_irq_data hpd_irq_data;
2699
2700         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2701
2702         if (adev->dm.disable_hpd_irq)
2703                 return;
2704
2705
2706         /*
2707          * TODO: Temporarily take this mutex so the HPD interrupt does not hit
2708          * a GPIO conflict; once an i2c helper is implemented, this mutex
2709          * should be retired.
2710          */
2711         if (dc_link->type != dc_connection_mst_branch)
2712                 mutex_lock(&aconnector->hpd_lock);
2713
2714         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2715
2716         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2717                 (dc_link->type == dc_connection_mst_branch)) {
2718                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2719                         result = true;
2720                         dm_handle_hpd_rx_irq(aconnector);
2721                         goto out;
2722                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2723                         result = false;
2724                         dm_handle_hpd_rx_irq(aconnector);
2725                         goto out;
2726                 }
2727         }
2728
2729         if (!amdgpu_in_reset(adev))
2730                 mutex_lock(&adev->dm.dc_lock);
2731 #ifdef CONFIG_DRM_AMD_DC_HDCP
2732         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2733 #else
2734         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2735 #endif
2736         if (!amdgpu_in_reset(adev))
2737                 mutex_unlock(&adev->dm.dc_lock);
2738
2739 out:
2740         if (result && !is_mst_root_connector) {
2741                 /* Downstream Port status changed. */
2742                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2743                         DRM_ERROR("KMS: Failed to detect connector\n");
2744
2745                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2746                         emulated_link_detect(dc_link);
2747
2748                         if (aconnector->fake_enable)
2749                                 aconnector->fake_enable = false;
2750
2751                         amdgpu_dm_update_connector_after_detect(aconnector);
2752
2753
2754                         drm_modeset_lock_all(dev);
2755                         dm_restore_drm_connector_state(dev, connector);
2756                         drm_modeset_unlock_all(dev);
2757
2758                         drm_kms_helper_hotplug_event(dev);
2759                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2760
2761                         if (aconnector->fake_enable)
2762                                 aconnector->fake_enable = false;
2763
2764                         amdgpu_dm_update_connector_after_detect(aconnector);
2765
2766
2767                         drm_modeset_lock_all(dev);
2768                         dm_restore_drm_connector_state(dev, connector);
2769                         drm_modeset_unlock_all(dev);
2770
2771                         drm_kms_helper_hotplug_event(dev);
2772                 }
2773         }
2774 #ifdef CONFIG_DRM_AMD_DC_HDCP
2775         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2776                 if (adev->dm.hdcp_workqueue)
2777                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2778         }
2779 #endif
2780
2781         if (dc_link->type != dc_connection_mst_branch) {
2782                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2783                 mutex_unlock(&aconnector->hpd_lock);
2784         }
2785 }
2786
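/*
 * Register HPD and HPD RX (DP short pulse) interrupt handlers for every
 * connector whose link exposes the corresponding IRQ source.
 */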
2787 static void register_hpd_handlers(struct amdgpu_device *adev)
2788 {
2789         struct drm_device *dev = adev_to_drm(adev);
2790         struct drm_connector *connector;
2791         struct amdgpu_dm_connector *aconnector;
2792         const struct dc_link *dc_link;
2793         struct dc_interrupt_params int_params = {0};
2794
2795         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2796         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2797
2798         list_for_each_entry(connector,
2799                         &dev->mode_config.connector_list, head) {
2800
2801                 aconnector = to_amdgpu_dm_connector(connector);
2802                 dc_link = aconnector->dc_link;
2803
2804                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2805                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2806                         int_params.irq_source = dc_link->irq_source_hpd;
2807
2808                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2809                                         handle_hpd_irq,
2810                                         (void *) aconnector);
2811                 }
2812
2813                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2814
2815                         /* Also register for DP short pulse (hpd_rx). */
2816                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2817                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2818
2819                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820                                         handle_hpd_rx_irq,
2821                                         (void *) aconnector);
2822                 }
2823         }
2824 }
2825
2826 #if defined(CONFIG_DRM_AMD_DC_SI)
2827 /* Register IRQ sources and initialize IRQ callbacks */
2828 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2829 {
2830         struct dc *dc = adev->dm.dc;
2831         struct common_irq_params *c_irq_params;
2832         struct dc_interrupt_params int_params = {0};
2833         int r;
2834         int i;
2835         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2836
2837         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2838         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2839
2840         /*
2841          * Actions of amdgpu_irq_add_id():
2842          * 1. Register a set() function with base driver.
2843          *    Base driver will call set() function to enable/disable an
2844          *    interrupt in DC hardware.
2845          * 2. Register amdgpu_dm_irq_handler().
2846          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2847          *    coming from DC hardware.
2848          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2849          *    for acknowledging and handling. */
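        /*
         * Resulting dispatch path (an illustrative sketch, not extra code):
         *
         *   hw IRQ -> amdgpu_dm_irq_handler() -> registered handler,
         *   e.g. dm_crtc_high_irq(c_irq_params)
         */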
2850
2851         /* Use VBLANK interrupt */
2852         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2853                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2854                 if (r) {
2855                         DRM_ERROR("Failed to add crtc irq id!\n");
2856                         return r;
2857                 }
2858
2859                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2860                 int_params.irq_source =
2861                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2862
2863                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2864
2865                 c_irq_params->adev = adev;
2866                 c_irq_params->irq_src = int_params.irq_source;
2867
2868                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2869                                 dm_crtc_high_irq, c_irq_params);
2870         }
2871
2872         /* Use GRPH_PFLIP interrupt */
2873         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2874                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2875                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2876                 if (r) {
2877                         DRM_ERROR("Failed to add page flip irq id!\n");
2878                         return r;
2879                 }
2880
2881                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2882                 int_params.irq_source =
2883                         dc_interrupt_to_irq_source(dc, i, 0);
2884
2885                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2886
2887                 c_irq_params->adev = adev;
2888                 c_irq_params->irq_src = int_params.irq_source;
2889
2890                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2891                                 dm_pflip_high_irq, c_irq_params);
2893         }
2894
2895         /* HPD */
2896         r = amdgpu_irq_add_id(adev, client_id,
2897                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2898         if (r) {
2899                 DRM_ERROR("Failed to add hpd irq id!\n");
2900                 return r;
2901         }
2902
2903         register_hpd_handlers(adev);
2904
2905         return 0;
2906 }
2907 #endif
2908
2909 /* Register IRQ sources and initialize IRQ callbacks */
2910 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2911 {
2912         struct dc *dc = adev->dm.dc;
2913         struct common_irq_params *c_irq_params;
2914         struct dc_interrupt_params int_params = {0};
2915         int r;
2916         int i;
2917         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2918
2919         if (adev->asic_type >= CHIP_VEGA10)
2920                 client_id = SOC15_IH_CLIENTID_DCE;
2921
2922         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2923         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2924
2925         /*
2926          * Actions of amdgpu_irq_add_id():
2927          * 1. Register a set() function with base driver.
2928          *    Base driver will call set() function to enable/disable an
2929          *    interrupt in DC hardware.
2930          * 2. Register amdgpu_dm_irq_handler().
2931          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2932          *    coming from DC hardware.
2933          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2934          *    for acknowledging and handling. */
2935
2936         /* Use VBLANK interrupt */
2937         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2938                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2939                 if (r) {
2940                         DRM_ERROR("Failed to add crtc irq id!\n");
2941                         return r;
2942                 }
2943
2944                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2945                 int_params.irq_source =
2946                         dc_interrupt_to_irq_source(dc, i, 0);
2947
2948                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2949
2950                 c_irq_params->adev = adev;
2951                 c_irq_params->irq_src = int_params.irq_source;
2952
2953                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2954                                 dm_crtc_high_irq, c_irq_params);
2955         }
2956
2957         /* Use VUPDATE interrupt */
2958         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2959                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2960                 if (r) {
2961                         DRM_ERROR("Failed to add vupdate irq id!\n");
2962                         return r;
2963                 }
2964
2965                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966                 int_params.irq_source =
2967                         dc_interrupt_to_irq_source(dc, i, 0);
2968
2969                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2970
2971                 c_irq_params->adev = adev;
2972                 c_irq_params->irq_src = int_params.irq_source;
2973
2974                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2975                                 dm_vupdate_high_irq, c_irq_params);
2976         }
2977
2978         /* Use GRPH_PFLIP interrupt */
2979         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2980                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2981                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2982                 if (r) {
2983                         DRM_ERROR("Failed to add page flip irq id!\n");
2984                         return r;
2985                 }
2986
2987                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2988                 int_params.irq_source =
2989                         dc_interrupt_to_irq_source(dc, i, 0);
2990
2991                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2992
2993                 c_irq_params->adev = adev;
2994                 c_irq_params->irq_src = int_params.irq_source;
2995
2996                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2997                                 dm_pflip_high_irq, c_irq_params);
2999         }
3000
3001         /* HPD */
3002         r = amdgpu_irq_add_id(adev, client_id,
3003                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3004         if (r) {
3005                 DRM_ERROR("Failed to add hpd irq id!\n");
3006                 return r;
3007         }
3008
3009         register_hpd_handlers(adev);
3010
3011         return 0;
3012 }
3013
3014 #if defined(CONFIG_DRM_AMD_DC_DCN)
3015 /* Register IRQ sources and initialize IRQ callbacks */
3016 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3017 {
3018         struct dc *dc = adev->dm.dc;
3019         struct common_irq_params *c_irq_params;
3020         struct dc_interrupt_params int_params = {0};
3021         int r;
3022         int i;
3023 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3024         static const unsigned int vrtl_int_srcid[] = {
3025                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3026                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3027                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3028                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3029                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3030                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3031         };
3032 #endif
3033
3034         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3035         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3036
3037         /*
3038          * Actions of amdgpu_irq_add_id():
3039          * 1. Register a set() function with base driver.
3040          *    Base driver will call set() function to enable/disable an
3041          *    interrupt in DC hardware.
3042          * 2. Register amdgpu_dm_irq_handler().
3043          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3044          *    coming from DC hardware.
3045          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3046          *    for acknowledging and handling.
3047          */
3048
3049         /* Use VSTARTUP interrupt */
3050         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3051                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3052                         i++) {
3053                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3054
3055                 if (r) {
3056                         DRM_ERROR("Failed to add crtc irq id!\n");
3057                         return r;
3058                 }
3059
3060                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3061                 int_params.irq_source =
3062                         dc_interrupt_to_irq_source(dc, i, 0);
3063
3064                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3065
3066                 c_irq_params->adev = adev;
3067                 c_irq_params->irq_src = int_params.irq_source;
3068
3069                 amdgpu_dm_irq_register_interrupt(
3070                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3071         }
3072
3073         /* Use otg vertical line interrupt */
3074 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3075         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3076                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3077                                 vrtl_int_srcid[i], &adev->vline0_irq);
3078
3079                 if (r) {
3080                         DRM_ERROR("Failed to add vline0 irq id!\n");
3081                         return r;
3082                 }
3083
3084                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3085                 int_params.irq_source =
3086                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3087
3088                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3089                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3090                         break;
3091                 }
3092
3093                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3094                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3095
3096                 c_irq_params->adev = adev;
3097                 c_irq_params->irq_src = int_params.irq_source;
3098
3099                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3100                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3101         }
3102 #endif
3103
3104         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3105          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3106          * to trigger at end of each vblank, regardless of state of the lock,
3107          * matching DCE behaviour.
3108          */
3109         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3110              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3111              i++) {
3112                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3113
3114                 if (r) {
3115                         DRM_ERROR("Failed to add vupdate irq id!\n");
3116                         return r;
3117                 }
3118
3119                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3120                 int_params.irq_source =
3121                         dc_interrupt_to_irq_source(dc, i, 0);
3122
3123                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3124
3125                 c_irq_params->adev = adev;
3126                 c_irq_params->irq_src = int_params.irq_source;
3127
3128                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3129                                 dm_vupdate_high_irq, c_irq_params);
3130         }
3131
3132         /* Use GRPH_PFLIP interrupt */
3133         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3134                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3135                         i++) {
3136                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3137                 if (r) {
3138                         DRM_ERROR("Failed to add page flip irq id!\n");
3139                         return r;
3140                 }
3141
3142                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3143                 int_params.irq_source =
3144                         dc_interrupt_to_irq_source(dc, i, 0);
3145
3146                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3147
3148                 c_irq_params->adev = adev;
3149                 c_irq_params->irq_src = int_params.irq_source;
3150
3151                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3152                                 dm_pflip_high_irq, c_irq_params);
3154         }
3155
3156         if (dc->ctx->dmub_srv) {
3157                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3158                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3159
3160                 if (r) {
3161                         DRM_ERROR("Failed to add dmub trace irq id!\n");
3162                         return r;
3163                 }
3164
3165                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3166                 int_params.irq_source =
3167                         dc_interrupt_to_irq_source(dc, i, 0);
3168
3169                 c_irq_params = &adev->dm.dmub_trace_params[0];
3170
3171                 c_irq_params->adev = adev;
3172                 c_irq_params->irq_src = int_params.irq_source;
3173
3174                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3175                                 dm_dmub_trace_high_irq, c_irq_params);
3176         }
3177
3178         /* HPD */
3179         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3180                         &adev->hpd_irq);
3181         if (r) {
3182                 DRM_ERROR("Failed to add hpd irq id!\n");
3183                 return r;
3184         }
3185
3186         register_hpd_handlers(adev);
3187
3188         return 0;
3189 }
3190 #endif
3191
3192 /*
3193  * Acquires the lock for the atomic state object and returns
3194  * the new atomic state.
3195  *
3196  * This should only be called during atomic check.
3197  */
3198 static int dm_atomic_get_state(struct drm_atomic_state *state,
3199                                struct dm_atomic_state **dm_state)
3200 {
3201         struct drm_device *dev = state->dev;
3202         struct amdgpu_device *adev = drm_to_adev(dev);
3203         struct amdgpu_display_manager *dm = &adev->dm;
3204         struct drm_private_state *priv_state;
3205
3206         if (*dm_state)
3207                 return 0;
3208
3209         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3210         if (IS_ERR(priv_state))
3211                 return PTR_ERR(priv_state);
3212
3213         *dm_state = to_dm_atomic_state(priv_state);
3214
3215         return 0;
3216 }
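/*
 * Typical call pattern during atomic check (a minimal usage sketch; the
 * caller owns a dm_state pointer that starts out NULL and is populated
 * on first use):
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *
 *      ret = dm_atomic_get_state(state, &dm_state);
 *      if (ret)
 *              return ret;
 *      // dm_state->context can now be modified for this commit
 */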
3217
3218 static struct dm_atomic_state *
3219 dm_atomic_get_new_state(struct drm_atomic_state *state)
3220 {
3221         struct drm_device *dev = state->dev;
3222         struct amdgpu_device *adev = drm_to_adev(dev);
3223         struct amdgpu_display_manager *dm = &adev->dm;
3224         struct drm_private_obj *obj;
3225         struct drm_private_state *new_obj_state;
3226         int i;
3227
3228         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3229                 if (obj->funcs == dm->atomic_obj.funcs)
3230                         return to_dm_atomic_state(new_obj_state);
3231         }
3232
3233         return NULL;
3234 }
3235
3236 static struct drm_private_state *
3237 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3238 {
3239         struct dm_atomic_state *old_state, *new_state;
3240
3241         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3242         if (!new_state)
3243                 return NULL;
3244
3245         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3246
3247         old_state = to_dm_atomic_state(obj->state);
3248
3249         if (old_state && old_state->context)
3250                 new_state->context = dc_copy_state(old_state->context);
3251
3252         if (!new_state->context) {
3253                 kfree(new_state);
3254                 return NULL;
3255         }
3256
3257         return &new_state->base;
3258 }
3259
3260 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3261                                     struct drm_private_state *state)
3262 {
3263         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3264
3265         if (dm_state && dm_state->context)
3266                 dc_release_state(dm_state->context);
3267
3268         kfree(dm_state);
3269 }
3270
3271 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3272         .atomic_duplicate_state = dm_atomic_duplicate_state,
3273         .atomic_destroy_state = dm_atomic_destroy_state,
3274 };
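/*
 * These callbacks are driven by the DRM core: .atomic_duplicate_state
 * runs when drm_atomic_get_private_obj_state() first touches
 * dm.atomic_obj in an atomic transaction, and .atomic_destroy_state
 * runs when that atomic state is eventually freed.
 */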
3275
3276 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3277 {
3278         struct dm_atomic_state *state;
3279         int r;
3280
3281         adev->mode_info.mode_config_initialized = true;
3282
3283         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3284         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3285
3286         adev_to_drm(adev)->mode_config.max_width = 16384;
3287         adev_to_drm(adev)->mode_config.max_height = 16384;
3288
3289         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3290         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3291         /* indicates support for immediate flip */
3292         adev_to_drm(adev)->mode_config.async_page_flip = true;
3293
3294         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3295
3296         state = kzalloc(sizeof(*state), GFP_KERNEL);
3297         if (!state)
3298                 return -ENOMEM;
3299
3300         state->context = dc_create_state(adev->dm.dc);
3301         if (!state->context) {
3302                 kfree(state);
3303                 return -ENOMEM;
3304         }
3305
3306         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3307
3308         drm_atomic_private_obj_init(adev_to_drm(adev),
3309                                     &adev->dm.atomic_obj,
3310                                     &state->base,
3311                                     &dm_atomic_state_funcs);
3312
3313         r = amdgpu_display_modeset_create_props(adev);
3314         if (r) {
3315                 dc_release_state(state->context);
3316                 kfree(state);
3317                 return r;
3318         }
3319
3320         r = amdgpu_dm_audio_init(adev);
3321         if (r) {
3322                 dc_release_state(state->context);
3323                 kfree(state);
3324                 return r;
3325         }
3326
3327         return 0;
3328 }
3329
3330 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3331 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3332 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3333
3334 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
3335         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3336
3337 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3338 {
3339 #if defined(CONFIG_ACPI)
3340         struct amdgpu_dm_backlight_caps caps;
3341
3342         memset(&caps, 0, sizeof(caps));
3343
3344         if (dm->backlight_caps.caps_valid)
3345                 return;
3346
3347         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3348         if (caps.caps_valid) {
3349                 dm->backlight_caps.caps_valid = true;
3350                 if (caps.aux_support)
3351                         return;
3352                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3353                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3354         } else {
3355                 dm->backlight_caps.min_input_signal =
3356                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3357                 dm->backlight_caps.max_input_signal =
3358                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3359         }
3360 #else
3361         if (dm->backlight_caps.aux_support)
3362                 return;
3363
3364         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3365         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3366 #endif
3367 }
3368
3369 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3370                                 unsigned int *min, unsigned int *max)
3371 {
3372         if (!caps)
3373                 return 0;
3374
3375         if (caps->aux_support) {
3376                 // Firmware limits are in nits, DC API wants millinits.
3377                 *max = 1000 * caps->aux_max_input_signal;
3378                 *min = 1000 * caps->aux_min_input_signal;
3379         } else {
3380                 // Firmware limits are 8-bit, PWM control is 16-bit.
3381                 *max = 0x101 * caps->max_input_signal;
3382                 *min = 0x101 * caps->min_input_signal;
3383         }
3384         return 1;
3385 }
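/*
 * Worked example for the PWM path, assuming the ACPI defaults above:
 * min_input_signal = 12 and max_input_signal = 255 give
 * *min = 0x101 * 12 = 3084 and *max = 0x101 * 255 = 0xFFFF,
 * i.e. the full 16-bit PWM range.
 */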
3386
3387 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3388                                         uint32_t brightness)
3389 {
3390         unsigned int min, max;
3391
3392         if (!get_brightness_range(caps, &min, &max))
3393                 return brightness;
3394
3395         // Rescale 0..255 to min..max
3396         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3397                                        AMDGPU_MAX_BL_LEVEL);
3398 }
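/*
 * With the example range above (min = 3084, max = 0xFFFF), user level 0
 * maps to 3084, AMDGPU_MAX_BL_LEVEL maps to 0xFFFF, and values in
 * between are linearly interpolated by DIV_ROUND_CLOSEST().
 */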
3399
3400 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3401                                       uint32_t brightness)
3402 {
3403         unsigned int min, max;
3404
3405         if (!get_brightness_range(caps, &min, &max))
3406                 return brightness;
3407
3408         if (brightness < min)
3409                 return 0;
3410         // Rescale min..max to 0..255
3411         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3412                                  max - min);
3413 }
3414
3415 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3416 {
3417         struct amdgpu_display_manager *dm = bl_get_data(bd);
3418         struct amdgpu_dm_backlight_caps caps;
3419         struct dc_link *link = NULL;
3420         u32 brightness;
3421         bool rc;
3422
3423         amdgpu_dm_update_backlight_caps(dm);
3424         caps = dm->backlight_caps;
3425
3426         link = (struct dc_link *)dm->backlight_link;
3427
3428         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3429         // Change brightness based on AUX property
3430         if (caps.aux_support)
3431                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3432                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3433         else
3434                 rc = dc_link_set_backlight_level(link, brightness, 0);
3435
3436         return rc ? 0 : 1;
3437 }
3438
3439 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3440 {
3441         struct amdgpu_display_manager *dm = bl_get_data(bd);
3442         struct amdgpu_dm_backlight_caps caps;
3443
3444         amdgpu_dm_update_backlight_caps(dm);
3445         caps = dm->backlight_caps;
3446
3447         if (caps.aux_support) {
3448                 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3449                 u32 avg, peak;
3450                 bool rc;
3451
3452                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3453                 if (!rc)
3454                         return bd->props.brightness;
3455                 return convert_brightness_to_user(&caps, avg);
3456         } else {
3457                 int ret = dc_link_get_backlight_level(dm->backlight_link);
3458
3459                 if (ret == DC_ERROR_UNEXPECTED)
3460                         return bd->props.brightness;
3461                 return convert_brightness_to_user(&caps, ret);
3462         }
3463 }
3464
3465 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3466         .options = BL_CORE_SUSPENDRESUME,
3467         .get_brightness = amdgpu_dm_backlight_get_brightness,
3468         .update_status  = amdgpu_dm_backlight_update_status,
3469 };
3470
3471 static void
3472 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3473 {
3474         char bl_name[16];
3475         struct backlight_properties props = { 0 };
3476
3477         amdgpu_dm_update_backlight_caps(dm);
3478
3479         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3480         props.brightness = AMDGPU_MAX_BL_LEVEL;
3481         props.type = BACKLIGHT_RAW;
3482
3483         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3484                  adev_to_drm(dm->adev)->primary->index);
3485
3486         dm->backlight_dev = backlight_device_register(bl_name,
3487                                                       adev_to_drm(dm->adev)->dev,
3488                                                       dm,
3489                                                       &amdgpu_dm_backlight_ops,
3490                                                       &props);
3491
3492         if (IS_ERR(dm->backlight_dev))
3493                 DRM_ERROR("DM: Backlight registration failed!\n");
3494         else
3495                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3496 }
3497
3498 #endif
3499
3500 static int initialize_plane(struct amdgpu_display_manager *dm,
3501                             struct amdgpu_mode_info *mode_info, int plane_id,
3502                             enum drm_plane_type plane_type,
3503                             const struct dc_plane_cap *plane_cap)
3504 {
3505         struct drm_plane *plane;
3506         unsigned long possible_crtcs;
3507         int ret = 0;
3508
3509         plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3510         if (!plane) {
3511                 DRM_ERROR("KMS: Failed to allocate plane\n");
3512                 return -ENOMEM;
3513         }
3514         plane->type = plane_type;
3515
3516         /*
3517          * HACK: IGT tests expect that the primary plane for a CRTC
3518          * can only have one possible CRTC. Only expose support for
3519          * any CRTC if the plane is not going to be used as a primary
3520          * plane for a CRTC - e.g. for overlay or underlay planes.
3521          */
3522         possible_crtcs = 1 << plane_id;
3523         if (plane_id >= dm->dc->caps.max_streams)
3524                 possible_crtcs = 0xff;
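        /* e.g. plane_id 0 -> mask 0x1 (CRTC 0 only); overlays get 0xff */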
3525
3526         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3527
3528         if (ret) {
3529                 DRM_ERROR("KMS: Failed to initialize plane\n");
3530                 kfree(plane);
3531                 return ret;
3532         }
3533
3534         if (mode_info)
3535                 mode_info->planes[plane_id] = plane;
3536
3537         return ret;
3538 }
3539
3541 static void register_backlight_device(struct amdgpu_display_manager *dm,
3542                                       struct dc_link *link)
3543 {
3544 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
3545         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3546
3547         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3548             link->type != dc_connection_none) {
3549                 /*
3550                  * Even if registration fails, we should continue with
3551                  * DM initialization because not having a backlight control
3552                  * is better than a black screen.
3553                  */
3554                 amdgpu_dm_register_backlight_device(dm);
3555
3556                 if (dm->backlight_dev)
3557                         dm->backlight_link = link;
3558         }
3559 #endif
3560 }
3561
3563 /*
3564  * In this architecture, the association
3565  * connector -> encoder -> crtc
3566  * is not really required. The crtc and connector will hold the
3567  * display_index as an abstraction to use with the DAL component.
3568  *
3569  * Returns 0 on success
3570  */
3571 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3572 {
3573         struct amdgpu_display_manager *dm = &adev->dm;
3574         int32_t i;
3575         struct amdgpu_dm_connector *aconnector = NULL;
3576         struct amdgpu_encoder *aencoder = NULL;
3577         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3578         uint32_t link_cnt;
3579         int32_t primary_planes;
3580         enum dc_connection_type new_connection_type = dc_connection_none;
3581         const struct dc_plane_cap *plane;
3582
3583         dm->display_indexes_num = dm->dc->caps.max_streams;
3584         /* Update the actual number of CRTCs in use */
3585         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3586
3587         link_cnt = dm->dc->caps.max_links;
3588         if (amdgpu_dm_mode_config_init(dm->adev)) {
3589                 DRM_ERROR("DM: Failed to initialize mode config\n");
3590                 return -EINVAL;
3591         }
3592
3593         /* There is one primary plane per CRTC */
3594         primary_planes = dm->dc->caps.max_streams;
3595         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3596
3597         /*
3598          * Initialize primary planes, implicit planes for legacy IOCTLs.
3599          * Order is reversed to match iteration order in atomic check.
3600          */
3601         for (i = (primary_planes - 1); i >= 0; i--) {
3602                 plane = &dm->dc->caps.planes[i];
3603
3604                 if (initialize_plane(dm, mode_info, i,
3605                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3606                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3607                         goto fail;
3608                 }
3609         }
3610
3611         /*
3612          * Initialize overlay planes, index starting after primary planes.
3613          * These planes have a higher DRM index than the primary planes since
3614          * they should be considered as having a higher z-order.
3615          * Order is reversed to match iteration order in atomic check.
3616          *
3617          * Only support DCN for now, and only expose one so we don't encourage
3618          * userspace to use up all the pipes.
3619          */
3620         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3621                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3622
3623                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3624                         continue;
3625
3626                 if (!plane->blends_with_above || !plane->blends_with_below)
3627                         continue;
3628
3629                 if (!plane->pixel_format_support.argb8888)
3630                         continue;
3631
3632                 if (initialize_plane(dm, NULL, primary_planes + i,
3633                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3634                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3635                         goto fail;
3636                 }
3637
3638                 /* Only create one overlay plane. */
3639                 break;
3640         }
3641
3642         for (i = 0; i < dm->dc->caps.max_streams; i++)
3643                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3644                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3645                         goto fail;
3646                 }
3647
3648         /* Loop over all connectors on the board */
3649         for (i = 0; i < link_cnt; i++) {
3650                 struct dc_link *link = NULL;
3651
3652                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3653                         DRM_ERROR(
3654                                 "KMS: Cannot support more than %d display indexes\n",
3655                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3656                         continue;
3657                 }
3658
3659                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3660                 if (!aconnector)
3661                         goto fail;
3662
3663                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3664                 if (!aencoder)
3665                         goto fail;
3666
3667                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3668                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3669                         goto fail;
3670                 }
3671
3672                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3673                         DRM_ERROR("KMS: Failed to initialize connector\n");
3674                         goto fail;
3675                 }
3676
3677                 link = dc_get_link_at_index(dm->dc, i);
3678
3679                 if (!dc_link_detect_sink(link, &new_connection_type))
3680                         DRM_ERROR("KMS: Failed to detect connector\n");
3681
3682                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3683                         emulated_link_detect(link);
3684                         amdgpu_dm_update_connector_after_detect(aconnector);
3685
3686                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3687                         amdgpu_dm_update_connector_after_detect(aconnector);
3688                         register_backlight_device(dm, link);
3689                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3690                                 amdgpu_dm_set_psr_caps(link);
3691                 }
3694         }
3695
3696         /* Software is initialized. Now we can register interrupt handlers. */
3697         switch (adev->asic_type) {
3698 #if defined(CONFIG_DRM_AMD_DC_SI)
3699         case CHIP_TAHITI:
3700         case CHIP_PITCAIRN:
3701         case CHIP_VERDE:
3702         case CHIP_OLAND:
3703                 if (dce60_register_irq_handlers(dm->adev)) {
3704                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3705                         goto fail;
3706                 }
3707                 break;
3708 #endif
3709         case CHIP_BONAIRE:
3710         case CHIP_HAWAII:
3711         case CHIP_KAVERI:
3712         case CHIP_KABINI:
3713         case CHIP_MULLINS:
3714         case CHIP_TONGA:
3715         case CHIP_FIJI:
3716         case CHIP_CARRIZO:
3717         case CHIP_STONEY:
3718         case CHIP_POLARIS11:
3719         case CHIP_POLARIS10:
3720         case CHIP_POLARIS12:
3721         case CHIP_VEGAM:
3722         case CHIP_VEGA10:
3723         case CHIP_VEGA12:
3724         case CHIP_VEGA20:
3725                 if (dce110_register_irq_handlers(dm->adev)) {
3726                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3727                         goto fail;
3728                 }
3729                 break;
3730 #if defined(CONFIG_DRM_AMD_DC_DCN)
3731         case CHIP_RAVEN:
3732         case CHIP_NAVI12:
3733         case CHIP_NAVI10:
3734         case CHIP_NAVI14:
3735         case CHIP_RENOIR:
3736         case CHIP_SIENNA_CICHLID:
3737         case CHIP_NAVY_FLOUNDER:
3738         case CHIP_DIMGREY_CAVEFISH:
3739         case CHIP_VANGOGH:
3740                 if (dcn10_register_irq_handlers(dm->adev)) {
3741                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3742                         goto fail;
3743                 }
3744                 break;
3745 #endif
3746         default:
3747                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3748                 goto fail;
3749         }
3750
3751         return 0;
3752 fail:
3753         kfree(aencoder);
3754         kfree(aconnector);
3755
3756         return -EINVAL;
3757 }
3758
3759 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3760 {
3761         drm_mode_config_cleanup(dm->ddev);
3762         drm_atomic_private_obj_fini(&dm->atomic_obj);
3764 }
3765
3766 /******************************************************************************
3767  * amdgpu_display_funcs functions
3768  *****************************************************************************/
3769
3770 /*
3771  * dm_bandwidth_update - program display watermarks
3772  *
3773  * @adev: amdgpu_device pointer
3774  *
3775  * Calculate and program the display watermarks and line buffer allocation.
3776  */
3777 static void dm_bandwidth_update(struct amdgpu_device *adev)
3778 {
3779         /* TODO: implement later */
3780 }
3781
3782 static const struct amdgpu_display_funcs dm_display_funcs = {
3783         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3784         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3785         .backlight_set_level = NULL, /* never called for DC */
3786         .backlight_get_level = NULL, /* never called for DC */
3787         .hpd_sense = NULL,/* called unconditionally */
3788         .hpd_set_polarity = NULL, /* called unconditionally */
3789         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3790         .page_flip_get_scanoutpos =
3791                 dm_crtc_get_scanoutpos,/* called unconditionally */
3792         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3793         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3794 };
3795
3796 #if defined(CONFIG_DEBUG_KERNEL_DC)
3797
3798 static ssize_t s3_debug_store(struct device *device,
3799                               struct device_attribute *attr,
3800                               const char *buf,
3801                               size_t count)
3802 {
3803         int ret;
3804         int s3_state;
3805         struct drm_device *drm_dev = dev_get_drvdata(device);
3806         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3807
3808         ret = kstrtoint(buf, 0, &s3_state);
3809
3810         if (ret == 0) {
3811                 if (s3_state) {
3812                         dm_resume(adev);
3813                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3814                 } else
3815                         dm_suspend(adev);
3816         }
3817
3818         return ret == 0 ? count : 0;
3819 }
3820
3821 DEVICE_ATTR_WO(s3_debug);
3822
3823 #endif
3824
3825 static int dm_early_init(void *handle)
3826 {
3827         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3828
3829         switch (adev->asic_type) {
3830 #if defined(CONFIG_DRM_AMD_DC_SI)
3831         case CHIP_TAHITI:
3832         case CHIP_PITCAIRN:
3833         case CHIP_VERDE:
3834                 adev->mode_info.num_crtc = 6;
3835                 adev->mode_info.num_hpd = 6;
3836                 adev->mode_info.num_dig = 6;
3837                 break;
3838         case CHIP_OLAND:
3839                 adev->mode_info.num_crtc = 2;
3840                 adev->mode_info.num_hpd = 2;
3841                 adev->mode_info.num_dig = 2;
3842                 break;
3843 #endif
3844         case CHIP_BONAIRE:
3845         case CHIP_HAWAII:
3846                 adev->mode_info.num_crtc = 6;
3847                 adev->mode_info.num_hpd = 6;
3848                 adev->mode_info.num_dig = 6;
3849                 break;
3850         case CHIP_KAVERI:
3851                 adev->mode_info.num_crtc = 4;
3852                 adev->mode_info.num_hpd = 6;
3853                 adev->mode_info.num_dig = 7;
3854                 break;
3855         case CHIP_KABINI:
3856         case CHIP_MULLINS:
3857                 adev->mode_info.num_crtc = 2;
3858                 adev->mode_info.num_hpd = 6;
3859                 adev->mode_info.num_dig = 6;
3860                 break;
3861         case CHIP_FIJI:
3862         case CHIP_TONGA:
3863                 adev->mode_info.num_crtc = 6;
3864                 adev->mode_info.num_hpd = 6;
3865                 adev->mode_info.num_dig = 7;
3866                 break;
3867         case CHIP_CARRIZO:
3868                 adev->mode_info.num_crtc = 3;
3869                 adev->mode_info.num_hpd = 6;
3870                 adev->mode_info.num_dig = 9;
3871                 break;
3872         case CHIP_STONEY:
3873                 adev->mode_info.num_crtc = 2;
3874                 adev->mode_info.num_hpd = 6;
3875                 adev->mode_info.num_dig = 9;
3876                 break;
3877         case CHIP_POLARIS11:
3878         case CHIP_POLARIS12:
3879                 adev->mode_info.num_crtc = 5;
3880                 adev->mode_info.num_hpd = 5;
3881                 adev->mode_info.num_dig = 5;
3882                 break;
3883         case CHIP_POLARIS10:
3884         case CHIP_VEGAM:
3885                 adev->mode_info.num_crtc = 6;
3886                 adev->mode_info.num_hpd = 6;
3887                 adev->mode_info.num_dig = 6;
3888                 break;
3889         case CHIP_VEGA10:
3890         case CHIP_VEGA12:
3891         case CHIP_VEGA20:
3892                 adev->mode_info.num_crtc = 6;
3893                 adev->mode_info.num_hpd = 6;
3894                 adev->mode_info.num_dig = 6;
3895                 break;
3896 #if defined(CONFIG_DRM_AMD_DC_DCN)
3897         case CHIP_RAVEN:
3898         case CHIP_RENOIR:
3899         case CHIP_VANGOGH:
3900                 adev->mode_info.num_crtc = 4;
3901                 adev->mode_info.num_hpd = 4;
3902                 adev->mode_info.num_dig = 4;
3903                 break;
3904         case CHIP_NAVI10:
3905         case CHIP_NAVI12:
3906         case CHIP_SIENNA_CICHLID:
3907         case CHIP_NAVY_FLOUNDER:
3908                 adev->mode_info.num_crtc = 6;
3909                 adev->mode_info.num_hpd = 6;
3910                 adev->mode_info.num_dig = 6;
3911                 break;
3912         case CHIP_NAVI14:
3913         case CHIP_DIMGREY_CAVEFISH:
3914                 adev->mode_info.num_crtc = 5;
3915                 adev->mode_info.num_hpd = 5;
3916                 adev->mode_info.num_dig = 5;
3917                 break;
3918 #endif
3919         default:
3920                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3921                 return -EINVAL;
3922         }
3923
3924         amdgpu_dm_set_irq_funcs(adev);
3925
3926         if (adev->mode_info.funcs == NULL)
3927                 adev->mode_info.funcs = &dm_display_funcs;
3928
3929         /*
3930          * Note: Do NOT change adev->audio_endpt_rreg and
3931          * adev->audio_endpt_wreg because they are initialised in
3932          * amdgpu_device_init()
3933          */
3934 #if defined(CONFIG_DEBUG_KERNEL_DC)
3935         device_create_file(
3936                 adev_to_drm(adev)->dev,
3937                 &dev_attr_s3_debug);
3938 #endif
3939
3940         return 0;
3941 }
3942
3943 static bool modeset_required(struct drm_crtc_state *crtc_state,
3944                              struct dc_stream_state *new_stream,
3945                              struct dc_stream_state *old_stream)
3946 {
3947         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3948 }
3949
3950 static bool modereset_required(struct drm_crtc_state *crtc_state)
3951 {
3952         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3953 }
3954
3955 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3956 {
3957         drm_encoder_cleanup(encoder);
3958         kfree(encoder);
3959 }
3960
3961 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3962         .destroy = amdgpu_dm_encoder_destroy,
3963 };
3964
3965
3966 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3967                                          struct drm_framebuffer *fb,
3968                                          int *min_downscale, int *max_upscale)
3969 {
3970         struct amdgpu_device *adev = drm_to_adev(dev);
3971         struct dc *dc = adev->dm.dc;
3972         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3973         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3974
3975         switch (fb->format->format) {
3976         case DRM_FORMAT_P010:
3977         case DRM_FORMAT_NV12:
3978         case DRM_FORMAT_NV21:
3979                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3980                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3981                 break;
3982
3983         case DRM_FORMAT_XRGB16161616F:
3984         case DRM_FORMAT_ARGB16161616F:
3985         case DRM_FORMAT_XBGR16161616F:
3986         case DRM_FORMAT_ABGR16161616F:
3987                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3988                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3989                 break;
3990
3991         default:
3992                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3993                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3994                 break;
3995         }
3996
3997         /*
3998          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3999          * scaling factor of 1.0 == 1000 units.
4000          */
4001         if (*max_upscale == 1)
4002                 *max_upscale = 1000;
4003
4004         if (*min_downscale == 1)
4005                 *min_downscale = 1000;
4006 }
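/*
 * The caps are in 1/1000th units: e.g. a max_upscale_factor of 16000
 * permits up to 16.0x upscaling, and a max_downscale_factor of 250
 * permits shrinking down to 0.25x, matching the fallback values used
 * in fill_dc_scaling_info() below.
 */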
4007
4008
4009 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4010                                 struct dc_scaling_info *scaling_info)
4011 {
4012         int scale_w, scale_h, min_downscale, max_upscale;
4013
4014         memset(scaling_info, 0, sizeof(*scaling_info));
4015
4016         /* Source is fixed 16.16 but we ignore mantissa for now... */
4017         scaling_info->src_rect.x = state->src_x >> 16;
4018         scaling_info->src_rect.y = state->src_y >> 16;
4019
4020         /*
4021          * For reasons we don't (yet) fully understand, a non-zero
4022          * src_y coordinate into an NV12 buffer can cause a
4023          * system hang. To avoid hangs (and maybe be overly cautious)
4024          * let's reject both non-zero src_x and src_y.
4025          *
4026          * We currently know of only one use-case to reproduce a
4027          * scenario with non-zero src_x and src_y for NV12, which
4028          * is to gesture the YouTube Android app into full screen
4029          * on ChromeOS.
4030          */
4031         if (state->fb &&
4032             state->fb->format->format == DRM_FORMAT_NV12 &&
4033             (scaling_info->src_rect.x != 0 ||
4034              scaling_info->src_rect.y != 0))
4035                 return -EINVAL;
4036
4037         scaling_info->src_rect.width = state->src_w >> 16;
4038         if (scaling_info->src_rect.width == 0)
4039                 return -EINVAL;
4040
4041         scaling_info->src_rect.height = state->src_h >> 16;
4042         if (scaling_info->src_rect.height == 0)
4043                 return -EINVAL;
4044
4045         scaling_info->dst_rect.x = state->crtc_x;
4046         scaling_info->dst_rect.y = state->crtc_y;
4047
4048         if (state->crtc_w == 0)
4049                 return -EINVAL;
4050
4051         scaling_info->dst_rect.width = state->crtc_w;
4052
4053         if (state->crtc_h == 0)
4054                 return -EINVAL;
4055
4056         scaling_info->dst_rect.height = state->crtc_h;
4057
4058         /* DRM doesn't specify clipping on destination output. */
4059         scaling_info->clip_rect = scaling_info->dst_rect;
4060
4061         /* Validate scaling per-format with DC plane caps */
4062         if (state->plane && state->plane->dev && state->fb) {
4063                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4064                                              &min_downscale, &max_upscale);
4065         } else {
4066                 min_downscale = 250;
4067                 max_upscale = 16000;
4068         }
4069
4070         scale_w = scaling_info->dst_rect.width * 1000 /
4071                   scaling_info->src_rect.width;
4072
4073         if (scale_w < min_downscale || scale_w > max_upscale)
4074                 return -EINVAL;
4075
4076         scale_h = scaling_info->dst_rect.height * 1000 /
4077                   scaling_info->src_rect.height;
4078
4079         if (scale_h < min_downscale || scale_h > max_upscale)
4080                 return -EINVAL;
4081
4082         /*
4083          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4084          * assume reasonable defaults based on the format.
4085          */
4086
4087         return 0;
4088 }
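/*
 * Worked example: a 1920-wide source scanned out to a 3840-wide
 * destination gives scale_w = 3840 * 1000 / 1920 = 2000 (2.0x upscale),
 * which is accepted as long as the format's max_upscale is >= 2000.
 */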
4089
4090 static void
4091 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4092                                  uint64_t tiling_flags)
4093 {
4094         /* Fill GFX8 params */
4095         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4096                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4097
4098                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4099                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4100                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4101                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4102                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4103
4104                 /* XXX fix me for VI */
4105                 tiling_info->gfx8.num_banks = num_banks;
4106                 tiling_info->gfx8.array_mode =
4107                                 DC_ARRAY_2D_TILED_THIN1;
4108                 tiling_info->gfx8.tile_split = tile_split;
4109                 tiling_info->gfx8.bank_width = bankw;
4110                 tiling_info->gfx8.bank_height = bankh;
4111                 tiling_info->gfx8.tile_aspect = mtaspect;
4112                 tiling_info->gfx8.tile_mode =
4113                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4114         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4115                         == DC_ARRAY_1D_TILED_THIN1) {
4116                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4117         }
4118
4119         tiling_info->gfx8.pipe_config =
4120                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4121 }
4122
4123 static void
4124 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4125                                   union dc_tiling_info *tiling_info)
4126 {
4127         tiling_info->gfx9.num_pipes =
4128                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4129         tiling_info->gfx9.num_banks =
4130                 adev->gfx.config.gb_addr_config_fields.num_banks;
4131         tiling_info->gfx9.pipe_interleave =
4132                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4133         tiling_info->gfx9.num_shader_engines =
4134                 adev->gfx.config.gb_addr_config_fields.num_se;
4135         tiling_info->gfx9.max_compressed_frags =
4136                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4137         tiling_info->gfx9.num_rb_per_se =
4138                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4139         tiling_info->gfx9.shaderEnable = 1;
4140         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4141             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4142             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4143             adev->asic_type == CHIP_VANGOGH)
4144                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4145 }
4146
4147 static int
4148 validate_dcc(struct amdgpu_device *adev,
4149              const enum surface_pixel_format format,
4150              const enum dc_rotation_angle rotation,
4151              const union dc_tiling_info *tiling_info,
4152              const struct dc_plane_dcc_param *dcc,
4153              const struct dc_plane_address *address,
4154              const struct plane_size *plane_size)
4155 {
4156         struct dc *dc = adev->dm.dc;
4157         struct dc_dcc_surface_param input;
4158         struct dc_surface_dcc_cap output;
4159
4160         memset(&input, 0, sizeof(input));
4161         memset(&output, 0, sizeof(output));
4162
4163         if (!dcc->enable)
4164                 return 0;
4165
4166         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4167             !dc->cap_funcs.get_dcc_compression_cap)
4168                 return -EINVAL;
4169
4170         input.format = format;
4171         input.surface_size.width = plane_size->surface_size.width;
4172         input.surface_size.height = plane_size->surface_size.height;
4173         input.swizzle_mode = tiling_info->gfx9.swizzle;
4174
4175         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4176                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4177         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4178                 input.scan = SCAN_DIRECTION_VERTICAL;
4179
4180         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4181                 return -EINVAL;
4182
4183         if (!output.capable)
4184                 return -EINVAL;
4185
4186         if (dcc->independent_64b_blks == 0 &&
4187             output.grph.rgb.independent_64b_blks != 0)
4188                 return -EINVAL;
4189
4190         return 0;
4191 }
4192
4193 static bool
4194 modifier_has_dcc(uint64_t modifier)
4195 {
4196         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4197 }
4198
4199 static unsigned
4200 modifier_gfx9_swizzle_mode(uint64_t modifier)
4201 {
4202         if (modifier == DRM_FORMAT_MOD_LINEAR)
4203                 return 0;
4204
4205         return AMD_FMT_MOD_GET(TILE, modifier);
4206 }
4207
4208 static const struct drm_format_info *
4209 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4210 {
4211         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4212 }
4213
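/*
 * Override the device defaults with the layout encoded in an AMD format
 * modifier: pipe/bank XOR bits and, on gfx10+, packers. The fields are
 * log2 values, hence the shifts; log2(pipes) is capped at 4 (16 pipes)
 * and any remaining XOR bits are attributed to shader engines.
 */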
4214 static void
4215 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4216                                     union dc_tiling_info *tiling_info,
4217                                     uint64_t modifier)
4218 {
4219         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4220         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4221         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4222         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4223
4224         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4225
4226         if (!IS_AMD_FMT_MOD(modifier))
4227                 return;
4228
4229         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4230         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4231
4232         if (adev->family >= AMDGPU_FAMILY_NV) {
4233                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4234         } else {
4235                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4236
4237                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4238         }
4239 }
4240
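/*
 * The low two bits of a gfx9+ swizzle mode encode the micro-tile
 * ordering; dm_plane_format_mod_supported() masks them out with "& 3"
 * to filter the depth (_Z), standard (_S), display (_D) and rotated
 * (_R) layouts per format.
 */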
4241 enum dm_micro_swizzle {
4242         MICRO_SWIZZLE_Z = 0,
4243         MICRO_SWIZZLE_S = 1,
4244         MICRO_SWIZZLE_D = 2,
4245         MICRO_SWIZZLE_R = 3
4246 };
4247
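/*
 * Validate a format+modifier pair for a plane: LINEAR and INVALID always
 * pass, anything else must be on the plane's advertised modifier list
 * and satisfy the per-family micro-tile restrictions. DCC is only
 * accepted for single-plane 32 bpp formats here.
 */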
4248 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4249                                           uint32_t format,
4250                                           uint64_t modifier)
4251 {
4252         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4253         const struct drm_format_info *info = drm_format_info(format);
4254         int i;
4255
4256         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4257
4258         if (!info)
4259                 return false;
4260
4261         /*
4262          * We always have to allow these modifiers:
4263          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4264          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4265          */
4266         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4267             modifier == DRM_FORMAT_MOD_INVALID) {
4268                 return true;
4269         }
4270
4271         /* Check that the modifier is on the list of the plane's supported modifiers. */
4272         for (i = 0; i < plane->modifier_count; i++) {
4273                 if (modifier == plane->modifiers[i])
4274                         break;
4275         }
4276         if (i == plane->modifier_count)
4277                 return false;
4278
4279         /*
4280          * For D swizzle the canonical modifier depends on the bpp, so check
4281          * it here.
4282          */
4283         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4284             adev->family >= AMDGPU_FAMILY_NV) {
4285                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4286                         return false;
4287         }
4288
4289         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4290             info->cpp[0] < 8)
4291                 return false;
4292
4293         if (modifier_has_dcc(modifier)) {
4294                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4295                 if (info->cpp[0] != 4)
4296                         return false;
4297                 /* We support multi-planar formats, but not when combined with
4298                  * additional DCC metadata planes. */
4299                 if (info->num_planes > 1)
4300                         return false;
4301         }
4302
4303         return true;
4304 }
4305
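/*
 * Append one modifier to the dynamically sized list, doubling the
 * backing allocation when it is full. On allocation failure the list is
 * freed and *mods set to NULL, which get_plane_modifiers() turns into
 * -ENOMEM.
 */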
4306 static void
4307 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4308 {
4309         if (!*mods)
4310                 return;
4311
4312         if (*cap - *size < 1) {
4313                 uint64_t new_cap = *cap * 2;
4314                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4315
4316                 if (!new_mods) {
4317                         kfree(*mods);
4318                         *mods = NULL;
4319                         return;
4320                 }
4321
4322                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4323                 kfree(*mods);
4324                 *mods = new_mods;
4325                 *cap = new_cap;
4326         }
4327
4328         (*mods)[*size] = mod;
4329         *size += 1;
4330 }
4331
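/*
 * Build the gfx9 (Vega/Raven) modifier list, roughly most capable
 * first: DCC (constant-encode needs Raven2, external_rev_id >= 0x81),
 * then retiled DCC, then the plain _X/_D/_S tilings. The 64bpp-only
 * entries are filtered per format in dm_plane_format_mod_supported().
 */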
4332 static void
4333 add_gfx9_modifiers(const struct amdgpu_device *adev,
4334                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4335 {
4336         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4337         int pipe_xor_bits = min(8, pipes +
4338                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4339         int bank_xor_bits = min(8 - pipe_xor_bits,
4340                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4341         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4342                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4343
4345         if (adev->family == AMDGPU_FAMILY_RV) {
4346                 /* Raven2 and later */
4347                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4348
4349                 /*
4350                  * No _D DCC swizzles yet because we only allow 32bpp, which
4351                  * doesn't support _D on DCN
4352                  */
4353
4354                 if (has_constant_encode) {
4355                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4356                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4357                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4358                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4359                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4360                                     AMD_FMT_MOD_SET(DCC, 1) |
4361                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4362                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4363                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4364                 }
4365
4366                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4367                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4368                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4369                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4370                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4371                             AMD_FMT_MOD_SET(DCC, 1) |
4372                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4373                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4374                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4375
4376                 if (has_constant_encode) {
4377                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4378                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4379                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4380                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4381                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4382                                     AMD_FMT_MOD_SET(DCC, 1) |
4383                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4384                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4385                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4387                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4388                                     AMD_FMT_MOD_SET(RB, rb) |
4389                                     AMD_FMT_MOD_SET(PIPE, pipes));
4390                 }
4391
4392                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4393                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4394                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4395                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4396                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4397                             AMD_FMT_MOD_SET(DCC, 1) |
4398                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4399                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4400                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4401                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4402                             AMD_FMT_MOD_SET(RB, rb) |
4403                             AMD_FMT_MOD_SET(PIPE, pipes));
4404         }
4405
4406         /*
4407          * Only supported for 64 bpp on Raven; filtered by format in
4408          * dm_plane_format_mod_supported().
4409          */
4410         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4411                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4412                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4413                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4414                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4415
4416         if (adev->family == AMDGPU_FAMILY_RV) {
4417                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4418                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4419                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4420                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4421                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4422         }
4423
4424         /*
4425          * Only supported for 64 bpp on Raven; filtered by format in
4426          * dm_plane_format_mod_supported().
4427          */
4428         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4429                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4430                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4431
4432         if (adev->family == AMDGPU_FAMILY_RV) {
4433                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4434                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4435                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4436         }
4437 }
4438
4439 static void
4440 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4441                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4442 {
4443         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4444
4445         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4446                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4447                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4448                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4449                     AMD_FMT_MOD_SET(DCC, 1) |
4450                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4451                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4452                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4453
4454         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4455                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4456                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4457                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4458                     AMD_FMT_MOD_SET(DCC, 1) |
4459                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4460                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4461                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4462                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4463
4464         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4465                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4466                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4467                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4468
4469         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4470                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4471                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4472                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4473
4475         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4476         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4477                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4478                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4479
4480         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4481                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4482                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4483 }
4484
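/*
 * gfx10.3 (RB+) variant of the above: the modifiers additionally encode
 * the packer count, and DCC is advertised with both 64B and 128B
 * independent blocks, matching what the RB+ display engine can scan out.
 */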
4485 static void
4486 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4487                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4488 {
4489         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4490         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4491
4492         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4493                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4494                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4495                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4496                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4497                     AMD_FMT_MOD_SET(DCC, 1) |
4498                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4499                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4500                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4501                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4502
4503         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4504                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4505                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4506                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4507                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4508                     AMD_FMT_MOD_SET(DCC, 1) |
4509                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4510                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4511                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4512                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4513                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4514
4515         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4516                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4517                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4518                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4519                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4520
4521         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4522                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4523                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4524                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4525                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4526
4527         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4528         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4529                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4530                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4531
4532         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4533                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4534                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4535 }
4536
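/*
 * Produce the modifier list a plane advertises to userspace. Cursor
 * planes only get LINEAR, pre-GFX9 parts get no modifier list at all,
 * and every list is terminated with DRM_FORMAT_MOD_INVALID as
 * drm_universal_plane_init() expects.
 */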
4537 static int
4538 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4539 {
4540         uint64_t size = 0, capacity = 128;
4541         *mods = NULL;
4542
4543         /* We have not hooked up any pre-GFX9 modifiers. */
4544         if (adev->family < AMDGPU_FAMILY_AI)
4545                 return 0;
4546
4547         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4548
4549         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4550                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4551                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4552                 return *mods ? 0 : -ENOMEM;
4553         }
4554
4555         switch (adev->family) {
4556         case AMDGPU_FAMILY_AI:
4557         case AMDGPU_FAMILY_RV:
4558                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4559                 break;
4560         case AMDGPU_FAMILY_NV:
4561         case AMDGPU_FAMILY_VGH:
4562                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4563                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4564                 else
4565                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4566                 break;
4567         }
4568
4569         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4570
4571         /* INVALID marks the end of the list. */
4572         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4573
4574         if (!*mods)
4575                 return -ENOMEM;
4576
4577         return 0;
4578 }
4579
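/*
 * Translate the framebuffer's modifier into DC tiling and DCC state.
 * When the modifier carries DCC, plane 1 of the FB holds the metadata
 * (offsets[1]/pitches[1]); the result is then checked against the HW
 * caps via validate_dcc().
 */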
4580 static int
4581 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4582                                           const struct amdgpu_framebuffer *afb,
4583                                           const enum surface_pixel_format format,
4584                                           const enum dc_rotation_angle rotation,
4585                                           const struct plane_size *plane_size,
4586                                           union dc_tiling_info *tiling_info,
4587                                           struct dc_plane_dcc_param *dcc,
4588                                           struct dc_plane_address *address,
4589                                           const bool force_disable_dcc)
4590 {
4591         const uint64_t modifier = afb->base.modifier;
4592         int ret;
4593
4594         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4595         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4596
4597         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4598                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4599
4600                 dcc->enable = 1;
4601                 dcc->meta_pitch = afb->base.pitches[1];
4602                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4603
4604                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4605                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4606         }
4607
4608         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4609         if (ret)
4610                 return ret;
4611
4612         return 0;
4613 }
4614
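/*
 * Fill the DC surface size, pitch and address(es) from the framebuffer:
 * a single GRAPHICS address for RGB formats, a luma/chroma pair for the
 * semi-planar video formats. Tiling comes from modifiers on gfx9+ and
 * from the legacy tiling_flags on older ASICs.
 */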
4615 static int
4616 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4617                              const struct amdgpu_framebuffer *afb,
4618                              const enum surface_pixel_format format,
4619                              const enum dc_rotation_angle rotation,
4620                              const uint64_t tiling_flags,
4621                              union dc_tiling_info *tiling_info,
4622                              struct plane_size *plane_size,
4623                              struct dc_plane_dcc_param *dcc,
4624                              struct dc_plane_address *address,
4625                              bool tmz_surface,
4626                              bool force_disable_dcc)
4627 {
4628         const struct drm_framebuffer *fb = &afb->base;
4629         int ret;
4630
4631         memset(tiling_info, 0, sizeof(*tiling_info));
4632         memset(plane_size, 0, sizeof(*plane_size));
4633         memset(dcc, 0, sizeof(*dcc));
4634         memset(address, 0, sizeof(*address));
4635
4636         address->tmz_surface = tmz_surface;
4637
4638         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4639                 uint64_t addr = afb->address + fb->offsets[0];
4640
4641                 plane_size->surface_size.x = 0;
4642                 plane_size->surface_size.y = 0;
4643                 plane_size->surface_size.width = fb->width;
4644                 plane_size->surface_size.height = fb->height;
4645                 plane_size->surface_pitch =
4646                         fb->pitches[0] / fb->format->cpp[0];
4647
4648                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4649                 address->grph.addr.low_part = lower_32_bits(addr);
4650                 address->grph.addr.high_part = upper_32_bits(addr);
4651         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4652                 uint64_t luma_addr = afb->address + fb->offsets[0];
4653                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4654
4655                 plane_size->surface_size.x = 0;
4656                 plane_size->surface_size.y = 0;
4657                 plane_size->surface_size.width = fb->width;
4658                 plane_size->surface_size.height = fb->height;
4659                 plane_size->surface_pitch =
4660                         fb->pitches[0] / fb->format->cpp[0];
4661
4662                 plane_size->chroma_size.x = 0;
4663                 plane_size->chroma_size.y = 0;
4664                 /* TODO: set these based on surface format */
4665                 plane_size->chroma_size.width = fb->width / 2;
4666                 plane_size->chroma_size.height = fb->height / 2;
4667
4668                 plane_size->chroma_pitch =
4669                         fb->pitches[1] / fb->format->cpp[1];
4670
4671                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4672                 address->video_progressive.luma_addr.low_part =
4673                         lower_32_bits(luma_addr);
4674                 address->video_progressive.luma_addr.high_part =
4675                         upper_32_bits(luma_addr);
4676                 address->video_progressive.chroma_addr.low_part =
4677                         lower_32_bits(chroma_addr);
4678                 address->video_progressive.chroma_addr.high_part =
4679                         upper_32_bits(chroma_addr);
4680         }
4681
4682         if (adev->family >= AMDGPU_FAMILY_AI) {
4683                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4684                                                                 rotation, plane_size,
4685                                                                 tiling_info, dcc,
4686                                                                 address,
4687                                                                 force_disable_dcc);
4688                 if (ret)
4689                         return ret;
4690         } else {
4691                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4692         }
4693
4694         return 0;
4695 }
4696
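/*
 * Derive DC blending state from the DRM plane state: per-pixel alpha is
 * honoured only for premultiplied ARGB-style formats on overlay planes,
 * and DRM's 16-bit plane alpha is folded down to the 8-bit global alpha
 * DC consumes.
 */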
4697 static void
4698 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4699                                bool *per_pixel_alpha, bool *global_alpha,
4700                                int *global_alpha_value)
4701 {
4702         *per_pixel_alpha = false;
4703         *global_alpha = false;
4704         *global_alpha_value = 0xff;
4705
4706         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4707                 return;
4708
4709         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4710                 static const uint32_t alpha_formats[] = {
4711                         DRM_FORMAT_ARGB8888,
4712                         DRM_FORMAT_RGBA8888,
4713                         DRM_FORMAT_ABGR8888,
4714                 };
4715                 uint32_t format = plane_state->fb->format->format;
4716                 unsigned int i;
4717
4718                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4719                         if (format == alpha_formats[i]) {
4720                                 *per_pixel_alpha = true;
4721                                 break;
4722                         }
4723                 }
4724         }
4725
4726         if (plane_state->alpha < 0xffff) {
4727                 *global_alpha = true;
4728                 *global_alpha_value = plane_state->alpha >> 8;
4729         }
4730 }
4731
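/*
 * Map the DRM color encoding/range plane properties onto a DC color
 * space. RGB surfaces are always treated as sRGB; BT.2020 is only
 * accepted as full range here.
 */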
4732 static int
4733 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4734                             const enum surface_pixel_format format,
4735                             enum dc_color_space *color_space)
4736 {
4737         bool full_range;
4738
4739         *color_space = COLOR_SPACE_SRGB;
4740
4741         /* DRM color properties only affect non-RGB formats. */
4742         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4743                 return 0;
4744
4745         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4746
4747         switch (plane_state->color_encoding) {
4748         case DRM_COLOR_YCBCR_BT601:
4749                 if (full_range)
4750                         *color_space = COLOR_SPACE_YCBCR601;
4751                 else
4752                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4753                 break;
4754
4755         case DRM_COLOR_YCBCR_BT709:
4756                 if (full_range)
4757                         *color_space = COLOR_SPACE_YCBCR709;
4758                 else
4759                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4760                 break;
4761
4762         case DRM_COLOR_YCBCR_BT2020:
4763                 if (full_range)
4764                         *color_space = COLOR_SPACE_2020_YCBCR;
4765                 else
4766                         return -EINVAL;
4767                 break;
4768
4769         default:
4770                 return -EINVAL;
4771         }
4772
4773         return 0;
4774 }
4775
4776 static int
4777 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4778                             const struct drm_plane_state *plane_state,
4779                             const uint64_t tiling_flags,
4780                             struct dc_plane_info *plane_info,
4781                             struct dc_plane_address *address,
4782                             bool tmz_surface,
4783                             bool force_disable_dcc)
4784 {
4785         const struct drm_framebuffer *fb = plane_state->fb;
4786         const struct amdgpu_framebuffer *afb =
4787                 to_amdgpu_framebuffer(plane_state->fb);
4788         int ret;
4789
4790         memset(plane_info, 0, sizeof(*plane_info));
4791
4792         switch (fb->format->format) {
4793         case DRM_FORMAT_C8:
4794                 plane_info->format =
4795                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4796                 break;
4797         case DRM_FORMAT_RGB565:
4798                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4799                 break;
4800         case DRM_FORMAT_XRGB8888:
4801         case DRM_FORMAT_ARGB8888:
4802                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4803                 break;
4804         case DRM_FORMAT_XRGB2101010:
4805         case DRM_FORMAT_ARGB2101010:
4806                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4807                 break;
4808         case DRM_FORMAT_XBGR2101010:
4809         case DRM_FORMAT_ABGR2101010:
4810                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4811                 break;
4812         case DRM_FORMAT_XBGR8888:
4813         case DRM_FORMAT_ABGR8888:
4814                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4815                 break;
4816         case DRM_FORMAT_NV21:
4817                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4818                 break;
4819         case DRM_FORMAT_NV12:
4820                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4821                 break;
4822         case DRM_FORMAT_P010:
4823                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4824                 break;
4825         case DRM_FORMAT_XRGB16161616F:
4826         case DRM_FORMAT_ARGB16161616F:
4827                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4828                 break;
4829         case DRM_FORMAT_XBGR16161616F:
4830         case DRM_FORMAT_ABGR16161616F:
4831                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4832                 break;
4833         default:
4834                 DRM_ERROR(
4835                         "Unsupported screen format %p4cc\n",
4836                         &fb->format->format);
4837                 return -EINVAL;
4838         }
4839
4840         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4841         case DRM_MODE_ROTATE_0:
4842                 plane_info->rotation = ROTATION_ANGLE_0;
4843                 break;
4844         case DRM_MODE_ROTATE_90:
4845                 plane_info->rotation = ROTATION_ANGLE_90;
4846                 break;
4847         case DRM_MODE_ROTATE_180:
4848                 plane_info->rotation = ROTATION_ANGLE_180;
4849                 break;
4850         case DRM_MODE_ROTATE_270:
4851                 plane_info->rotation = ROTATION_ANGLE_270;
4852                 break;
4853         default:
4854                 plane_info->rotation = ROTATION_ANGLE_0;
4855                 break;
4856         }
4857
4858         plane_info->visible = true;
4859         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4860
4861         plane_info->layer_index = 0;
4862
4863         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4864                                           &plane_info->color_space);
4865         if (ret)
4866                 return ret;
4867
4868         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4869                                            plane_info->rotation, tiling_flags,
4870                                            &plane_info->tiling_info,
4871                                            &plane_info->plane_size,
4872                                            &plane_info->dcc, address, tmz_surface,
4873                                            force_disable_dcc);
4874         if (ret)
4875                 return ret;
4876
4877         fill_blending_from_plane_state(
4878                 plane_state, &plane_info->per_pixel_alpha,
4879                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4880
4881         return 0;
4882 }
4883
4884 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4885                                     struct dc_plane_state *dc_plane_state,
4886                                     struct drm_plane_state *plane_state,
4887                                     struct drm_crtc_state *crtc_state)
4888 {
4889         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4890         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4891         struct dc_scaling_info scaling_info;
4892         struct dc_plane_info plane_info;
4893         int ret;
4894         bool force_disable_dcc = false;
4895
4896         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4897         if (ret)
4898                 return ret;
4899
4900         dc_plane_state->src_rect = scaling_info.src_rect;
4901         dc_plane_state->dst_rect = scaling_info.dst_rect;
4902         dc_plane_state->clip_rect = scaling_info.clip_rect;
4903         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4904
4905         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4906         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4907                                           afb->tiling_flags,
4908                                           &plane_info,
4909                                           &dc_plane_state->address,
4910                                           afb->tmz_surface,
4911                                           force_disable_dcc);
4912         if (ret)
4913                 return ret;
4914
4915         dc_plane_state->format = plane_info.format;
4916         dc_plane_state->color_space = plane_info.color_space;
4918         dc_plane_state->plane_size = plane_info.plane_size;
4919         dc_plane_state->rotation = plane_info.rotation;
4920         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4921         dc_plane_state->stereo_format = plane_info.stereo_format;
4922         dc_plane_state->tiling_info = plane_info.tiling_info;
4923         dc_plane_state->visible = plane_info.visible;
4924         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4925         dc_plane_state->global_alpha = plane_info.global_alpha;
4926         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4927         dc_plane_state->dcc = plane_info.dcc;
4928         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now, see fill_dc_plane_info_and_addr() */
4929         dc_plane_state->flip_int_enabled = true;
4930
4931         /*
4932          * Always set input transfer function, since plane state is refreshed
4933          * every time.
4934          */
4935         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4936         if (ret)
4937                 return ret;
4938
4939         return 0;
4940 }
4941
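/*
 * Compute the stream src (composition viewport) and dst (addressable
 * area) rectangles for the connector's RMX scaling mode: aspect-
 * preserving fit for RMX_ASPECT/RMX_OFF, 1:1 centering for RMX_CENTER,
 * with any underscan borders carved out of the destination afterwards.
 */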
4942 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4943                                            const struct dm_connector_state *dm_state,
4944                                            struct dc_stream_state *stream)
4945 {
4946         enum amdgpu_rmx_type rmx_type;
4947
4948         struct rect src = { 0 }; /* viewport in composition space */
4949         struct rect dst = { 0 }; /* stream addressable area */
4950
4951         /* No mode; nothing to be done. */
4952         if (!mode)
4953                 return;
4954
4955         /* Full screen scaling by default */
4956         src.width = mode->hdisplay;
4957         src.height = mode->vdisplay;
4958         dst.width = stream->timing.h_addressable;
4959         dst.height = stream->timing.v_addressable;
4960
4961         if (dm_state) {
4962                 rmx_type = dm_state->scaling;
4963                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4964                         if (src.width * dst.height <
4965                                         src.height * dst.width) {
4966                                 /* height needs less upscaling/more downscaling */
4967                                 dst.width = src.width *
4968                                                 dst.height / src.height;
4969                         } else {
4970                                 /* width needs less upscaling/more downscaling */
4971                                 dst.height = src.height *
4972                                                 dst.width / src.width;
4973                         }
4974                 } else if (rmx_type == RMX_CENTER) {
4975                         dst = src;
4976                 }
4977
4978                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4979                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4980
4981                 if (dm_state->underscan_enable) {
4982                         dst.x += dm_state->underscan_hborder / 2;
4983                         dst.y += dm_state->underscan_vborder / 2;
4984                         dst.width -= dm_state->underscan_hborder;
4985                         dst.height -= dm_state->underscan_vborder;
4986                 }
4987         }
4988
4989         stream->src = src;
4990         stream->dst = dst;
4991
4992         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4993                       dst.x, dst.y, dst.width, dst.height);
4995 }
4996
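/*
 * Pick the colour depth DC should target: for YCbCr 4:2:0 it is capped
 * by the sink's HF-VSDB deep-colour bits, otherwise taken from the
 * EDID, then capped by the connector's requested max bpc and rounded
 * down to an even value.
 */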
4997 static enum dc_color_depth
4998 convert_color_depth_from_display_info(const struct drm_connector *connector,
4999                                       bool is_y420, int requested_bpc)
5000 {
5001         uint8_t bpc;
5002
5003         if (is_y420) {
5004                 bpc = 8;
5005
5006                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5007                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5008                         bpc = 16;
5009                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5010                         bpc = 12;
5011                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5012                         bpc = 10;
5013         } else {
5014                 bpc = (uint8_t)connector->display_info.bpc;
5015                 /* Assume 8 bpc by default if no bpc is specified. */
5016                 bpc = bpc ? bpc : 8;
5017         }
5018
5019         if (requested_bpc > 0) {
5020                 /*
5021                  * Cap display bpc based on the user requested value.
5022                  *
5023                  * The value for state->max_bpc may not be correctly updated
5024                  * depending on when the connector gets added to the state
5025                  * or if this was called outside of atomic check, so it
5026                  * can't be used directly.
5027                  */
5028                 bpc = min_t(u8, bpc, requested_bpc);
5029
5030                 /* Round down to the nearest even number. */
5031                 bpc = bpc - (bpc & 1);
5032         }
5033
5034         switch (bpc) {
5035         case 0:
5036                 /*
5037                  * Temporary workaround: DRM doesn't parse color depth for
5038                  * EDID revisions before 1.4.
5039                  * TODO: Fix edid parsing
5040                  */
5041                 return COLOR_DEPTH_888;
5042         case 6:
5043                 return COLOR_DEPTH_666;
5044         case 8:
5045                 return COLOR_DEPTH_888;
5046         case 10:
5047                 return COLOR_DEPTH_101010;
5048         case 12:
5049                 return COLOR_DEPTH_121212;
5050         case 14:
5051                 return COLOR_DEPTH_141414;
5052         case 16:
5053                 return COLOR_DEPTH_161616;
5054         default:
5055                 return COLOR_DEPTH_UNDEFINED;
5056         }
5057 }
5058
5059 static enum dc_aspect_ratio
5060 get_aspect_ratio(const struct drm_display_mode *mode_in)
5061 {
5062         /* 1-1 mapping, since both enums follow the HDMI spec. */
5063         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5064 }
5065
5066 static enum dc_color_space
5067 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5068 {
5069         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5070
5071         switch (dc_crtc_timing->pixel_encoding) {
5072         case PIXEL_ENCODING_YCBCR422:
5073         case PIXEL_ENCODING_YCBCR444:
5074         case PIXEL_ENCODING_YCBCR420:
5075         {
5076                 /*
5077                  * 27030 kHz is the separation point between HDTV and SDTV
5078                  * according to the HDMI spec, so we use YCbCr709 above it
5079                  * and YCbCr601 below it.
5080                  */
5081                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5082                         if (dc_crtc_timing->flags.Y_ONLY)
5083                                 color_space =
5084                                         COLOR_SPACE_YCBCR709_LIMITED;
5085                         else
5086                                 color_space = COLOR_SPACE_YCBCR709;
5087                 } else {
5088                         if (dc_crtc_timing->flags.Y_ONLY)
5089                                 color_space =
5090                                         COLOR_SPACE_YCBCR601_LIMITED;
5091                         else
5092                                 color_space = COLOR_SPACE_YCBCR601;
5093                 }
5094
5095         }
5096         break;
5097         case PIXEL_ENCODING_RGB:
5098                 color_space = COLOR_SPACE_SRGB;
5099                 break;
5100
5101         default:
5102                 WARN_ON(1);
5103                 break;
5104         }
5105
5106         return color_space;
5107 }
5108
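/*
 * Walk the colour depth down until the depth-scaled pixel clock fits
 * under the sink's max TMDS clock (both in kHz): deep colour scales the
 * rate by bpc/24 and YCbCr 4:2:0 halves it first. Illustrative numbers,
 * not taken from this code: 4k60 at 594000 kHz sent as 4:2:0 12-bit
 * needs 594000 / 2 * 36 / 24 = 445500 kHz, which fits a 600 MHz HDMI
 * 2.0 sink.
 */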
5109 static bool adjust_colour_depth_from_display_info(
5110         struct dc_crtc_timing *timing_out,
5111         const struct drm_display_info *info)
5112 {
5113         enum dc_color_depth depth = timing_out->display_color_depth;
5114         int normalized_clk;
5115         do {
5116                 normalized_clk = timing_out->pix_clk_100hz / 10;
5117                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5118                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5119                         normalized_clk /= 2;
5120                 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
5121                 switch (depth) {
5122                 case COLOR_DEPTH_888:
5123                         break;
5124                 case COLOR_DEPTH_101010:
5125                         normalized_clk = (normalized_clk * 30) / 24;
5126                         break;
5127                 case COLOR_DEPTH_121212:
5128                         normalized_clk = (normalized_clk * 36) / 24;
5129                         break;
5130                 case COLOR_DEPTH_161616:
5131                         normalized_clk = (normalized_clk * 48) / 24;
5132                         break;
5133                 default:
5134                         /* The above depths are the only ones valid for HDMI. */
5135                         return false;
5136                 }
5137                 if (normalized_clk <= info->max_tmds_clock) {
5138                         timing_out->display_color_depth = depth;
5139                         return true;
5140                 }
5141         } while (--depth > COLOR_DEPTH_666);
5142         return false;
5143 }
5144
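/*
 * Translate a DRM display mode (plus connector state) into DC stream
 * timing: pixel encoding, colour depth, VIC/HDMI-VIC and the raw h/v
 * timing values. Freesync video modes read the plain mode fields rather
 * than the crtc_* ones, so the stretched vertical blanking is preserved.
 */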
5145 static void fill_stream_properties_from_drm_display_mode(
5146         struct dc_stream_state *stream,
5147         const struct drm_display_mode *mode_in,
5148         const struct drm_connector *connector,
5149         const struct drm_connector_state *connector_state,
5150         const struct dc_stream_state *old_stream,
5151         int requested_bpc)
5152 {
5153         struct dc_crtc_timing *timing_out = &stream->timing;
5154         const struct drm_display_info *info = &connector->display_info;
5155         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5156         struct hdmi_vendor_infoframe hv_frame;
5157         struct hdmi_avi_infoframe avi_frame;
5158
5159         memset(&hv_frame, 0, sizeof(hv_frame));
5160         memset(&avi_frame, 0, sizeof(avi_frame));
5161
5162         timing_out->h_border_left = 0;
5163         timing_out->h_border_right = 0;
5164         timing_out->v_border_top = 0;
5165         timing_out->v_border_bottom = 0;
5166         /* TODO: un-hardcode */
5167         if (drm_mode_is_420_only(info, mode_in)
5168                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5169                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5170         else if (drm_mode_is_420_also(info, mode_in)
5171                         && aconnector->force_yuv420_output)
5172                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5173         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5174                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5175                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5176         else
5177                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5178
5179         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5180         timing_out->display_color_depth = convert_color_depth_from_display_info(
5181                 connector,
5182                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5183                 requested_bpc);
5184         timing_out->scan_type = SCANNING_TYPE_NODATA;
5185         timing_out->hdmi_vic = 0;
5186
5187         if (old_stream) {
5188                 timing_out->vic = old_stream->timing.vic;
5189                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5190                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5191         } else {
5192                 timing_out->vic = drm_match_cea_mode(mode_in);
5193                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5194                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5195                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5196                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5197         }
5198
5199         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5200                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5201                 timing_out->vic = avi_frame.video_code;
5202                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5203                 timing_out->hdmi_vic = hv_frame.vic;
5204         }
5205
5206         if (is_freesync_video_mode(mode_in, aconnector)) {
5207                 timing_out->h_addressable = mode_in->hdisplay;
5208                 timing_out->h_total = mode_in->htotal;
5209                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5210                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5211                 timing_out->v_total = mode_in->vtotal;
5212                 timing_out->v_addressable = mode_in->vdisplay;
5213                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5214                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5215                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5216         } else {
5217                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5218                 timing_out->h_total = mode_in->crtc_htotal;
5219                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5220                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5221                 timing_out->v_total = mode_in->crtc_vtotal;
5222                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5223                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5224                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5225                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5226         }
5227
5228         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5229
5230         stream->output_color_space = get_output_color_space(timing_out);
5231
5232         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5233         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5234         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5235                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5236                     drm_mode_is_420_also(info, mode_in) &&
5237                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5238                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5239                         adjust_colour_depth_from_display_info(timing_out, info);
5240                 }
5241         }
5242 }
5243
5244 static void fill_audio_info(struct audio_info *audio_info,
5245                             const struct drm_connector *drm_connector,
5246                             const struct dc_sink *dc_sink)
5247 {
5248         int i = 0;
5249         int cea_revision = 0;
5250         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5251
5252         audio_info->manufacture_id = edid_caps->manufacturer_id;
5253         audio_info->product_id = edid_caps->product_id;
5254
5255         cea_revision = drm_connector->display_info.cea_rev;
5256
5257         strscpy(audio_info->display_name,
5258                 edid_caps->display_name,
5259                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5260
5261         if (cea_revision >= 3) {
5262                 audio_info->mode_count = edid_caps->audio_mode_count;
5263
5264                 for (i = 0; i < audio_info->mode_count; ++i) {
5265                         audio_info->modes[i].format_code =
5266                                         (enum audio_format_code)
5267                                         (edid_caps->audio_modes[i].format_code);
5268                         audio_info->modes[i].channel_count =
5269                                         edid_caps->audio_modes[i].channel_count;
5270                         audio_info->modes[i].sample_rates.all =
5271                                         edid_caps->audio_modes[i].sample_rate;
5272                         audio_info->modes[i].sample_size =
5273                                         edid_caps->audio_modes[i].sample_size;
5274                 }
5275         }
5276
5277         audio_info->flags.all = edid_caps->speaker_flags;
5278
5279         /* TODO: We only check the progressive mode; check the interlaced mode too */
5280         if (drm_connector->latency_present[0]) {
5281                 audio_info->video_latency = drm_connector->video_latency[0];
5282                 audio_info->audio_latency = drm_connector->audio_latency[0];
5283         }
5284
5285         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5287 }
5288
5289 static void
5290 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5291                                       struct drm_display_mode *dst_mode)
5292 {
5293         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5294         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5295         dst_mode->crtc_clock = src_mode->crtc_clock;
5296         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5297         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5298         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5299         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5300         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5301         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5302         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5303         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5304         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5305         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5306         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5307 }
5308
5309 static void
5310 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5311                                         const struct drm_display_mode *native_mode,
5312                                         bool scale_enabled)
5313 {
5314         if (scale_enabled) {
5315                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5316         } else if (native_mode->clock == drm_mode->clock &&
5317                         native_mode->htotal == drm_mode->htotal &&
5318                         native_mode->vtotal == drm_mode->vtotal) {
5319                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5320         } else {
5321                 /* neither scaling nor an amdgpu-inserted mode; nothing to patch */
5322         }
5323 }
5324
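/*
 * Create a virtual sink tied to the connector's link, used when a
 * stream must be built without a real, connected sink.
 */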
5325 static struct dc_sink *
5326 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5327 {
5328         struct dc_sink_init_data sink_init_data = { 0 };
5329         struct dc_sink *sink = NULL;
5330         sink_init_data.link = aconnector->dc_link;
5331         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5332
5333         sink = dc_sink_create(&sink_init_data);
5334         if (!sink) {
5335                 DRM_ERROR("Failed to create sink!\n");
5336                 return NULL;
5337         }
5338         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5339
5340         return sink;
5341 }
5342
5343 static void set_multisync_trigger_params(
5344                 struct dc_stream_state *stream)
5345 {
5346         struct dc_stream_state *master = NULL;
5347
5348         if (stream->triggered_crtc_reset.enabled) {
5349                 master = stream->triggered_crtc_reset.event_source;
5350                 stream->triggered_crtc_reset.event =
5351                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5352                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5353                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5354         }
5355 }
5356
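/*
 * Pick the stream with the highest refresh rate as the multisync master;
 * every stream in the set then uses it as the CRTC-reset event source.
 */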
5357 static void set_master_stream(struct dc_stream_state *stream_set[],
5358                               int stream_count)
5359 {
5360         int j, highest_rfr = 0, master_stream = 0;
5361
5362         for (j = 0;  j < stream_count; j++) {
5363                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5364                         int refresh_rate = 0;
5365
5366                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5367                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5368                         if (refresh_rate > highest_rfr) {
5369                                 highest_rfr = refresh_rate;
5370                                 master_stream = j;
5371                         }
5372                 }
5373         }
5374         for (j = 0;  j < stream_count; j++) {
5375                 if (stream_set[j])
5376                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5377         }
5378 }
5379
5380 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5381 {
5382         int i = 0;
5383         struct dc_stream_state *stream;
5384
5385         if (context->stream_count < 2)
5386                 return;
5387         for (i = 0; i < context->stream_count ; i++) {
5388                 if (!context->streams[i])
5389                         continue;
5390                 /*
5391                  * TODO: add a function to read AMD VSDB bits and set
5392                  * crtc_sync_master.multi_sync_enabled flag.
5393                  * For now it is set to false.
5394                  */
5395         }
5396
5397         set_master_stream(context->streams, context->stream_count);
5398
5399         for (i = 0; i < context->stream_count ; i++) {
5400                 stream = context->streams[i];
5401
5402                 if (!stream)
5403                         continue;
5404
5405                 set_multisync_trigger_params(stream);
5406         }
5407 }
5408
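/*
 * Find the mode that matches the preferred mode's resolution but has the
 * highest refresh rate, scanning either the probed or the current mode
 * list. The result is cached in freesync_vid_base, so later calls return
 * the cached mode directly.
 */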
5409 static struct drm_display_mode *
5410 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5411                           bool use_probed_modes)
5412 {
5413         struct drm_display_mode *m, *m_pref = NULL;
5414         u16 current_refresh, highest_refresh;
5415         struct list_head *list_head = use_probed_modes ?
5416                                                     &aconnector->base.probed_modes :
5417                                                     &aconnector->base.modes;
5418
5419         if (aconnector->freesync_vid_base.clock != 0)
5420                 return &aconnector->freesync_vid_base;
5421
5422         /* Find the preferred mode */
5423         list_for_each_entry(m, list_head, head) {
5424                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5425                         m_pref = m;
5426                         break;
5427                 }
5428         }
5429
5430         if (!m_pref) {
5431                 /* Probably an EDID with no preferred mode; fall back to the first entry. */
5432                 m_pref = list_first_entry_or_null(
5433                         &aconnector->base.modes, struct drm_display_mode, head);
5434                 if (!m_pref) {
5435                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5436                         return NULL;
5437                 }
5438         }
5439
5440         highest_refresh = drm_mode_vrefresh(m_pref);
5441
5442         /*
5443          * Find the mode with highest refresh rate with same resolution.
5444          * For some monitors, preferred mode is not the mode with highest
5445          * supported refresh rate.
5446          */
5447         list_for_each_entry(m, list_head, head) {
5448                 current_refresh  = drm_mode_vrefresh(m);
5449
5450                 if (m->hdisplay == m_pref->hdisplay &&
5451                     m->vdisplay == m_pref->vdisplay &&
5452                     highest_refresh < current_refresh) {
5453                         highest_refresh = current_refresh;
5454                         m_pref = m;
5455                 }
5456         }
5457
5458         aconnector->freesync_vid_base = *m_pref;
5459         return m_pref;
5460 }
5461
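/*
 * A mode qualifies as a FreeSync video mode when it matches the
 * highest-refresh base mode in every timing parameter except vtotal, and
 * the vsync offsets shift by exactly the vtotal delta, i.e. the vertical
 * front porch absorbs the difference while the back porch stays fixed.
 */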
5462 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5463                                    struct amdgpu_dm_connector *aconnector)
5464 {
5465         struct drm_display_mode *high_mode;
5466         int timing_diff;
5467
5468         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5469         if (!high_mode || !mode)
5470                 return false;
5471
5472         timing_diff = high_mode->vtotal - mode->vtotal;
5473
5474         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5475             high_mode->hdisplay != mode->hdisplay ||
5476             high_mode->vdisplay != mode->vdisplay ||
5477             high_mode->hsync_start != mode->hsync_start ||
5478             high_mode->hsync_end != mode->hsync_end ||
5479             high_mode->htotal != mode->htotal ||
5480             high_mode->hskew != mode->hskew ||
5481             high_mode->vscan != mode->vscan ||
5482             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5483             high_mode->vsync_end - mode->vsync_end != timing_diff)
5484                 return false;
5485         else
5486                 return true;
5487 }
5488
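/*
 * Build a dc_stream_state for the connector's sink (a fake sink when none
 * is attached). Timings come either from a matching FreeSync video mode or
 * from the preferred mode, DSC is configured for DP sinks that support it,
 * and VSC SDP colorimetry is set up when the link has PSR enabled.
 */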
5489 static struct dc_stream_state *
5490 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5491                        const struct drm_display_mode *drm_mode,
5492                        const struct dm_connector_state *dm_state,
5493                        const struct dc_stream_state *old_stream,
5494                        int requested_bpc)
5495 {
5496         struct drm_display_mode *preferred_mode = NULL;
5497         struct drm_connector *drm_connector;
5498         const struct drm_connector_state *con_state =
5499                 dm_state ? &dm_state->base : NULL;
5500         struct dc_stream_state *stream = NULL;
5501         struct drm_display_mode mode = *drm_mode;
5502         struct drm_display_mode saved_mode;
5503         struct drm_display_mode *freesync_mode = NULL;
5504         bool native_mode_found = false;
5505         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5506         int mode_refresh;
5507         int preferred_refresh = 0;
5508 #if defined(CONFIG_DRM_AMD_DC_DCN)
5509         struct dsc_dec_dpcd_caps dsc_caps;
5510         uint32_t link_bandwidth_kbps;
5511 #endif
5512         struct dc_sink *sink = NULL;
5513
5514         memset(&saved_mode, 0, sizeof(saved_mode));
5515
5516         if (aconnector == NULL) {
5517                 DRM_ERROR("aconnector is NULL!\n");
5518                 return stream;
5519         }
5520
5521         drm_connector = &aconnector->base;
5522
5523         if (!aconnector->dc_sink) {
5524                 sink = create_fake_sink(aconnector);
5525                 if (!sink)
5526                         return stream;
5527         } else {
5528                 sink = aconnector->dc_sink;
5529                 dc_sink_retain(sink);
5530         }
5531
5532         stream = dc_create_stream_for_sink(sink);
5533
5534         if (stream == NULL) {
5535                 DRM_ERROR("Failed to create stream for sink!\n");
5536                 goto finish;
5537         }
5538
5539         stream->dm_stream_context = aconnector;
5540
5541         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5542                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5543
5544         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5545                 /* Search for preferred mode */
5546                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5547                         native_mode_found = true;
5548                         break;
5549                 }
5550         }
5551         if (!native_mode_found)
5552                 preferred_mode = list_first_entry_or_null(
5553                                 &aconnector->base.modes,
5554                                 struct drm_display_mode,
5555                                 head);
5556
5557         mode_refresh = drm_mode_vrefresh(&mode);
5558
5559         if (preferred_mode == NULL) {
5560                 /*
5561                  * This may not be an error; the use case is when we have no
5562                  * usermode calls to reset and set the mode upon hotplug. In this
5563                  * case, we call set mode ourselves to restore the previous mode,
5564                  * and the mode list may not be filled in in time.
5565                  */
5566                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5567         } else {
5568                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5569                                  is_freesync_video_mode(&mode, aconnector);
5570                 if (recalculate_timing) {
5571                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5572                         saved_mode = mode;
5573                         mode = *freesync_mode;
5574                 } else {
5575                         decide_crtc_timing_for_drm_display_mode(
5576                                 &mode, preferred_mode,
5577                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5578                 }
5579
5580                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5581         }
5582
5583         if (recalculate_timing)
5584                 drm_mode_set_crtcinfo(&saved_mode, 0);
5585         else if (!dm_state)
5586                 drm_mode_set_crtcinfo(&mode, 0);
5587
5588         /*
5589          * If scaling is enabled and the refresh rate didn't change,
5590          * we copy the vic and polarities of the old timings.
5591          */
5592         if (!recalculate_timing || mode_refresh != preferred_refresh)
5593                 fill_stream_properties_from_drm_display_mode(
5594                         stream, &mode, &aconnector->base, con_state, NULL,
5595                         requested_bpc);
5596         else
5597                 fill_stream_properties_from_drm_display_mode(
5598                         stream, &mode, &aconnector->base, con_state, old_stream,
5599                         requested_bpc);
5600
5601         stream->timing.flags.DSC = 0;
5602
5603         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5604 #if defined(CONFIG_DRM_AMD_DC_DCN)
5605                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5606                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5607                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5608                                       &dsc_caps);
5609                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5610                                                              dc_link_get_link_cap(aconnector->dc_link));
5611
5612                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5613                         /* Set DSC policy according to dsc_clock_en */
5614                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5615                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5616
5617                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5618                                                   &dsc_caps,
5619                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5620                                                   0,
5621                                                   link_bandwidth_kbps,
5622                                                   &stream->timing,
5623                                                   &stream->timing.dsc_cfg))
5624                                 stream->timing.flags.DSC = 1;
5625                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5626                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5627                                 stream->timing.flags.DSC = 1;
5628
5629                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5630                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5631
5632                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5633                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5634
5635                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5636                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5637                 }
5638 #endif
5639         }
5640
5641         update_stream_scaling_settings(&mode, dm_state, stream);
5642
5643         fill_audio_info(
5644                 &stream->audio_info,
5645                 drm_connector,
5646                 sink);
5647
5648         update_stream_signal(stream, sink);
5649
5650         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5651                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5652
5653         if (stream->link->psr_settings.psr_feature_enabled) {
5654                 /*
5655                  * Should decide whether the stream supports the VSC SDP
5656                  * colorimetry capability before building the VSC info packet.
5657                  */
5658                 stream->use_vsc_sdp_for_colorimetry = false;
5659                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5660                         stream->use_vsc_sdp_for_colorimetry =
5661                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5662                 } else {
5663                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5664                                 stream->use_vsc_sdp_for_colorimetry = true;
5665                 }
5666                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5667         }
5668 finish:
5669         dc_sink_release(sink);
5670
5671         return stream;
5672 }
5673
5674 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5675 {
5676         drm_crtc_cleanup(crtc);
5677         kfree(crtc);
5678 }
5679
5680 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5681                                   struct drm_crtc_state *state)
5682 {
5683         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5684
5685         /* TODO Destroy dc_stream objects once the stream object is flattened */
5686         if (cur->stream)
5687                 dc_stream_release(cur->stream);
5688
5689         __drm_atomic_helper_crtc_destroy_state(state);
5690
5693         kfree(state);
5694 }
5695
5696 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5697 {
5698         struct dm_crtc_state *state;
5699
5700         if (crtc->state)
5701                 dm_crtc_destroy_state(crtc, crtc->state);
5702
5703         state = kzalloc(sizeof(*state), GFP_KERNEL);
5704         if (WARN_ON(!state))
5705                 return;
5706
5707         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5708 }
5709
5710 static struct drm_crtc_state *
5711 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5712 {
5713         struct dm_crtc_state *state, *cur;
5714
5715         if (WARN_ON(!crtc->state))
5716                 return NULL;
5717
5718         cur = to_dm_crtc_state(crtc->state);
5719
5720         state = kzalloc(sizeof(*state), GFP_KERNEL);
5721         if (!state)
5722                 return NULL;
5723
5724         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5725
5726         if (cur->stream) {
5727                 state->stream = cur->stream;
5728                 dc_stream_retain(state->stream);
5729         }
5730
5731         state->active_planes = cur->active_planes;
5732         state->vrr_infopacket = cur->vrr_infopacket;
5733         state->abm_level = cur->abm_level;
5734         state->vrr_supported = cur->vrr_supported;
5735         state->freesync_config = cur->freesync_config;
5736         state->cm_has_degamma = cur->cm_has_degamma;
5737         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5738         /* TODO Duplicate dc_stream after the stream object is flattened */
5739
5740         return &state->base;
5741 }
5742
5743 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5744 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5745 {
5746         crtc_debugfs_init(crtc);
5747
5748         return 0;
5749 }
5750 #endif
5751
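/*
 * Map the CRTC's OTG instance to its VUPDATE interrupt source and ask DC
 * to enable or disable it. Returns -EBUSY if DC rejects the change.
 */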
5752 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5753 {
5754         enum dc_irq_source irq_source;
5755         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5756         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5757         int rc;
5758
5759         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5760
5761         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5762
5763         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5764                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5765         return rc;
5766 }
5767
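/*
 * Toggle the VBLANK interrupt for a CRTC. The VUPDATE interrupt is only
 * kept on while VRR is active. On DCN the request is also queued to the
 * vblank workqueue (mall_work) for deferred handling, except during GPU
 * reset.
 */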
5768 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5769 {
5770         enum dc_irq_source irq_source;
5771         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5772         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5773         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5774 #if defined(CONFIG_DRM_AMD_DC_DCN)
5775         struct amdgpu_display_manager *dm = &adev->dm;
5776         unsigned long flags;
5777 #endif
5778         int rc = 0;
5779
5780         if (enable) {
5781                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5782                 if (amdgpu_dm_vrr_active(acrtc_state))
5783                         rc = dm_set_vupdate_irq(crtc, true);
5784         } else {
5785                 /* vblank irq off -> vupdate irq off */
5786                 rc = dm_set_vupdate_irq(crtc, false);
5787         }
5788
5789         if (rc)
5790                 return rc;
5791
5792         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5793
5794         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5795                 return -EBUSY;
5796
5797         if (amdgpu_in_reset(adev))
5798                 return 0;
5799
5800 #if defined(CONFIG_DRM_AMD_DC_DCN)
5801         spin_lock_irqsave(&dm->vblank_lock, flags);
5802         dm->vblank_workqueue->dm = dm;
5803         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5804         dm->vblank_workqueue->enable = enable;
5805         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5806         schedule_work(&dm->vblank_workqueue->mall_work);
5807 #endif
5808
5809         return 0;
5810 }
5811
5812 static int dm_enable_vblank(struct drm_crtc *crtc)
5813 {
5814         return dm_set_vblank(crtc, true);
5815 }
5816
5817 static void dm_disable_vblank(struct drm_crtc *crtc)
5818 {
5819         dm_set_vblank(crtc, false);
5820 }
5821
5822 /* Implement only the options currently available for the driver */
5823 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5824         .reset = dm_crtc_reset_state,
5825         .destroy = amdgpu_dm_crtc_destroy,
5826         .set_config = drm_atomic_helper_set_config,
5827         .page_flip = drm_atomic_helper_page_flip,
5828         .atomic_duplicate_state = dm_crtc_duplicate_state,
5829         .atomic_destroy_state = dm_crtc_destroy_state,
5830         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5831         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5832         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5833         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5834         .enable_vblank = dm_enable_vblank,
5835         .disable_vblank = dm_disable_vblank,
5836         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5837 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5838         .late_register = amdgpu_dm_crtc_late_register,
5839 #endif
5840 };
5841
5842 static enum drm_connector_status
5843 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5844 {
5845         bool connected;
5846         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5847
5848         /*
5849          * Notes:
5850          * 1. This interface is NOT called in the context of an HPD irq.
5851          * 2. This interface *is called* in the context of a user-mode ioctl,
5852          *    which makes it a bad place for *any* MST-related activity.
5853          */
5854
5855         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5856             !aconnector->fake_enable)
5857                 connected = (aconnector->dc_sink != NULL);
5858         else
5859                 connected = (aconnector->base.force == DRM_FORCE_ON);
5860
5861         update_subconnector_property(aconnector);
5862
5863         return (connected ? connector_status_connected :
5864                         connector_status_disconnected);
5865 }
5866
5867 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5868                                             struct drm_connector_state *connector_state,
5869                                             struct drm_property *property,
5870                                             uint64_t val)
5871 {
5872         struct drm_device *dev = connector->dev;
5873         struct amdgpu_device *adev = drm_to_adev(dev);
5874         struct dm_connector_state *dm_old_state =
5875                 to_dm_connector_state(connector->state);
5876         struct dm_connector_state *dm_new_state =
5877                 to_dm_connector_state(connector_state);
5878
5879         int ret = -EINVAL;
5880
5881         if (property == dev->mode_config.scaling_mode_property) {
5882                 enum amdgpu_rmx_type rmx_type;
5883
5884                 switch (val) {
5885                 case DRM_MODE_SCALE_CENTER:
5886                         rmx_type = RMX_CENTER;
5887                         break;
5888                 case DRM_MODE_SCALE_ASPECT:
5889                         rmx_type = RMX_ASPECT;
5890                         break;
5891                 case DRM_MODE_SCALE_FULLSCREEN:
5892                         rmx_type = RMX_FULL;
5893                         break;
5894                 case DRM_MODE_SCALE_NONE:
5895                 default:
5896                         rmx_type = RMX_OFF;
5897                         break;
5898                 }
5899
5900                 if (dm_old_state->scaling == rmx_type)
5901                         return 0;
5902
5903                 dm_new_state->scaling = rmx_type;
5904                 ret = 0;
5905         } else if (property == adev->mode_info.underscan_hborder_property) {
5906                 dm_new_state->underscan_hborder = val;
5907                 ret = 0;
5908         } else if (property == adev->mode_info.underscan_vborder_property) {
5909                 dm_new_state->underscan_vborder = val;
5910                 ret = 0;
5911         } else if (property == adev->mode_info.underscan_property) {
5912                 dm_new_state->underscan_enable = val;
5913                 ret = 0;
5914         } else if (property == adev->mode_info.abm_level_property) {
5915                 dm_new_state->abm_level = val;
5916                 ret = 0;
5917         }
5918
5919         return ret;
5920 }
5921
5922 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5923                                             const struct drm_connector_state *state,
5924                                             struct drm_property *property,
5925                                             uint64_t *val)
5926 {
5927         struct drm_device *dev = connector->dev;
5928         struct amdgpu_device *adev = drm_to_adev(dev);
5929         struct dm_connector_state *dm_state =
5930                 to_dm_connector_state(state);
5931         int ret = -EINVAL;
5932
5933         if (property == dev->mode_config.scaling_mode_property) {
5934                 switch (dm_state->scaling) {
5935                 case RMX_CENTER:
5936                         *val = DRM_MODE_SCALE_CENTER;
5937                         break;
5938                 case RMX_ASPECT:
5939                         *val = DRM_MODE_SCALE_ASPECT;
5940                         break;
5941                 case RMX_FULL:
5942                         *val = DRM_MODE_SCALE_FULLSCREEN;
5943                         break;
5944                 case RMX_OFF:
5945                 default:
5946                         *val = DRM_MODE_SCALE_NONE;
5947                         break;
5948                 }
5949                 ret = 0;
5950         } else if (property == adev->mode_info.underscan_hborder_property) {
5951                 *val = dm_state->underscan_hborder;
5952                 ret = 0;
5953         } else if (property == adev->mode_info.underscan_vborder_property) {
5954                 *val = dm_state->underscan_vborder;
5955                 ret = 0;
5956         } else if (property == adev->mode_info.underscan_property) {
5957                 *val = dm_state->underscan_enable;
5958                 ret = 0;
5959         } else if (property == adev->mode_info.abm_level_property) {
5960                 *val = dm_state->abm_level;
5961                 ret = 0;
5962         }
5963
5964         return ret;
5965 }
5966
5967 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5968 {
5969         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5970
5971         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5972 }
5973
5974 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5975 {
5976         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5977         const struct dc_link *link = aconnector->dc_link;
5978         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5979         struct amdgpu_display_manager *dm = &adev->dm;
5980
5981         /*
5982          * Call only if mst_mgr was initialized before, since it's not done
5983          * for all connector types.
5984          */
5985         if (aconnector->mst_mgr.dev)
5986                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5987
5988 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5989         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5990
5991         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5992             link->type != dc_connection_none &&
5993             dm->backlight_dev) {
5994                 backlight_device_unregister(dm->backlight_dev);
5995                 dm->backlight_dev = NULL;
5996         }
5997 #endif
5998
5999         if (aconnector->dc_em_sink)
6000                 dc_sink_release(aconnector->dc_em_sink);
6001         aconnector->dc_em_sink = NULL;
6002         if (aconnector->dc_sink)
6003                 dc_sink_release(aconnector->dc_sink);
6004         aconnector->dc_sink = NULL;
6005
6006         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6007         drm_connector_unregister(connector);
6008         drm_connector_cleanup(connector);
6009         if (aconnector->i2c) {
6010                 i2c_del_adapter(&aconnector->i2c->base);
6011                 kfree(aconnector->i2c);
6012         }
6013         kfree(aconnector->dm_dp_aux.aux.name);
6014
6015         kfree(connector);
6016 }
6017
6018 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6019 {
6020         struct dm_connector_state *state =
6021                 to_dm_connector_state(connector->state);
6022
6023         if (connector->state)
6024                 __drm_atomic_helper_connector_destroy_state(connector->state);
6025
6026         kfree(state);
6027
6028         state = kzalloc(sizeof(*state), GFP_KERNEL);
6029
6030         if (state) {
6031                 state->scaling = RMX_OFF;
6032                 state->underscan_enable = false;
6033                 state->underscan_hborder = 0;
6034                 state->underscan_vborder = 0;
6035                 state->base.max_requested_bpc = 8;
6036                 state->vcpi_slots = 0;
6037                 state->pbn = 0;
6038                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6039                         state->abm_level = amdgpu_dm_abm_level;
6040
6041                 __drm_atomic_helper_connector_reset(connector, &state->base);
6042         }
6043 }
6044
6045 struct drm_connector_state *
6046 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6047 {
6048         struct dm_connector_state *state =
6049                 to_dm_connector_state(connector->state);
6050
6051         struct dm_connector_state *new_state =
6052                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6053
6054         if (!new_state)
6055                 return NULL;
6056
6057         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6058
6059         new_state->freesync_capable = state->freesync_capable;
6060         new_state->abm_level = state->abm_level;
6061         new_state->scaling = state->scaling;
6062         new_state->underscan_enable = state->underscan_enable;
6063         new_state->underscan_hborder = state->underscan_hborder;
6064         new_state->underscan_vborder = state->underscan_vborder;
6065         new_state->vcpi_slots = state->vcpi_slots;
6066         new_state->pbn = state->pbn;
6067         return &new_state->base;
6068 }
6069
6070 static int
6071 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6072 {
6073         struct amdgpu_dm_connector *amdgpu_dm_connector =
6074                 to_amdgpu_dm_connector(connector);
6075         int r;
6076
6077         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6078             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6079                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6080                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6081                 if (r)
6082                         return r;
6083         }
6084
6085 #if defined(CONFIG_DEBUG_FS)
6086         connector_debugfs_init(amdgpu_dm_connector);
6087 #endif
6088
6089         return 0;
6090 }
6091
6092 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6093         .reset = amdgpu_dm_connector_funcs_reset,
6094         .detect = amdgpu_dm_connector_detect,
6095         .fill_modes = drm_helper_probe_single_connector_modes,
6096         .destroy = amdgpu_dm_connector_destroy,
6097         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6098         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6099         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6100         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6101         .late_register = amdgpu_dm_connector_late_register,
6102         .early_unregister = amdgpu_dm_connector_unregister
6103 };
6104
6105 static int get_modes(struct drm_connector *connector)
6106 {
6107         return amdgpu_dm_connector_get_modes(connector);
6108 }
6109
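/*
 * Build an emulated sink from the connector's EDID property blob so a
 * forced-on connector can light up without a physically detected sink;
 * without an EDID the connector is forced off instead.
 */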
6110 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6111 {
6112         struct dc_sink_init_data init_params = {
6113                         .link = aconnector->dc_link,
6114                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6115         };
6116         struct edid *edid;
6117
6118         if (!aconnector->base.edid_blob_ptr) {
6119                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6120                                 aconnector->base.name);
6121
6122                 aconnector->base.force = DRM_FORCE_OFF;
6123                 aconnector->base.override_edid = false;
6124                 return;
6125         }
6126
6127         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6128
6129         aconnector->edid = edid;
6130
6131         aconnector->dc_em_sink = dc_link_add_remote_sink(
6132                 aconnector->dc_link,
6133                 (uint8_t *)edid,
6134                 (edid->extensions + 1) * EDID_LENGTH,
6135                 &init_params);
6136
6137         if (aconnector->base.force == DRM_FORCE_ON) {
6138                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6139                 aconnector->dc_link->local_sink :
6140                 aconnector->dc_em_sink;
6141                 dc_sink_retain(aconnector->dc_sink);
6142         }
6143 }
6144
6145 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6146 {
6147         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6148
6149         /*
6150          * In case of a headless boot with force-on for a DP-managed connector,
6151          * these settings have to be != 0 to get an initial modeset.
6152          */
6153         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6154                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6155                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6156         }
6157
6159         aconnector->base.override_edid = true;
6160         create_eml_sink(aconnector);
6161 }
6162
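/*
 * Create a stream and validate it with DC, retrying at progressively lower
 * bpc (down to 6) while validation fails. On an encoder validation failure,
 * retry once more with YCbCr420 output forced.
 */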
6163 static struct dc_stream_state *
6164 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6165                                 const struct drm_display_mode *drm_mode,
6166                                 const struct dm_connector_state *dm_state,
6167                                 const struct dc_stream_state *old_stream)
6168 {
6169         struct drm_connector *connector = &aconnector->base;
6170         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6171         struct dc_stream_state *stream;
6172         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6173         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6174         enum dc_status dc_result = DC_OK;
6175
6176         do {
6177                 stream = create_stream_for_sink(aconnector, drm_mode,
6178                                                 dm_state, old_stream,
6179                                                 requested_bpc);
6180                 if (stream == NULL) {
6181                         DRM_ERROR("Failed to create stream for sink!\n");
6182                         break;
6183                 }
6184
6185                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6186
6187                 if (dc_result != DC_OK) {
6188                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6189                                       drm_mode->hdisplay,
6190                                       drm_mode->vdisplay,
6191                                       drm_mode->clock,
6192                                       dc_result,
6193                                       dc_status_to_str(dc_result));
6194
6195                         dc_stream_release(stream);
6196                         stream = NULL;
6197                         requested_bpc -= 2; /* lower bpc to retry validation */
6198                 }
6199
6200         } while (stream == NULL && requested_bpc >= 6);
6201
6202         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6203                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6204
6205                 aconnector->force_yuv420_output = true;
6206                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6207                                                 dm_state, old_stream);
6208                 aconnector->force_yuv420_output = false;
6209         }
6210
6211         return stream;
6212 }
6213
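/*
 * Validate a mode by actually building and DC-validating a stream for it.
 * Interlaced and doublescan modes are rejected up front, and forced
 * connectors get their emulated EDID sink initialized on first use.
 */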
6214 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6215                                    struct drm_display_mode *mode)
6216 {
6217         int result = MODE_ERROR;
6218         struct dc_sink *dc_sink;
6219         /* TODO: Unhardcode stream count */
6220         struct dc_stream_state *stream;
6221         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6222
6223         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6224                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6225                 return result;
6226
6227         /*
6228          * Only run this the first time mode_valid is called, to initialize
6229          * EDID mgmt.
6230          */
6231         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6232                 !aconnector->dc_em_sink)
6233                 handle_edid_mgmt(aconnector);
6234
6235         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6236
6237         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6238                                 aconnector->base.force != DRM_FORCE_ON) {
6239                 DRM_ERROR("dc_sink is NULL!\n");
6240                 goto fail;
6241         }
6242
6243         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6244         if (stream) {
6245                 dc_stream_release(stream);
6246                 result = MODE_OK;
6247         }
6248
6249 fail:
6250         /* TODO: error handling*/
6251         return result;
6252 }
6253
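/*
 * Pack the connector state's HDR output metadata into a DC info packet: an
 * HDMI Dynamic Range and Mastering infoframe for HDMI, or the equivalent
 * SDP for DP/eDP. The packed frame is a fixed 4-byte header plus 26 bytes
 * of static metadata, reflected in the header length fields below.
 */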
6254 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6255                                 struct dc_info_packet *out)
6256 {
6257         struct hdmi_drm_infoframe frame;
6258         unsigned char buf[30]; /* 26 + 4 */
6259         ssize_t len;
6260         int ret, i;
6261
6262         memset(out, 0, sizeof(*out));
6263
6264         if (!state->hdr_output_metadata)
6265                 return 0;
6266
6267         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6268         if (ret)
6269                 return ret;
6270
6271         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6272         if (len < 0)
6273                 return (int)len;
6274
6275         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6276         if (len != 30)
6277                 return -EINVAL;
6278
6279         /* Prepare the infopacket for DC. */
6280         switch (state->connector->connector_type) {
6281         case DRM_MODE_CONNECTOR_HDMIA:
6282                 out->hb0 = 0x87; /* type */
6283                 out->hb1 = 0x01; /* version */
6284                 out->hb2 = 0x1A; /* length */
6285                 out->sb[0] = buf[3]; /* checksum */
6286                 i = 1;
6287                 break;
6288
6289         case DRM_MODE_CONNECTOR_DisplayPort:
6290         case DRM_MODE_CONNECTOR_eDP:
6291                 out->hb0 = 0x00; /* sdp id, zero */
6292                 out->hb1 = 0x87; /* type */
6293                 out->hb2 = 0x1D; /* payload len - 1 */
6294                 out->hb3 = (0x13 << 2); /* sdp version */
6295                 out->sb[0] = 0x01; /* version */
6296                 out->sb[1] = 0x1A; /* length */
6297                 i = 2;
6298                 break;
6299
6300         default:
6301                 return -EINVAL;
6302         }
6303
6304         memcpy(&out->sb[i], &buf[4], 26);
6305         out->valid = true;
6306
6307         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6308                        sizeof(out->sb), false);
6309
6310         return 0;
6311 }
6312
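/*
 * Compare the old and new HDR metadata blobs: identical pointers are
 * unchanged, equal-length blobs are compared byte-for-byte, and anything
 * else (one blob missing, lengths differing) counts as a change.
 */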
6313 static bool
6314 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6315                           const struct drm_connector_state *new_state)
6316 {
6317         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6318         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6319
6320         if (old_blob != new_blob) {
6321                 if (old_blob && new_blob &&
6322                     old_blob->length == new_blob->length)
6323                         return memcmp(old_blob->data, new_blob->data,
6324                                       old_blob->length);
6325
6326                 return true;
6327         }
6328
6329         return false;
6330 }
6331
6332 static int
6333 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6334                                  struct drm_atomic_state *state)
6335 {
6336         struct drm_connector_state *new_con_state =
6337                 drm_atomic_get_new_connector_state(state, conn);
6338         struct drm_connector_state *old_con_state =
6339                 drm_atomic_get_old_connector_state(state, conn);
6340         struct drm_crtc *crtc = new_con_state->crtc;
6341         struct drm_crtc_state *new_crtc_state;
6342         int ret;
6343
6344         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6345
6346         if (!crtc)
6347                 return 0;
6348
6349         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6350                 struct dc_info_packet hdr_infopacket;
6351
6352                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6353                 if (ret)
6354                         return ret;
6355
6356                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6357                 if (IS_ERR(new_crtc_state))
6358                         return PTR_ERR(new_crtc_state);
6359
6360                 /*
6361                  * DC considers the stream backends changed if the
6362                  * static metadata changes. Forcing the modeset also
6363                  * gives a simple way for userspace to switch from
6364                  * 8bpc to 10bpc when setting the metadata to enter
6365                  * or exit HDR.
6366                  *
6367                  * Changing the static metadata after it's been
6368                  * set is permissible, however. So only force a
6369                  * modeset if we're entering or exiting HDR.
6370                  */
6371                 new_crtc_state->mode_changed =
6372                         !old_con_state->hdr_output_metadata ||
6373                         !new_con_state->hdr_output_metadata;
6374         }
6375
6376         return 0;
6377 }
6378
6379 static const struct drm_connector_helper_funcs
6380 amdgpu_dm_connector_helper_funcs = {
6381         /*
6382          * When hotplugging a second, bigger display in fbcon mode, the bigger
6383          * resolution modes are filtered out by drm_mode_validate_size() and are
6384          * missing after the user starts lightdm. So we need to renew the mode
6385          * list in the get_modes callback, not just return the mode count.
6386          */
6387         .get_modes = get_modes,
6388         .mode_valid = amdgpu_dm_connector_mode_valid,
6389         .atomic_check = amdgpu_dm_connector_atomic_check,
6390 };
6391
6392 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6393 {
6394 }
6395
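/*
 * Count the non-cursor planes that will be enabled on this CRTC after the
 * commit: a plane without a new state is assumed to stay enabled, otherwise
 * it counts only if a framebuffer is attached.
 */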
6396 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6397 {
6398         struct drm_atomic_state *state = new_crtc_state->state;
6399         struct drm_plane *plane;
6400         int num_active = 0;
6401
6402         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6403                 struct drm_plane_state *new_plane_state;
6404
6405                 /* Cursor planes are "fake". */
6406                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6407                         continue;
6408
6409                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6410
6411                 if (!new_plane_state) {
6412                         /*
6413                          * The plane is enabled on the CRTC and hasn't changed
6414                          * state. This means that it previously passed
6415                          * validation and is therefore enabled.
6416                          */
6417                         num_active += 1;
6418                         continue;
6419                 }
6420
6421                 /* We need a framebuffer to be considered enabled. */
6422                 num_active += (new_plane_state->fb != NULL);
6423         }
6424
6425         return num_active;
6426 }
6427
6428 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6429                                          struct drm_crtc_state *new_crtc_state)
6430 {
6431         struct dm_crtc_state *dm_new_crtc_state =
6432                 to_dm_crtc_state(new_crtc_state);
6433
6434         dm_new_crtc_state->active_planes = 0;
6435
6436         if (!dm_new_crtc_state->stream)
6437                 return;
6438
6439         dm_new_crtc_state->active_planes =
6440                 count_crtc_active_planes(new_crtc_state);
6441 }
6442
6443 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6444                                        struct drm_atomic_state *state)
6445 {
6446         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6447                                                                           crtc);
6448         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6449         struct dc *dc = adev->dm.dc;
6450         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6451         int ret = -EINVAL;
6452
6453         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6454
6455         dm_update_crtc_active_planes(crtc, crtc_state);
6456
6457         if (unlikely(!dm_crtc_state->stream &&
6458                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6459                 WARN_ON(1);
6460                 return ret;
6461         }
6462
6463         /*
6464          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6465          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6466          * planes are disabled, which is not supported by the hardware. And there is legacy
6467          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6468          */
6469         if (crtc_state->enable &&
6470             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6471                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6472                 return -EINVAL;
6473         }
6474
6475         /* In some use cases, like reset, no stream is attached */
6476         if (!dm_crtc_state->stream)
6477                 return 0;
6478
6479         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6480                 return 0;
6481
6482         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6483         return ret;
6484 }
6485
6486 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6487                                       const struct drm_display_mode *mode,
6488                                       struct drm_display_mode *adjusted_mode)
6489 {
6490         return true;
6491 }
6492
6493 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6494         .disable = dm_crtc_helper_disable,
6495         .atomic_check = dm_crtc_helper_atomic_check,
6496         .mode_fixup = dm_crtc_helper_mode_fixup,
6497         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6498 };
6499
6500 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6501 {
6503 }
6504
6505 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6506 {
6507         switch (display_color_depth) {
6508         case COLOR_DEPTH_666:
6509                 return 6;
6510         case COLOR_DEPTH_888:
6511                 return 8;
6512         case COLOR_DEPTH_101010:
6513                 return 10;
6514         case COLOR_DEPTH_121212:
6515                 return 12;
6516         case COLOR_DEPTH_141414:
6517                 return 14;
6518         case COLOR_DEPTH_161616:
6519                 return 16;
6520         default:
6521                 break;
6522         }
6523         return 0;
6524 }
6525
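/*
 * For MST connectors, compute the PBN from the adjusted mode's clock and
 * negotiated color depth, then atomically reserve VCPI slots on the MST
 * topology manager. Non-MST connectors are left untouched.
 */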
6526 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6527                                           struct drm_crtc_state *crtc_state,
6528                                           struct drm_connector_state *conn_state)
6529 {
6530         struct drm_atomic_state *state = crtc_state->state;
6531         struct drm_connector *connector = conn_state->connector;
6532         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6533         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6534         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6535         struct drm_dp_mst_topology_mgr *mst_mgr;
6536         struct drm_dp_mst_port *mst_port;
6537         enum dc_color_depth color_depth;
6538         int clock, bpp = 0;
6539         bool is_y420 = false;
6540
6541         if (!aconnector->port || !aconnector->dc_sink)
6542                 return 0;
6543
6544         mst_port = aconnector->port;
6545         mst_mgr = &aconnector->mst_port->mst_mgr;
6546
6547         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6548                 return 0;
6549
6550         if (!state->duplicated) {
6551                 int max_bpc = conn_state->max_requested_bpc;

6552                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6553                                 aconnector->force_yuv420_output;
6554                 color_depth = convert_color_depth_from_display_info(connector,
6555                                                                     is_y420,
6556                                                                     max_bpc);
6557                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6558                 clock = adjusted_mode->clock;
6559                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6560         }
6561         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6562                                                                            mst_mgr,
6563                                                                            mst_port,
6564                                                                            dm_new_connector_state->pbn,
6565                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6566         if (dm_new_connector_state->vcpi_slots < 0) {
6567                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6568                 return dm_new_connector_state->vcpi_slots;
6569         }
6570         return 0;
6571 }
6572
6573 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6574         .disable = dm_encoder_helper_disable,
6575         .atomic_check = dm_encoder_helper_atomic_check
6576 };
6577
6578 #if defined(CONFIG_DRM_AMD_DC_DCN)
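/*
 * For each MST connector in the new state, redo the PBN/VCPI allocation
 * based on whether its stream ended up with DSC enabled: DSC streams
 * recompute PBN from the compressed bits_per_pixel, while the rest keep
 * the PBN computed earlier in the encoder atomic check.
 */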
6579 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6580                                             struct dc_state *dc_state)
6581 {
6582         struct dc_stream_state *stream = NULL;
6583         struct drm_connector *connector;
6584         struct drm_connector_state *new_con_state;
6585         struct amdgpu_dm_connector *aconnector;
6586         struct dm_connector_state *dm_conn_state;
6587         int i, j, clock, bpp;
6588         int vcpi, pbn_div, pbn = 0;
6589
6590         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6591
6592                 aconnector = to_amdgpu_dm_connector(connector);
6593
6594                 if (!aconnector->port)
6595                         continue;
6596
6597                 if (!new_con_state || !new_con_state->crtc)
6598                         continue;
6599
6600                 dm_conn_state = to_dm_connector_state(new_con_state);
6601
6602                 for (j = 0; j < dc_state->stream_count; j++) {
6603                         stream = dc_state->streams[j];
6604                         if (!stream)
6605                                 continue;
6606
6607                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6608                                 break;
6609
6610                         stream = NULL;
6611                 }
6612
6613                 if (!stream)
6614                         continue;
6615
6616                 if (stream->timing.flags.DSC != 1) {
6617                         drm_dp_mst_atomic_enable_dsc(state,
6618                                                      aconnector->port,
6619                                                      dm_conn_state->pbn,
6620                                                      0,
6621                                                      false);
6622                         continue;
6623                 }
6624
6625                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6626                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6627                 clock = stream->timing.pix_clk_100hz / 10;
6628                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6629                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6630                                                     aconnector->port,
6631                                                     pbn, pbn_div,
6632                                                     true);
6633                 if (vcpi < 0)
6634                         return vcpi;
6635
6636                 dm_conn_state->pbn = pbn;
6637                 dm_conn_state->vcpi_slots = vcpi;
6638         }
6639         return 0;
6640 }
6641 #endif
6642
6643 static void dm_drm_plane_reset(struct drm_plane *plane)
6644 {
6645         struct dm_plane_state *amdgpu_state = NULL;
6646
6647         if (plane->state)
6648                 plane->funcs->atomic_destroy_state(plane, plane->state);
6649
6650         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6651         WARN_ON(amdgpu_state == NULL);
6652
6653         if (amdgpu_state)
6654                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6655 }
6656
6657 static struct drm_plane_state *
6658 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6659 {
6660         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6661
6662         old_dm_plane_state = to_dm_plane_state(plane->state);
6663         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6664         if (!dm_plane_state)
6665                 return NULL;
6666
6667         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6668
6669         if (old_dm_plane_state->dc_state) {
6670                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6671                 dc_plane_state_retain(dm_plane_state->dc_state);
6672         }
6673
6674         return &dm_plane_state->base;
6675 }
6676
6677 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6678                                 struct drm_plane_state *state)
6679 {
6680         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6681
6682         if (dm_plane_state->dc_state)
6683                 dc_plane_state_release(dm_plane_state->dc_state);
6684
6685         drm_atomic_helper_plane_destroy_state(plane, state);
6686 }
6687
6688 static const struct drm_plane_funcs dm_plane_funcs = {
6689         .update_plane   = drm_atomic_helper_update_plane,
6690         .disable_plane  = drm_atomic_helper_disable_plane,
6691         .destroy        = drm_primary_helper_destroy,
6692         .reset = dm_drm_plane_reset,
6693         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6694         .atomic_destroy_state = dm_drm_plane_destroy_state,
6695         .format_mod_supported = dm_plane_format_mod_supported,
6696 };
6697
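/*
 * Pin the plane's framebuffer BO into a scanout-capable domain (VRAM for
 * cursors) and map it into GART, then record the GPU address and, for
 * newly created planes, the buffer attributes in the new plane state.
 */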
6698 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6699                                       struct drm_plane_state *new_state)
6700 {
6701         struct amdgpu_framebuffer *afb;
6702         struct drm_gem_object *obj;
6703         struct amdgpu_device *adev;
6704         struct amdgpu_bo *rbo;
6705         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6706         struct list_head list;
6707         struct ttm_validate_buffer tv;
6708         struct ww_acquire_ctx ticket;
6709         uint32_t domain;
6710         int r;
6711
6712         if (!new_state->fb) {
6713                 DRM_DEBUG_KMS("No FB bound\n");
6714                 return 0;
6715         }
6716
6717         afb = to_amdgpu_framebuffer(new_state->fb);
6718         obj = new_state->fb->obj[0];
6719         rbo = gem_to_amdgpu_bo(obj);
6720         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6721         INIT_LIST_HEAD(&list);
6722
6723         tv.bo = &rbo->tbo;
6724         tv.num_shared = 1;
6725         list_add(&tv.head, &list);
6726
6727         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6728         if (r) {
6729                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6730                 return r;
6731         }
6732
6733         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6734                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6735         else
6736                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6737
6738         r = amdgpu_bo_pin(rbo, domain);
6739         if (unlikely(r != 0)) {
6740                 if (r != -ERESTARTSYS)
6741                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6742                 ttm_eu_backoff_reservation(&ticket, &list);
6743                 return r;
6744         }
6745
6746         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6747         if (unlikely(r != 0)) {
6748                 amdgpu_bo_unpin(rbo);
6749                 ttm_eu_backoff_reservation(&ticket, &list);
6750                 DRM_ERROR("%p bind failed\n", rbo);
6751                 return r;
6752         }
6753
6754         ttm_eu_backoff_reservation(&ticket, &list);
6755
6756         afb->address = amdgpu_bo_gpu_offset(rbo);
6757
6758         amdgpu_bo_ref(rbo);
6759
6760         /*
6761          * We don't do surface updates on planes that have been newly created,
6762          * but we also don't have the afb->address during atomic check.
6763          *
6764          * Fill in buffer attributes depending on the address here, but only on
6765          * newly created planes since they're not being used by DC yet and this
6766          * won't modify global state.
6767          */
6768         dm_plane_state_old = to_dm_plane_state(plane->state);
6769         dm_plane_state_new = to_dm_plane_state(new_state);
6770
6771         if (dm_plane_state_new->dc_state &&
6772             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6773                 struct dc_plane_state *plane_state =
6774                         dm_plane_state_new->dc_state;
6775                 bool force_disable_dcc = !plane_state->dcc.enable;
6776
6777                 fill_plane_buffer_attributes(
6778                         adev, afb, plane_state->format, plane_state->rotation,
6779                         afb->tiling_flags,
6780                         &plane_state->tiling_info, &plane_state->plane_size,
6781                         &plane_state->dcc, &plane_state->address,
6782                         afb->tmz_surface, force_disable_dcc);
6783         }
6784
6785         return 0;
6786 }
6787
6788 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6789                                        struct drm_plane_state *old_state)
6790 {
6791         struct amdgpu_bo *rbo;
6792         int r;
6793
6794         if (!old_state->fb)
6795                 return;
6796
6797         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6798         r = amdgpu_bo_reserve(rbo, false);
6799         if (unlikely(r)) {
6800                 DRM_ERROR("failed to reserve rbo before unpin\n");
6801                 return;
6802         }
6803
6804         amdgpu_bo_unpin(rbo);
6805         amdgpu_bo_unreserve(rbo);
6806         amdgpu_bo_unref(&rbo);
6807 }
6808
6809 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6810                                        struct drm_crtc_state *new_crtc_state)
6811 {
6812         struct drm_framebuffer *fb = state->fb;
6813         int min_downscale, max_upscale;
6814         int min_scale = 0;
6815         int max_scale = INT_MAX;
6816
6817         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6818         if (fb && state->crtc) {
6819                 /* Validate viewport to cover the case when only the position changes */
6820                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6821                         int viewport_width = state->crtc_w;
6822                         int viewport_height = state->crtc_h;
6823
6824                         if (state->crtc_x < 0)
6825                                 viewport_width += state->crtc_x;
6826                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6827                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6828
6829                         if (state->crtc_y < 0)
6830                                 viewport_height += state->crtc_y;
6831                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6832                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6833
6834                         if (viewport_width < 0 || viewport_height < 0) {
6835                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6836                                 return -EINVAL;
6837                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6838                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6839                                 return -EINVAL;
6840                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6841                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6842                                 return -EINVAL;
6843                         }
6844
6845                 }
6846
6847                 /* Get min/max allowed scaling factors from plane caps. */
6848                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6849                                              &min_downscale, &max_upscale);
6850                 /*
6851                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6852                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6853                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6854                  */
6855                 min_scale = (1000 << 16) / max_upscale;
6856                 max_scale = (1000 << 16) / min_downscale;
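                     /*
                      * Illustrative example (hypothetical caps, not queried from
                      * DC here): max_upscale = 16000 (16x) and min_downscale =
                      * 250 (1/4x) give min_scale = (1000 << 16) / 16000 = 0x1000
                      * (1/16 in 16.16) and max_scale = (1000 << 16) / 250 =
                      * 0x40000 (4x in 16.16).
                      */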
6857         }
6858
6859         return drm_atomic_helper_check_plane_state(
6860                 state, new_crtc_state, min_scale, max_scale, true, true);
6861 }
6862
6863 static int dm_plane_atomic_check(struct drm_plane *plane,
6864                                  struct drm_atomic_state *state)
6865 {
6866         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6867                                                                                  plane);
6868         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6869         struct dc *dc = adev->dm.dc;
6870         struct dm_plane_state *dm_plane_state;
6871         struct dc_scaling_info scaling_info;
6872         struct drm_crtc_state *new_crtc_state;
6873         int ret;
6874
6875         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6876
6877         dm_plane_state = to_dm_plane_state(new_plane_state);
6878
6879         if (!dm_plane_state->dc_state)
6880                 return 0;
6881
6882         new_crtc_state =
6883                 drm_atomic_get_new_crtc_state(state,
6884                                               new_plane_state->crtc);
6885         if (!new_crtc_state)
6886                 return -EINVAL;
6887
6888         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6889         if (ret)
6890                 return ret;
6891
6892         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6893         if (ret)
6894                 return ret;
6895
6896         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6897                 return 0;
6898
6899         return -EINVAL;
6900 }
6901
6902 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6903                                        struct drm_atomic_state *state)
6904 {
6905         /* Only support async updates on cursor planes. */
6906         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6907                 return -EINVAL;
6908
6909         return 0;
6910 }
6911
6912 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6913                                          struct drm_atomic_state *state)
6914 {
6915         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6916                                                                            plane);
6917         struct drm_plane_state *old_state =
6918                 drm_atomic_get_old_plane_state(state, plane);
6919
6920         trace_amdgpu_dm_atomic_update_cursor(new_state);
6921
6922         swap(plane->state->fb, new_state->fb);
6923
6924         plane->state->src_x = new_state->src_x;
6925         plane->state->src_y = new_state->src_y;
6926         plane->state->src_w = new_state->src_w;
6927         plane->state->src_h = new_state->src_h;
6928         plane->state->crtc_x = new_state->crtc_x;
6929         plane->state->crtc_y = new_state->crtc_y;
6930         plane->state->crtc_w = new_state->crtc_w;
6931         plane->state->crtc_h = new_state->crtc_h;
6932
6933         handle_cursor_update(plane, old_state);
6934 }
6935
6936 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6937         .prepare_fb = dm_plane_helper_prepare_fb,
6938         .cleanup_fb = dm_plane_helper_cleanup_fb,
6939         .atomic_check = dm_plane_atomic_check,
6940         .atomic_async_check = dm_plane_atomic_async_check,
6941         .atomic_async_update = dm_plane_atomic_async_update
6942 };
6943
6944 /*
6945  * TODO: These are currently initialized to RGB formats only.
6946  * For future use cases we should either initialize them dynamically based on
6947  * plane capabilities, or initialize this array to all formats, so the internal
6948  * drm check will succeed, and let DC implement the proper check.
6949  */
6950 static const uint32_t rgb_formats[] = {
6951         DRM_FORMAT_XRGB8888,
6952         DRM_FORMAT_ARGB8888,
6953         DRM_FORMAT_RGBA8888,
6954         DRM_FORMAT_XRGB2101010,
6955         DRM_FORMAT_XBGR2101010,
6956         DRM_FORMAT_ARGB2101010,
6957         DRM_FORMAT_ABGR2101010,
6958         DRM_FORMAT_XBGR8888,
6959         DRM_FORMAT_ABGR8888,
6960         DRM_FORMAT_RGB565,
6961 };
6962
6963 static const uint32_t overlay_formats[] = {
6964         DRM_FORMAT_XRGB8888,
6965         DRM_FORMAT_ARGB8888,
6966         DRM_FORMAT_RGBA8888,
6967         DRM_FORMAT_XBGR8888,
6968         DRM_FORMAT_ABGR8888,
6969         DRM_FORMAT_RGB565
6970 };
6971
6972 static const u32 cursor_formats[] = {
6973         DRM_FORMAT_ARGB8888
6974 };
6975
6976 static int get_plane_formats(const struct drm_plane *plane,
6977                              const struct dc_plane_cap *plane_cap,
6978                              uint32_t *formats, int max_formats)
6979 {
6980         int i, num_formats = 0;
6981
6982         /*
6983          * TODO: Query support for each group of formats directly from
6984          * DC plane caps. This will require adding more formats to the
6985          * caps list.
6986          */
6987
6988         switch (plane->type) {
6989         case DRM_PLANE_TYPE_PRIMARY:
6990                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6991                         if (num_formats >= max_formats)
6992                                 break;
6993
6994                         formats[num_formats++] = rgb_formats[i];
6995                 }
6996
6997                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6998                         formats[num_formats++] = DRM_FORMAT_NV12;
6999                 if (plane_cap && plane_cap->pixel_format_support.p010)
7000                         formats[num_formats++] = DRM_FORMAT_P010;
7001                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7002                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7003                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7004                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7005                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7006                 }
7007                 break;
7008
7009         case DRM_PLANE_TYPE_OVERLAY:
7010                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7011                         if (num_formats >= max_formats)
7012                                 break;
7013
7014                         formats[num_formats++] = overlay_formats[i];
7015                 }
7016                 break;
7017
7018         case DRM_PLANE_TYPE_CURSOR:
7019                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7020                         if (num_formats >= max_formats)
7021                                 break;
7022
7023                         formats[num_formats++] = cursor_formats[i];
7024                 }
7025                 break;
7026         }
7027
7028         return num_formats;
7029 }
7030
7031 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7032                                 struct drm_plane *plane,
7033                                 unsigned long possible_crtcs,
7034                                 const struct dc_plane_cap *plane_cap)
7035 {
7036         uint32_t formats[32];
7037         int num_formats;
7038         int res = -EPERM;
7039         unsigned int supported_rotations;
7040         uint64_t *modifiers = NULL;
7041
7042         num_formats = get_plane_formats(plane, plane_cap, formats,
7043                                         ARRAY_SIZE(formats));
7044
7045         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7046         if (res)
7047                 return res;
7048
7049         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7050                                        &dm_plane_funcs, formats, num_formats,
7051                                        modifiers, plane->type, NULL);
7052         kfree(modifiers);
7053         if (res)
7054                 return res;
7055
7056         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7057             plane_cap && plane_cap->per_pixel_alpha) {
7058                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7059                                           BIT(DRM_MODE_BLEND_PREMULTI);
7060
7061                 drm_plane_create_alpha_property(plane);
7062                 drm_plane_create_blend_mode_property(plane, blend_caps);
7063         }
7064
7065         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7066             plane_cap &&
7067             (plane_cap->pixel_format_support.nv12 ||
7068              plane_cap->pixel_format_support.p010)) {
7069                 /* This only affects YUV formats. */
7070                 drm_plane_create_color_properties(
7071                         plane,
7072                         BIT(DRM_COLOR_YCBCR_BT601) |
7073                         BIT(DRM_COLOR_YCBCR_BT709) |
7074                         BIT(DRM_COLOR_YCBCR_BT2020),
7075                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7076                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7077                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7078         }
7079
7080         supported_rotations =
7081                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7082                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7083
7084         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7085             plane->type != DRM_PLANE_TYPE_CURSOR)
7086                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7087                                                    supported_rotations);
7088
7089         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7090
7091         /* Create (reset) the plane state */
7092         if (plane->funcs->reset)
7093                 plane->funcs->reset(plane);
7094
7095         return 0;
7096 }
7097
7098 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7099                                struct drm_plane *plane,
7100                                uint32_t crtc_index)
7101 {
7102         struct amdgpu_crtc *acrtc = NULL;
7103         struct drm_plane *cursor_plane;
7104         int res = -ENOMEM;
7106
7107         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7108         if (!cursor_plane)
7109                 goto fail;
7110
7111         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7112         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7113
7114         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7115         if (!acrtc)
7116                 goto fail;
7117
7118         res = drm_crtc_init_with_planes(
7119                         dm->ddev,
7120                         &acrtc->base,
7121                         plane,
7122                         cursor_plane,
7123                         &amdgpu_dm_crtc_funcs, NULL);
7124
7125         if (res)
7126                 goto fail;
7127
7128         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7129
7130         /* Create (reset) the plane state */
7131         if (acrtc->base.funcs->reset)
7132                 acrtc->base.funcs->reset(&acrtc->base);
7133
7134         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7135         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7136
7137         acrtc->crtc_id = crtc_index;
7138         acrtc->base.enabled = false;
7139         acrtc->otg_inst = -1;
7140
7141         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7142         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7143                                    true, MAX_COLOR_LUT_ENTRIES);
7144         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7145
7146         return 0;
7147
7148 fail:
7149         kfree(acrtc);
7150         kfree(cursor_plane);
7151         return res;
7152 }
7153
7155 static int to_drm_connector_type(enum signal_type st)
7156 {
7157         switch (st) {
7158         case SIGNAL_TYPE_HDMI_TYPE_A:
7159                 return DRM_MODE_CONNECTOR_HDMIA;
7160         case SIGNAL_TYPE_EDP:
7161                 return DRM_MODE_CONNECTOR_eDP;
7162         case SIGNAL_TYPE_LVDS:
7163                 return DRM_MODE_CONNECTOR_LVDS;
7164         case SIGNAL_TYPE_RGB:
7165                 return DRM_MODE_CONNECTOR_VGA;
7166         case SIGNAL_TYPE_DISPLAY_PORT:
7167         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7168                 return DRM_MODE_CONNECTOR_DisplayPort;
7169         case SIGNAL_TYPE_DVI_DUAL_LINK:
7170         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7171                 return DRM_MODE_CONNECTOR_DVID;
7172         case SIGNAL_TYPE_VIRTUAL:
7173                 return DRM_MODE_CONNECTOR_VIRTUAL;
7174
7175         default:
7176                 return DRM_MODE_CONNECTOR_Unknown;
7177         }
7178 }
7179
7180 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7181 {
7182         struct drm_encoder *encoder;
7183
7184         /* There is only one encoder per connector */
7185         drm_connector_for_each_possible_encoder(connector, encoder)
7186                 return encoder;
7187
7188         return NULL;
7189 }
7190
7191 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7192 {
7193         struct drm_encoder *encoder;
7194         struct amdgpu_encoder *amdgpu_encoder;
7195
7196         encoder = amdgpu_dm_connector_to_encoder(connector);
7197
7198         if (encoder == NULL)
7199                 return;
7200
7201         amdgpu_encoder = to_amdgpu_encoder(encoder);
7202
7203         amdgpu_encoder->native_mode.clock = 0;
7204
7205         if (!list_empty(&connector->probed_modes)) {
7206                 struct drm_display_mode *preferred_mode = NULL;
7207
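                     /*
                      * The probed list was sorted by
                      * amdgpu_dm_connector_ddc_get_modes(), so only the first
                      * (largest) entry is examined here.
                      */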
7208                 list_for_each_entry(preferred_mode,
7209                                     &connector->probed_modes,
7210                                     head) {
7211                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7212                                 amdgpu_encoder->native_mode = *preferred_mode;
7213
7214                         break;
7215                 }
7216
7217         }
7218 }
7219
7220 static struct drm_display_mode *
7221 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7222                              char *name,
7223                              int hdisplay, int vdisplay)
7224 {
7225         struct drm_device *dev = encoder->dev;
7226         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7227         struct drm_display_mode *mode = NULL;
7228         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7229
7230         mode = drm_mode_duplicate(dev, native_mode);
7231
7232         if (mode == NULL)
7233                 return NULL;
7234
7235         mode->hdisplay = hdisplay;
7236         mode->vdisplay = vdisplay;
7237         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7238         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7239
7240         return mode;
7242 }
7243
7244 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7245                                                  struct drm_connector *connector)
7246 {
7247         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7248         struct drm_display_mode *mode = NULL;
7249         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7250         struct amdgpu_dm_connector *amdgpu_dm_connector =
7251                                 to_amdgpu_dm_connector(connector);
7252         int i;
7253         int n;
7254         struct mode_size {
7255                 char name[DRM_DISPLAY_MODE_LEN];
7256                 int w;
7257                 int h;
7258         } common_modes[] = {
7259                 {  "640x480",  640,  480},
7260                 {  "800x600",  800,  600},
7261                 { "1024x768", 1024,  768},
7262                 { "1280x720", 1280,  720},
7263                 { "1280x800", 1280,  800},
7264                 {"1280x1024", 1280, 1024},
7265                 { "1440x900", 1440,  900},
7266                 {"1680x1050", 1680, 1050},
7267                 {"1600x1200", 1600, 1200},
7268                 {"1920x1080", 1920, 1080},
7269                 {"1920x1200", 1920, 1200}
7270         };
7271
7272         n = ARRAY_SIZE(common_modes);
7273
7274         for (i = 0; i < n; i++) {
7275                 struct drm_display_mode *curmode = NULL;
7276                 bool mode_existed = false;
7277
7278                 if (common_modes[i].w > native_mode->hdisplay ||
7279                     common_modes[i].h > native_mode->vdisplay ||
7280                    (common_modes[i].w == native_mode->hdisplay &&
7281                     common_modes[i].h == native_mode->vdisplay))
7282                         continue;
7283
7284                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7285                         if (common_modes[i].w == curmode->hdisplay &&
7286                             common_modes[i].h == curmode->vdisplay) {
7287                                 mode_existed = true;
7288                                 break;
7289                         }
7290                 }
7291
7292                 if (mode_existed)
7293                         continue;
7294
7295                 mode = amdgpu_dm_create_common_mode(encoder,
7296                                 common_modes[i].name, common_modes[i].w,
7297                                 common_modes[i].h);
                     if (!mode)
                             continue;
7298                 drm_mode_probed_add(connector, mode);
7299                 amdgpu_dm_connector->num_modes++;
7300         }
7301 }
7302
7303 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7304                                               struct edid *edid)
7305 {
7306         struct amdgpu_dm_connector *amdgpu_dm_connector =
7307                         to_amdgpu_dm_connector(connector);
7308
7309         if (edid) {
7310                 /* empty probed_modes */
7311                 INIT_LIST_HEAD(&connector->probed_modes);
7312                 amdgpu_dm_connector->num_modes =
7313                                 drm_add_edid_modes(connector, edid);
7314
7315                 /* Sort the probed modes before calling
7316                  * amdgpu_dm_get_native_mode(), since an EDID can have
7317                  * more than one preferred mode. Modes later in the
7318                  * probed list may be of a higher, preferred resolution:
7319                  * for example, a 3840x2160 preferred timing in the base
7320                  * EDID and a 4096x2160 preferred resolution in a later
7321                  * DID extension block.
7322                  */
7323                 drm_mode_sort(&connector->probed_modes);
7324                 amdgpu_dm_get_native_mode(connector);
7325
7326                 /* Freesync capabilities are reset by calling
7327                  * drm_add_edid_modes() and need to be
7328                  * restored here.
7329                  */
7330                 amdgpu_dm_update_freesync_caps(connector, edid);
7331         } else {
7332                 amdgpu_dm_connector->num_modes = 0;
7333         }
7334 }
7335
7336 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7337                               struct drm_display_mode *mode)
7338 {
7339         struct drm_display_mode *m;
7340
7341         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7342                 if (drm_mode_equal(m, mode))
7343                         return true;
7344         }
7345
7346         return false;
7347 }
7348
7349 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7350 {
7351         const struct drm_display_mode *m;
7352         struct drm_display_mode *new_mode;
7353         uint i;
7354         uint32_t new_modes_count = 0;
7355
7356         /* Standard FPS values
7357          *
7358          * 23.976   - TV/NTSC
7359          * 24       - Cinema
7360          * 25       - TV/PAL
7361          * 29.97    - TV/NTSC
7362          * 30       - TV/NTSC
7363          * 48       - Cinema HFR
7364          * 50       - TV/PAL
7365          * 60       - Commonly used
7366          * 48,72,96 - Multiples of 24
7367          */
7368         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7369                                          48000, 50000, 60000, 72000, 96000 };
7370
7371         /*
7372          * Find mode with highest refresh rate with the same resolution
7373          * as the preferred mode. Some monitors report a preferred mode
7374          * with lower resolution than the highest refresh rate supported.
7375          */
7376
7377         m = get_highest_refresh_rate_mode(aconnector, true);
7378         if (!m)
7379                 return 0;
7380
7381         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7382                 uint64_t target_vtotal, target_vtotal_diff;
7383                 uint64_t num, den;
7384
7385                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7386                         continue;
7387
7388                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7389                     common_rates[i] > aconnector->max_vfreq * 1000)
7390                         continue;
7391
7392                 num = (unsigned long long)m->clock * 1000 * 1000;
7393                 den = common_rates[i] * (unsigned long long)m->htotal;
7394                 target_vtotal = div_u64(num, den);
7395                 target_vtotal_diff = target_vtotal - m->vtotal;
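                     /*
                      * Illustrative example: a 1920x1080@60 mode with a
                      * 148.5 MHz clock, htotal 2200 and vtotal 1125, targeting
                      * 48 Hz, gives target_vtotal = 148500 * 10^6 /
                      * (48000 * 2200) ~= 1406, i.e. target_vtotal_diff = 281
                      * extra lines of front porch.
                      */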
7396
7397                 /* Check for illegal modes */
7398                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7399                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7400                     m->vtotal + target_vtotal_diff < m->vsync_end)
7401                         continue;
7402
7403                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7404                 if (!new_mode)
7405                         goto out;
7406
7407                 new_mode->vtotal += (u16)target_vtotal_diff;
7408                 new_mode->vsync_start += (u16)target_vtotal_diff;
7409                 new_mode->vsync_end += (u16)target_vtotal_diff;
7410                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7411                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7412
7413                 if (!is_duplicate_mode(aconnector, new_mode)) {
7414                         drm_mode_probed_add(&aconnector->base, new_mode);
7415                         new_modes_count += 1;
7416                 } else {
7417                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
7418         }
7419  out:
7420         return new_modes_count;
7421 }
7422
7423 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7424                                                    struct edid *edid)
7425 {
7426         struct amdgpu_dm_connector *amdgpu_dm_connector =
7427                 to_amdgpu_dm_connector(connector);
7428
7429         if (!(amdgpu_freesync_vid_mode && edid))
7430                 return;
7431
7432         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7433                 amdgpu_dm_connector->num_modes +=
7434                         add_fs_modes(amdgpu_dm_connector);
7435 }
7436
7437 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7438 {
7439         struct amdgpu_dm_connector *amdgpu_dm_connector =
7440                         to_amdgpu_dm_connector(connector);
7441         struct drm_encoder *encoder;
7442         struct edid *edid = amdgpu_dm_connector->edid;
7443
7444         encoder = amdgpu_dm_connector_to_encoder(connector);
7445
7446         if (!drm_edid_is_valid(edid)) {
7447                 amdgpu_dm_connector->num_modes =
7448                                 drm_add_modes_noedid(connector, 640, 480);
7449         } else {
7450                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7451                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7452                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7453         }
7454         amdgpu_dm_fbc_init(connector);
7455
7456         return amdgpu_dm_connector->num_modes;
7457 }
7458
7459 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7460                                      struct amdgpu_dm_connector *aconnector,
7461                                      int connector_type,
7462                                      struct dc_link *link,
7463                                      int link_index)
7464 {
7465         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7466
7467         /*
7468          * Some of the properties below require access to state, like bpc.
7469          * Allocate some default initial connector state with our reset helper.
7470          */
7471         if (aconnector->base.funcs->reset)
7472                 aconnector->base.funcs->reset(&aconnector->base);
7473
7474         aconnector->connector_id = link_index;
7475         aconnector->dc_link = link;
7476         aconnector->base.interlace_allowed = false;
7477         aconnector->base.doublescan_allowed = false;
7478         aconnector->base.stereo_allowed = false;
7479         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7480         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7481         aconnector->audio_inst = -1;
7482         mutex_init(&aconnector->hpd_lock);
7483
7484         /*
7485          * Configure HPD hot-plug support. connector->polled defaults to 0,
7486          * which means HPD hot plug is not supported.
7487          */
7488         switch (connector_type) {
7489         case DRM_MODE_CONNECTOR_HDMIA:
7490                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7491                 aconnector->base.ycbcr_420_allowed =
7492                         link->link_enc->features.hdmi_ycbcr420_supported;
7493                 break;
7494         case DRM_MODE_CONNECTOR_DisplayPort:
7495                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7496                 aconnector->base.ycbcr_420_allowed =
7497                         link->link_enc->features.dp_ycbcr420_supported;
7498                 break;
7499         case DRM_MODE_CONNECTOR_DVID:
7500                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7501                 break;
7502         default:
7503                 break;
7504         }
7505
7506         drm_object_attach_property(&aconnector->base.base,
7507                                 dm->ddev->mode_config.scaling_mode_property,
7508                                 DRM_MODE_SCALE_NONE);
7509
7510         drm_object_attach_property(&aconnector->base.base,
7511                                 adev->mode_info.underscan_property,
7512                                 UNDERSCAN_OFF);
7513         drm_object_attach_property(&aconnector->base.base,
7514                                 adev->mode_info.underscan_hborder_property,
7515                                 0);
7516         drm_object_attach_property(&aconnector->base.base,
7517                                 adev->mode_info.underscan_vborder_property,
7518                                 0);
7519
7520         if (!aconnector->mst_port)
7521                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7522
7523         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7524         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7525         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7526
7527         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7528             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7529                 drm_object_attach_property(&aconnector->base.base,
7530                                 adev->mode_info.abm_level_property, 0);
7531         }
7532
7533         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7534             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7535             connector_type == DRM_MODE_CONNECTOR_eDP) {
7536                 drm_object_attach_property(
7537                         &aconnector->base.base,
7538                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7539
7540                 if (!aconnector->mst_port)
7541                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7542
7543 #ifdef CONFIG_DRM_AMD_DC_HDCP
7544                 if (adev->dm.hdcp_workqueue)
7545                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7546 #endif
7547         }
7548 }
7549
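     /*
      * Route I2C transfers from the kernel I2C core through DC: translate each
      * struct i2c_msg into an i2c_payload and submit the command over the
      * link's DDC channel via dc_submit_i2c().
      */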
7550 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7551                               struct i2c_msg *msgs, int num)
7552 {
7553         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7554         struct ddc_service *ddc_service = i2c->ddc_service;
7555         struct i2c_command cmd;
7556         int i;
7557         int result = -EIO;
7558
7559         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7560
7561         if (!cmd.payloads)
7562                 return result;
7563
7564         cmd.number_of_payloads = num;
7565         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7566         cmd.speed = 100;
7567
7568         for (i = 0; i < num; i++) {
7569                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7570                 cmd.payloads[i].address = msgs[i].addr;
7571                 cmd.payloads[i].length = msgs[i].len;
7572                 cmd.payloads[i].data = msgs[i].buf;
7573         }
7574
7575         if (dc_submit_i2c(
7576                         ddc_service->ctx->dc,
7577                         ddc_service->ddc_pin->hw_info.ddc_channel,
7578                         &cmd))
7579                 result = num;
7580
7581         kfree(cmd.payloads);
7582         return result;
7583 }
7584
7585 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7586 {
7587         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7588 }
7589
7590 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7591         .master_xfer = amdgpu_dm_i2c_xfer,
7592         .functionality = amdgpu_dm_i2c_func,
7593 };
7594
7595 static struct amdgpu_i2c_adapter *
7596 create_i2c(struct ddc_service *ddc_service,
7597            int link_index,
7598            int *res)
7599 {
7600         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7601         struct amdgpu_i2c_adapter *i2c;
7602
7603         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7604         if (!i2c)
7605                 return NULL;
7606         i2c->base.owner = THIS_MODULE;
7607         i2c->base.class = I2C_CLASS_DDC;
7608         i2c->base.dev.parent = &adev->pdev->dev;
7609         i2c->base.algo = &amdgpu_dm_i2c_algo;
7610         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7611         i2c_set_adapdata(&i2c->base, i2c);
7612         i2c->ddc_service = ddc_service;
7613         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7614
7615         return i2c;
7616 }
7617
7619 /*
7620  * Note: this function assumes that dc_link_detect() was called for the
7621  * dc_link which will be represented by this aconnector.
7622  */
7623 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7624                                     struct amdgpu_dm_connector *aconnector,
7625                                     uint32_t link_index,
7626                                     struct amdgpu_encoder *aencoder)
7627 {
7628         int res = 0;
7629         int connector_type;
7630         struct dc *dc = dm->dc;
7631         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7632         struct amdgpu_i2c_adapter *i2c;
7633
7634         link->priv = aconnector;
7635
7636         DRM_DEBUG_DRIVER("%s()\n", __func__);
7637
7638         i2c = create_i2c(link->ddc, link->link_index, &res);
7639         if (!i2c) {
7640                 DRM_ERROR("Failed to create i2c adapter data\n");
7641                 return -ENOMEM;
7642         }
7643
7644         aconnector->i2c = i2c;
7645         res = i2c_add_adapter(&i2c->base);
7646
7647         if (res) {
7648                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7649                 goto out_free;
7650         }
7651
7652         connector_type = to_drm_connector_type(link->connector_signal);
7653
7654         res = drm_connector_init_with_ddc(
7655                         dm->ddev,
7656                         &aconnector->base,
7657                         &amdgpu_dm_connector_funcs,
7658                         connector_type,
7659                         &i2c->base);
7660
7661         if (res) {
7662                 DRM_ERROR("connector_init failed\n");
7663                 aconnector->connector_id = -1;
7664                 goto out_free;
7665         }
7666
7667         drm_connector_helper_add(
7668                         &aconnector->base,
7669                         &amdgpu_dm_connector_helper_funcs);
7670
7671         amdgpu_dm_connector_init_helper(
7672                 dm,
7673                 aconnector,
7674                 connector_type,
7675                 link,
7676                 link_index);
7677
7678         drm_connector_attach_encoder(
7679                 &aconnector->base, &aencoder->base);
7680
7681         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7682                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7683                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7684
7685 out_free:
7686         if (res) {
7687                 kfree(i2c);
7688                 aconnector->i2c = NULL;
7689         }
7690         return res;
7691 }
7692
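     /*
      * Build a possible_crtcs bitmask covering every CRTC; e.g. 4 CRTCs map to
      * 0xf (bits 0-3). Encoders created by DM can be routed to any CRTC.
      */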
7693 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7694 {
7695         switch (adev->mode_info.num_crtc) {
7696         case 1:
7697                 return 0x1;
7698         case 2:
7699                 return 0x3;
7700         case 3:
7701                 return 0x7;
7702         case 4:
7703                 return 0xf;
7704         case 5:
7705                 return 0x1f;
7706         case 6:
7707         default:
7708                 return 0x3f;
7709         }
7710 }
7711
7712 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7713                                   struct amdgpu_encoder *aencoder,
7714                                   uint32_t link_index)
7715 {
7716         struct amdgpu_device *adev = drm_to_adev(dev);
7717
7718         int res = drm_encoder_init(dev,
7719                                    &aencoder->base,
7720                                    &amdgpu_dm_encoder_funcs,
7721                                    DRM_MODE_ENCODER_TMDS,
7722                                    NULL);
7723
7724         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7725
7726         if (!res)
7727                 aencoder->encoder_id = link_index;
7728         else
7729                 aencoder->encoder_id = -1;
7730
7731         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7732
7733         return res;
7734 }
7735
7736 static void manage_dm_interrupts(struct amdgpu_device *adev,
7737                                  struct amdgpu_crtc *acrtc,
7738                                  bool enable)
7739 {
7740         /*
7741          * We have no guarantee that the frontend index maps to the same
7742          * backend index - some even map to more than one.
7743          *
7744          * TODO: Use a different interrupt or check DC itself for the mapping.
7745          */
7746         int irq_type =
7747                 amdgpu_display_crtc_idx_to_irq_type(
7748                         adev,
7749                         acrtc->crtc_id);
7750
7751         if (enable) {
7752                 drm_crtc_vblank_on(&acrtc->base);
7753                 amdgpu_irq_get(
7754                         adev,
7755                         &adev->pageflip_irq,
7756                         irq_type);
7757 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7758                 amdgpu_irq_get(
7759                         adev,
7760                         &adev->vline0_irq,
7761                         irq_type);
7762 #endif
7763         } else {
7764 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7765                 amdgpu_irq_put(
7766                         adev,
7767                         &adev->vline0_irq,
7768                         irq_type);
7769 #endif
7770                 amdgpu_irq_put(
7771                         adev,
7772                         &adev->pageflip_irq,
7773                         irq_type);
7774                 drm_crtc_vblank_off(&acrtc->base);
7775         }
7776 }
7777
7778 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7779                                       struct amdgpu_crtc *acrtc)
7780 {
7781         int irq_type =
7782                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7783
7784         /*
7785          * This reads the current state for the IRQ and forcibly reapplies
7786          * the setting to the hardware.
7787          */
7788         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7789 }
7790
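     /*
      * Returns true when the scaling mode or the underscan settings changed in
      * a way that requires the stream's scaling parameters to be reprogrammed.
      */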
7791 static bool
7792 is_scaling_state_different(const struct dm_connector_state *dm_state,
7793                            const struct dm_connector_state *old_dm_state)
7794 {
7795         if (dm_state->scaling != old_dm_state->scaling)
7796                 return true;
7797         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7798                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7799                         return true;
7800         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7801                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7802                         return true;
7803         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7804                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7805                 return true;
7806         return false;
7807 }
7808
7809 #ifdef CONFIG_DRM_AMD_DC_HDCP
7810 static bool is_content_protection_different(struct drm_connector_state *state,
7811                                             const struct drm_connector_state *old_state,
7812                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7813 {
7814         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7815         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7816
7817         /* Handle: Type0/1 change */
7818         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7819             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7820                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7821                 return true;
7822         }
7823
7824         /* Content protection is being re-enabled; ignore this transition.
7825          *
7826          * Handles:     ENABLED -> DESIRED
7827          */
7828         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7829             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7830                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7831                 return false;
7832         }
7833
7834         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7835          *
7836          * Handles:     UNDESIRED -> ENABLED
7837          */
7838         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7839             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7840                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7841
7842         /* Check that something is connected and enabled; otherwise we would
7843          * start HDCP with nothing driving it (hot plug, headless S3, DPMS).
7844          *
7845          * Handles:     DESIRED -> DESIRED (Special case)
7846          */
7847         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7848             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7849                 dm_con_state->update_hdcp = false;
7850                 return true;
7851         }
7852
7853         /*
7854          * Handles:     UNDESIRED -> UNDESIRED
7855          *              DESIRED -> DESIRED
7856          *              ENABLED -> ENABLED
7857          */
7858         if (old_state->content_protection == state->content_protection)
7859                 return false;
7860
7861         /*
7862          * Handles:     UNDESIRED -> DESIRED
7863          *              DESIRED -> UNDESIRED
7864          *              ENABLED -> UNDESIRED
7865          */
7866         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7867                 return true;
7868
7869         /*
7870          * Handles:     DESIRED -> ENABLED
7871          */
7872         return false;
7873 }
7874
7875 #endif
7876 static void remove_stream(struct amdgpu_device *adev,
7877                           struct amdgpu_crtc *acrtc,
7878                           struct dc_stream_state *stream)
7879 {
7880         /* this is the update mode case */
7881
7882         acrtc->otg_inst = -1;
7883         acrtc->enabled = false;
7884 }
7885
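     /*
      * Translate the cursor plane's DRM position into a dc_cursor_position.
      * A cursor partially off the top/left edge is expressed by clamping x/y
      * to 0 and shifting the hotspot instead, with translate_by_source set.
      */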
7886 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7887                                struct dc_cursor_position *position)
7888 {
7889         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7890         int x, y;
7891         int xorigin = 0, yorigin = 0;
7892
7893         if (!crtc || !plane->state->fb)
7894                 return 0;
7895
7896         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7897             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7898                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7899                           __func__,
7900                           plane->state->crtc_w,
7901                           plane->state->crtc_h);
7902                 return -EINVAL;
7903         }
7904
7905         x = plane->state->crtc_x;
7906         y = plane->state->crtc_y;
7907
7908         if (x <= -amdgpu_crtc->max_cursor_width ||
7909             y <= -amdgpu_crtc->max_cursor_height)
7910                 return 0;
7911
7912         if (x < 0) {
7913                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7914                 x = 0;
7915         }
7916         if (y < 0) {
7917                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7918                 y = 0;
7919         }
7920         position->enable = true;
7921         position->translate_by_source = true;
7922         position->x = x;
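             /*
              * Async commits bypass the regular state swap, so hand-copy the
              * new framebuffer and position into the current plane state
              * before programming the cursor.
              */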
7923         position->y = y;
7924         position->x_hotspot = xorigin;
7925         position->y_hotspot = yorigin;
7926
7927         return 0;
7928 }
7929
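     /*
      * Program the DC cursor attributes and position for the CRTC that the
      * cursor plane is (or, when being disabled, was) attached to. Called
      * from the async cursor update path above, and from any commit path
      * that needs to re-apply cursor state.
      */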
7930 static void handle_cursor_update(struct drm_plane *plane,
7931                                  struct drm_plane_state *old_plane_state)
7932 {
7933         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7934         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7935         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7936         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7937         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7938         uint64_t address = afb ? afb->address : 0;
7939         struct dc_cursor_position position = {0};
7940         struct dc_cursor_attributes attributes;
7941         int ret;
7942
7943         if (!plane->state->fb && !old_plane_state->fb)
7944                 return;
7945
7946         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7947                       __func__,
7948                       amdgpu_crtc->crtc_id,
7949                       plane->state->crtc_w,
7950                       plane->state->crtc_h);
7951
7952         ret = get_cursor_position(plane, crtc, &position);
7953         if (ret)
7954                 return;
7955
7956         if (!position.enable) {
7957                 /* turn off cursor */
7958                 if (crtc_state && crtc_state->stream) {
7959                         mutex_lock(&adev->dm.dc_lock);
7960                         dc_stream_set_cursor_position(crtc_state->stream,
7961                                                       &position);
7962                         mutex_unlock(&adev->dm.dc_lock);
7963                 }
7964                 return;
7965         }
7966
7967         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7968         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7969
7970         memset(&attributes, 0, sizeof(attributes));
7971         attributes.address.high_part = upper_32_bits(address);
7972         attributes.address.low_part  = lower_32_bits(address);
7973         attributes.width             = plane->state->crtc_w;
7974         attributes.height            = plane->state->crtc_h;
7975         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7976         attributes.rotation_angle    = 0;
7977         attributes.attribute_flags.value = 0;
7978
7979         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7980
7981         if (crtc_state->stream) {
7982                 mutex_lock(&adev->dm.dc_lock);
7983                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7984                                                          &attributes))
7985                         DRM_ERROR("DC failed to set cursor attributes\n");
7986
7987                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7988                                                    &position))
7989                         DRM_ERROR("DC failed to set cursor position\n");
7990                 mutex_unlock(&adev->dm.dc_lock);
7991         }
7992 }
7993
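     /*
      * Arm the flip completion: stash the pending pageflip event on the CRTC
      * and mark the flip as submitted so the pflip IRQ handler can send the
      * event once the flip completes. Caller must hold event_lock.
      */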
7994 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7995 {
7997         assert_spin_locked(&acrtc->base.dev->event_lock);
7998         WARN_ON(acrtc->event);
7999
8000         acrtc->event = acrtc->base.state->event;
8001
8002         /* Set the flip status */
8003         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8004
8005         /* Mark this event as consumed */
8006         acrtc->base.state->event = NULL;
8007
8008         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8009                      acrtc->crtc_id);
8010 }
8011
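     /*
      * Recompute the VRR state and infopacket for a stream around a page
      * flip, and cache the results in dm_irq_params under event_lock. On
      * pre-AI hardware with VRR active, vmin/vmax are also pushed to DC
      * here, before the frame ends.
      */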
8012 static void update_freesync_state_on_stream(
8013         struct amdgpu_display_manager *dm,
8014         struct dm_crtc_state *new_crtc_state,
8015         struct dc_stream_state *new_stream,
8016         struct dc_plane_state *surface,
8017         u32 flip_timestamp_in_us)
8018 {
8019         struct mod_vrr_params vrr_params;
8020         struct dc_info_packet vrr_infopacket = {0};
8021         struct amdgpu_device *adev = dm->adev;
8022         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8023         unsigned long flags;
8024         bool pack_sdp_v1_3 = false;
8025
8026         if (!new_stream)
8027                 return;
8028
8029         /*
8030          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8031          * For now it's sufficient to just guard against these conditions.
8032          */
8033
8034         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8035                 return;
8036
8037         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8038         vrr_params = acrtc->dm_irq_params.vrr_params;
8039
8040         if (surface) {
8041                 mod_freesync_handle_preflip(
8042                         dm->freesync_module,
8043                         surface,
8044                         new_stream,
8045                         flip_timestamp_in_us,
8046                         &vrr_params);
8047
8048                 if (adev->family < AMDGPU_FAMILY_AI &&
8049                     amdgpu_dm_vrr_active(new_crtc_state)) {
8050                         mod_freesync_handle_v_update(dm->freesync_module,
8051                                                      new_stream, &vrr_params);
8052
8053                         /* Need to call this before the frame ends. */
8054                         dc_stream_adjust_vmin_vmax(dm->dc,
8055                                                    new_crtc_state->stream,
8056                                                    &vrr_params.adjust);
8057                 }
8058         }
8059
8060         mod_freesync_build_vrr_infopacket(
8061                 dm->freesync_module,
8062                 new_stream,
8063                 &vrr_params,
8064                 PACKET_TYPE_VRR,
8065                 TRANSFER_FUNC_UNKNOWN,
8066                 &vrr_infopacket,
8067                 pack_sdp_v1_3);
8068
8069         new_crtc_state->freesync_timing_changed |=
8070                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8071                         &vrr_params.adjust,
8072                         sizeof(vrr_params.adjust)) != 0);
8073
8074         new_crtc_state->freesync_vrr_info_changed |=
8075                 (memcmp(&new_crtc_state->vrr_infopacket,
8076                         &vrr_infopacket,
8077                         sizeof(vrr_infopacket)) != 0);
8078
8079         acrtc->dm_irq_params.vrr_params = vrr_params;
8080         new_crtc_state->vrr_infopacket = vrr_infopacket;
8081
8082         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8083         new_stream->vrr_infopacket = vrr_infopacket;
8084
8085         if (new_crtc_state->freesync_vrr_info_changed)
8086                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8087                               new_crtc_state->base.crtc->base.id,
8088                               (int)new_crtc_state->base.vrr_enabled,
8089                               (int)vrr_params.state);
8090
8091         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8092 }
8093
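     /*
      * Recompute the freesync/VRR parameters from the new CRTC state and
      * mirror them into dm_irq_params under event_lock, so the vblank and
      * vupdate interrupt handlers work from a consistent copy.
      */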
8094 static void update_stream_irq_parameters(
8095         struct amdgpu_display_manager *dm,
8096         struct dm_crtc_state *new_crtc_state)
8097 {
8098         struct dc_stream_state *new_stream = new_crtc_state->stream;
8099         struct mod_vrr_params vrr_params;
8100         struct mod_freesync_config config = new_crtc_state->freesync_config;
8101         struct amdgpu_device *adev = dm->adev;
8102         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8103         unsigned long flags;
8104
8105         if (!new_stream)
8106                 return;
8107
8108         /*
8109          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8110          * For now it's sufficient to just guard against these conditions.
8111          */
8112         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8113                 return;
8114
8115         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8116         vrr_params = acrtc->dm_irq_params.vrr_params;
8117
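        /*
         * Resolve the effective VRR state: a fixed-refresh (freesync video)
         * config chosen in atomic check takes precedence, otherwise follow
         * the DRM vrr_enabled property, or mark VRR unsupported entirely.
         */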
8118         if (new_crtc_state->vrr_supported &&
8119             config.min_refresh_in_uhz &&
8120             config.max_refresh_in_uhz) {
8121                 /*
                 * If a freesync-compatible mode was set, config.state will
                 * have been set in atomic check.
8124                  */
8125                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8126                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8127                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8128                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8129                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8130                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8131                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8132                 } else {
8133                         config.state = new_crtc_state->base.vrr_enabled ?
8134                                                      VRR_STATE_ACTIVE_VARIABLE :
8135                                                      VRR_STATE_INACTIVE;
8136                 }
8137         } else {
8138                 config.state = VRR_STATE_UNSUPPORTED;
8139         }
8140
8141         mod_freesync_build_vrr_params(dm->freesync_module,
8142                                       new_stream,
8143                                       &config, &vrr_params);
8144
8145         new_crtc_state->freesync_timing_changed |=
8146                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8147                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8148
8149         new_crtc_state->freesync_config = config;
8150         /* Copy state for access from DM IRQ handler */
8151         acrtc->dm_irq_params.freesync_config = config;
8152         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8153         acrtc->dm_irq_params.vrr_params = vrr_params;
8154         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8155 }
8156
8157 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8158                                             struct dm_crtc_state *new_state)
8159 {
8160         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8161         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8162
8163         if (!old_vrr_active && new_vrr_active) {
8164                 /* Transition VRR inactive -> active:
8165                  * While VRR is active, we must not disable vblank irq, as a
                 * reenable after disable would compute bogus vblank/pflip
                 * timestamps if the reenable happens inside the display
                 * front porch.
8168                  *
8169                  * We also need vupdate irq for the actual core vblank handling
8170                  * at end of vblank.
8171                  */
8172                 dm_set_vupdate_irq(new_state->base.crtc, true);
8173                 drm_crtc_vblank_get(new_state->base.crtc);
8174                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8175                                  __func__, new_state->base.crtc->base.id);
8176         } else if (old_vrr_active && !new_vrr_active) {
8177                 /* Transition VRR active -> inactive:
8178                  * Allow vblank irq disable again for fixed refresh rate.
8179                  */
8180                 dm_set_vupdate_irq(new_state->base.crtc, false);
8181                 drm_crtc_vblank_put(new_state->base.crtc);
8182                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8183                                  __func__, new_state->base.crtc->base.id);
8184         }
8185 }
8186
8187 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8188 {
8189         struct drm_plane *plane;
8190         struct drm_plane_state *old_plane_state;
8191         int i;
8192
8193         /*
8194          * TODO: Make this per-stream so we don't issue redundant updates for
8195          * commits with multiple streams.
8196          */
8197         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8198                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8199                         handle_cursor_update(plane, old_plane_state);
8200 }
8201
8202 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8203                                     struct dc_state *dc_state,
8204                                     struct drm_device *dev,
8205                                     struct amdgpu_display_manager *dm,
8206                                     struct drm_crtc *pcrtc,
8207                                     bool wait_for_vblank)
8208 {
8209         uint32_t i;
8210         uint64_t timestamp_ns;
8211         struct drm_plane *plane;
8212         struct drm_plane_state *old_plane_state, *new_plane_state;
8213         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8214         struct drm_crtc_state *new_pcrtc_state =
8215                         drm_atomic_get_new_crtc_state(state, pcrtc);
8216         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8217         struct dm_crtc_state *dm_old_crtc_state =
8218                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8219         int planes_count = 0, vpos, hpos;
8220         long r;
8221         unsigned long flags;
8222         struct amdgpu_bo *abo;
8223         uint32_t target_vblank, last_flip_vblank;
8224         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8225         bool pflip_present = false;
8226         struct {
8227                 struct dc_surface_update surface_updates[MAX_SURFACES];
8228                 struct dc_plane_info plane_infos[MAX_SURFACES];
8229                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8230                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8231                 struct dc_stream_update stream_update;
8232         } *bundle;
8233
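        /*
         * The update bundle holds MAX_SURFACES copies of several DC structs
         * and is too large for the kernel stack, hence the heap allocation.
         */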
8234         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8235
8236         if (!bundle) {
8237                 dm_error("Failed to allocate update bundle\n");
8238                 goto cleanup;
8239         }
8240
8241         /*
8242          * Disable the cursor first if we're disabling all the planes.
8243          * It'll remain on the screen after the planes are re-enabled
8244          * if we don't.
8245          */
8246         if (acrtc_state->active_planes == 0)
8247                 amdgpu_dm_commit_cursors(state);
8248
8249         /* update planes when needed */
8250         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8251                 struct drm_crtc *crtc = new_plane_state->crtc;
8252                 struct drm_crtc_state *new_crtc_state;
8253                 struct drm_framebuffer *fb = new_plane_state->fb;
8254                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8255                 bool plane_needs_flip;
8256                 struct dc_plane_state *dc_plane;
8257                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8258
8259                 /* Cursor plane is handled after stream updates */
8260                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8261                         continue;
8262
8263                 if (!fb || !crtc || pcrtc != crtc)
8264                         continue;
8265
8266                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8267                 if (!new_crtc_state->active)
8268                         continue;
8269
8270                 dc_plane = dm_new_plane_state->dc_state;
8271
8272                 bundle->surface_updates[planes_count].surface = dc_plane;
8273                 if (new_pcrtc_state->color_mgmt_changed) {
8274                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8275                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8276                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8277                 }
8278
8279                 fill_dc_scaling_info(new_plane_state,
8280                                      &bundle->scaling_infos[planes_count]);
8281
8282                 bundle->surface_updates[planes_count].scaling_info =
8283                         &bundle->scaling_infos[planes_count];
8284
8285                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8286
8287                 pflip_present = pflip_present || plane_needs_flip;
8288
8289                 if (!plane_needs_flip) {
8290                         planes_count += 1;
8291                         continue;
8292                 }
8293
8294                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8295
8296                 /*
8297                  * Wait for all fences on this FB. Do limited wait to avoid
8298                  * deadlock during GPU reset when this fence will not signal
8299                  * but we hold reservation lock for the BO.
8300                  */
8301                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8302                                                         false,
8303                                                         msecs_to_jiffies(5000));
8304                 if (unlikely(r <= 0))
8305                         DRM_ERROR("Waiting for fences timed out!");
8306
8307                 fill_dc_plane_info_and_addr(
8308                         dm->adev, new_plane_state,
8309                         afb->tiling_flags,
8310                         &bundle->plane_infos[planes_count],
8311                         &bundle->flip_addrs[planes_count].address,
8312                         afb->tmz_surface, false);
8313
8314                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8315                                  new_plane_state->plane->index,
8316                                  bundle->plane_infos[planes_count].dcc.enable);
8317
8318                 bundle->surface_updates[planes_count].plane_info =
8319                         &bundle->plane_infos[planes_count];
8320
8321                 /*
8322                  * Only allow immediate flips for fast updates that don't
                 * change FB pitch, DCC state, rotation or mirroring.
8324                  */
8325                 bundle->flip_addrs[planes_count].flip_immediate =
8326                         crtc->state->async_flip &&
8327                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8328
8329                 timestamp_ns = ktime_get_ns();
8330                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8331                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8332                 bundle->surface_updates[planes_count].surface = dc_plane;
8333
8334                 if (!bundle->surface_updates[planes_count].surface) {
8335                         DRM_ERROR("No surface for CRTC: id=%d\n",
8336                                         acrtc_attach->crtc_id);
8337                         continue;
8338                 }
8339
8340                 if (plane == pcrtc->primary)
8341                         update_freesync_state_on_stream(
8342                                 dm,
8343                                 acrtc_state,
8344                                 acrtc_state->stream,
8345                                 dc_plane,
8346                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8347
8348                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8349                                  __func__,
8350                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8351                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8352
8353                 planes_count += 1;
8354
8355         }
8356
8357         if (pflip_present) {
8358                 if (!vrr_active) {
8359                         /* Use old throttling in non-vrr fixed refresh rate mode
8360                          * to keep flip scheduling based on target vblank counts
8361                          * working in a backwards compatible way, e.g., for
8362                          * clients using the GLX_OML_sync_control extension or
8363                          * DRI3/Present extension with defined target_msc.
8364                          */
8365                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                } else {
8368                         /* For variable refresh rate mode only:
8369                          * Get vblank of last completed flip to avoid > 1 vrr
8370                          * flips per video frame by use of throttling, but allow
8371                          * flip programming anywhere in the possibly large
8372                          * variable vrr vblank interval for fine-grained flip
8373                          * timing control and more opportunity to avoid stutter
8374                          * on late submission of flips.
8375                          */
8376                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8377                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8378                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8379                 }
8380
8381                 target_vblank = last_flip_vblank + wait_for_vblank;
8382
8383                 /*
8384                  * Wait until we're out of the vertical blank period before the one
8385                  * targeted by the flip
8386                  */
8387                 while ((acrtc_attach->enabled &&
8388                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8389                                                             0, &vpos, &hpos, NULL,
8390                                                             NULL, &pcrtc->hwmode)
8391                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8392                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8393                         (int)(target_vblank -
8394                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8395                         usleep_range(1000, 1100);
8396                 }
8397
                /*
8399                  * Prepare the flip event for the pageflip interrupt to handle.
8400                  *
8401                  * This only works in the case where we've already turned on the
8402                  * appropriate hardware blocks (eg. HUBP) so in the transition case
8403                  * from 0 -> n planes we have to skip a hardware generated event
8404                  * and rely on sending it from software.
8405                  */
8406                 if (acrtc_attach->base.state->event &&
8407                     acrtc_state->active_planes > 0) {
8408                         drm_crtc_vblank_get(pcrtc);
8409
8410                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8411
8412                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8413                         prepare_flip_isr(acrtc_attach);
8414
8415                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8416                 }
8417
8418                 if (acrtc_state->stream) {
8419                         if (acrtc_state->freesync_vrr_info_changed)
8420                                 bundle->stream_update.vrr_infopacket =
8421                                         &acrtc_state->stream->vrr_infopacket;
8422                 }
8423         }
8424
8425         /* Update the planes if changed or disable if we don't have any. */
8426         if ((planes_count || acrtc_state->active_planes == 0) &&
8427                 acrtc_state->stream) {
8428                 bundle->stream_update.stream = acrtc_state->stream;
8429                 if (new_pcrtc_state->mode_changed) {
8430                         bundle->stream_update.src = acrtc_state->stream->src;
8431                         bundle->stream_update.dst = acrtc_state->stream->dst;
8432                 }
8433
8434                 if (new_pcrtc_state->color_mgmt_changed) {
8435                         /*
8436                          * TODO: This isn't fully correct since we've actually
8437                          * already modified the stream in place.
8438                          */
8439                         bundle->stream_update.gamut_remap =
8440                                 &acrtc_state->stream->gamut_remap_matrix;
8441                         bundle->stream_update.output_csc_transform =
8442                                 &acrtc_state->stream->csc_color_matrix;
8443                         bundle->stream_update.out_transfer_func =
8444                                 acrtc_state->stream->out_transfer_func;
8445                 }
8446
8447                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8448                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8449                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8450
8451                 /*
8452                  * If FreeSync state on the stream has changed then we need to
8453                  * re-adjust the min/max bounds now that DC doesn't handle this
8454                  * as part of commit.
8455                  */
8456                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8457                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8458                         dc_stream_adjust_vmin_vmax(
8459                                 dm->dc, acrtc_state->stream,
8460                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8461                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8462                 }
8463                 mutex_lock(&dm->dc_lock);
8464                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8465                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8466                         amdgpu_dm_psr_disable(acrtc_state->stream);
8467
8468                 dc_commit_updates_for_stream(dm->dc,
8469                                                      bundle->surface_updates,
8470                                                      planes_count,
8471                                                      acrtc_state->stream,
8472                                                      &bundle->stream_update,
8473                                                      dc_state);
8474
                /*
8476                  * Enable or disable the interrupts on the backend.
8477                  *
8478                  * Most pipes are put into power gating when unused.
8479                  *
8480                  * When power gating is enabled on a pipe we lose the
8481                  * interrupt enablement state when power gating is disabled.
8482                  *
8483                  * So we need to update the IRQ control state in hardware
8484                  * whenever the pipe turns on (since it could be previously
8485                  * power gated) or off (since some pipes can't be power gated
8486                  * on some ASICs).
8487                  */
8488                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8489                         dm_update_pflip_irq_state(drm_to_adev(dev),
8490                                                   acrtc_attach);
8491
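                /*
                 * PSR: set up the link on a full update when the sink
                 * supports PSR but the feature is not yet enabled; actually
                 * engage PSR only on fast (flip-only) updates while it is
                 * not already active.
                 */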
8492                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8493                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8494                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8495                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8496                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8497                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8498                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8499                         amdgpu_dm_psr_enable(acrtc_state->stream);
8500                 }
8501
8502                 mutex_unlock(&dm->dc_lock);
8503         }
8504
8505         /*
8506          * Update cursor state *after* programming all the planes.
8507          * This avoids redundant programming in the case where we're going
8508          * to be disabling a single plane - those pipes are being disabled.
8509          */
8510         if (acrtc_state->active_planes)
8511                 amdgpu_dm_commit_cursors(state);
8512
8513 cleanup:
8514         kfree(bundle);
8515 }
8516
8517 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8518                                    struct drm_atomic_state *state)
8519 {
8520         struct amdgpu_device *adev = drm_to_adev(dev);
8521         struct amdgpu_dm_connector *aconnector;
8522         struct drm_connector *connector;
8523         struct drm_connector_state *old_con_state, *new_con_state;
8524         struct drm_crtc_state *new_crtc_state;
8525         struct dm_crtc_state *new_dm_crtc_state;
8526         const struct dc_stream_status *status;
8527         int i, inst;
8528
8529         /* Notify device removals. */
8530         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8531                 if (old_con_state->crtc != new_con_state->crtc) {
8532                         /* CRTC changes require notification. */
8533                         goto notify;
8534                 }
8535
8536                 if (!new_con_state->crtc)
8537                         continue;
8538
8539                 new_crtc_state = drm_atomic_get_new_crtc_state(
8540                         state, new_con_state->crtc);
8541
8542                 if (!new_crtc_state)
8543                         continue;
8544
8545                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8546                         continue;
8547
8548         notify:
8549                 aconnector = to_amdgpu_dm_connector(connector);
8550
8551                 mutex_lock(&adev->dm.audio_lock);
8552                 inst = aconnector->audio_inst;
8553                 aconnector->audio_inst = -1;
8554                 mutex_unlock(&adev->dm.audio_lock);
8555
8556                 amdgpu_dm_audio_eld_notify(adev, inst);
8557         }
8558
8559         /* Notify audio device additions. */
8560         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8561                 if (!new_con_state->crtc)
8562                         continue;
8563
8564                 new_crtc_state = drm_atomic_get_new_crtc_state(
8565                         state, new_con_state->crtc);
8566
8567                 if (!new_crtc_state)
8568                         continue;
8569
8570                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8571                         continue;
8572
8573                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8574                 if (!new_dm_crtc_state->stream)
8575                         continue;
8576
8577                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8578                 if (!status)
8579                         continue;
8580
8581                 aconnector = to_amdgpu_dm_connector(connector);
8582
8583                 mutex_lock(&adev->dm.audio_lock);
8584                 inst = status->audio_inst;
8585                 aconnector->audio_inst = inst;
8586                 mutex_unlock(&adev->dm.audio_lock);
8587
8588                 amdgpu_dm_audio_eld_notify(adev, inst);
8589         }
8590 }
8591
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
8600 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8601                                                 struct dc_stream_state *stream_state)
8602 {
8603         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8604 }
8605
8606 /**
8607  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8608  * @state: The atomic state to commit
8609  *
8610  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure,
 * since atomic check should have filtered anything non-kosher.
8613  */
8614 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8615 {
8616         struct drm_device *dev = state->dev;
8617         struct amdgpu_device *adev = drm_to_adev(dev);
8618         struct amdgpu_display_manager *dm = &adev->dm;
8619         struct dm_atomic_state *dm_state;
8620         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8621         uint32_t i, j;
8622         struct drm_crtc *crtc;
8623         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8624         unsigned long flags;
8625         bool wait_for_vblank = true;
8626         struct drm_connector *connector;
8627         struct drm_connector_state *old_con_state, *new_con_state;
8628         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8629         int crtc_disable_count = 0;
8630         bool mode_set_reset_required = false;
8631
8632         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8633
8634         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8635
8636         dm_state = dm_atomic_get_new_state(state);
8637         if (dm_state && dm_state->context) {
8638                 dc_state = dm_state->context;
8639         } else {
8640                 /* No state changes, retain current state. */
8641                 dc_state_temp = dc_create_state(dm->dc);
8642                 ASSERT(dc_state_temp);
8643                 dc_state = dc_state_temp;
8644                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8645         }
8646
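        /*
         * First pass: for CRTCs that are being disabled or modeset, turn
         * their interrupts off and drop the old stream reference before DC
         * reprograms the hardware.
         */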
8647         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8648                                        new_crtc_state, i) {
8649                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8650
8651                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8652
8653                 if (old_crtc_state->active &&
8654                     (!new_crtc_state->active ||
8655                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8656                         manage_dm_interrupts(adev, acrtc, false);
8657                         dc_stream_release(dm_old_crtc_state->stream);
8658                 }
8659         }
8660
8661         drm_atomic_helper_calc_timestamping_constants(state);
8662
8663         /* update changed items */
8664         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8665                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8666
8667                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8668                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8669
8670                 DRM_DEBUG_ATOMIC(
8671                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d, active_changed:%d, "
                        "connectors_changed:%d\n",
8674                         acrtc->crtc_id,
8675                         new_crtc_state->enable,
8676                         new_crtc_state->active,
8677                         new_crtc_state->planes_changed,
8678                         new_crtc_state->mode_changed,
8679                         new_crtc_state->active_changed,
8680                         new_crtc_state->connectors_changed);
8681
8682                 /* Disable cursor if disabling crtc */
8683                 if (old_crtc_state->active && !new_crtc_state->active) {
8684                         struct dc_cursor_position position;
8685
8686                         memset(&position, 0, sizeof(position));
8687                         mutex_lock(&dm->dc_lock);
8688                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8689                         mutex_unlock(&dm->dc_lock);
8690                 }
8691
8692                 /* Copy all transient state flags into dc state */
8693                 if (dm_new_crtc_state->stream) {
8694                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8695                                                             dm_new_crtc_state->stream);
8696                 }
8697
                /*
                 * Handle the headless hotplug case, updating new_state and
                 * aconnector as needed.
                 */
8702                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8703
8704                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8705
8706                         if (!dm_new_crtc_state->stream) {
8707                                 /*
8708                                  * this could happen because of issues with
8709                                  * userspace notifications delivery.
8710                                  * In this case userspace tries to set mode on
8711                                  * display which is disconnected in fact.
8712                                  * dc_sink is NULL in this case on aconnector.
8713                                  * We expect reset mode will come soon.
8714                                  *
                                 * This can also happen when an unplug occurs
                                 * during the resume sequence.
8717                                  *
8718                                  * In this case, we want to pretend we still
8719                                  * have a sink to keep the pipe running so that
8720                                  * hw state is consistent with the sw state
8721                                  */
8722                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8723                                                 __func__, acrtc->base.base.id);
8724                                 continue;
8725                         }
8726
8727                         if (dm_old_crtc_state->stream)
8728                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8729
8730                         pm_runtime_get_noresume(dev->dev);
8731
8732                         acrtc->enabled = true;
8733                         acrtc->hw_mode = new_crtc_state->mode;
8734                         crtc->hwmode = new_crtc_state->mode;
8735                         mode_set_reset_required = true;
8736                 } else if (modereset_required(new_crtc_state)) {
8737                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8738                         /* i.e. reset mode */
8739                         if (dm_old_crtc_state->stream)
8740                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8741
8742                         mode_set_reset_required = true;
8743                 }
8744         } /* for_each_crtc_in_state() */
8745
8746         if (dc_state) {
                /* if there was a mode set or reset, disable eDP PSR */
8748                 if (mode_set_reset_required)
8749                         amdgpu_dm_psr_disable_all(dm);
8750
8751                 dm_enable_per_frame_crtc_master_sync(dc_state);
8752                 mutex_lock(&dm->dc_lock);
8753                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
#if defined(CONFIG_DRM_AMD_DC_DCN)
                /* Allow idle optimization when vblank count is 0 for display off */
                if (dm->active_vblank_irq_count == 0)
                        dc_allow_idle_optimizations(dm->dc, true);
#endif
8759                 mutex_unlock(&dm->dc_lock);
8760         }
8761
8762         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8763                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8764
8765                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8766
8767                 if (dm_new_crtc_state->stream != NULL) {
8768                         const struct dc_stream_status *status =
8769                                         dc_stream_get_status(dm_new_crtc_state->stream);
8770
8771                         if (!status)
8772                                 status = dc_stream_get_status_from_state(dc_state,
8773                                                                          dm_new_crtc_state->stream);
8774                         if (!status)
8775                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8776                         else
8777                                 acrtc->otg_inst = status->primary_otg_inst;
8778                 }
8779         }
8780 #ifdef CONFIG_DRM_AMD_DC_HDCP
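        /*
         * HDCP: if a stream went away while content protection was ENABLED,
         * reset the display's HDCP state and fall back to DESIRED so that
         * protection is renegotiated when the CRTC is re-enabled; otherwise
         * propagate any content protection changes to the HDCP workqueue.
         */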
8781         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8782                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8783                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8784                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8785
8786                 new_crtc_state = NULL;
8787
8788                 if (acrtc)
8789                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8790
8791                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8792
8793                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8794                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8795                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8796                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8797                         dm_new_con_state->update_hdcp = true;
8798                         continue;
8799                 }
8800
8801                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8802                         hdcp_update_display(
8803                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8804                                 new_con_state->hdcp_content_type,
8805                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8806         }
8807 #endif
8808
8809         /* Handle connector state changes */
8810         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8811                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8812                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8813                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8814                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8815                 struct dc_stream_update stream_update;
8816                 struct dc_info_packet hdr_packet;
8817                 struct dc_stream_status *status = NULL;
8818                 bool abm_changed, hdr_changed, scaling_changed;
8819
8820                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8821                 memset(&stream_update, 0, sizeof(stream_update));
8822
8823                 if (acrtc) {
8824                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8825                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8826                 }
8827
8828                 /* Skip any modesets/resets */
8829                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8830                         continue;
8831
8832                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8833                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8834
8835                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8836                                                              dm_old_con_state);
8837
8838                 abm_changed = dm_new_crtc_state->abm_level !=
8839                               dm_old_crtc_state->abm_level;
8840
8841                 hdr_changed =
8842                         is_hdr_metadata_different(old_con_state, new_con_state);
8843
8844                 if (!scaling_changed && !abm_changed && !hdr_changed)
8845                         continue;
8846
8847                 stream_update.stream = dm_new_crtc_state->stream;
8848                 if (scaling_changed) {
8849                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8850                                         dm_new_con_state, dm_new_crtc_state->stream);
8851
8852                         stream_update.src = dm_new_crtc_state->stream->src;
8853                         stream_update.dst = dm_new_crtc_state->stream->dst;
8854                 }
8855
8856                 if (abm_changed) {
8857                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8858
8859                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8860                 }
8861
8862                 if (hdr_changed) {
8863                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8864                         stream_update.hdr_static_metadata = &hdr_packet;
8865                 }
8866
8867                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8868                 WARN_ON(!status);
8869                 WARN_ON(!status->plane_count);
8870
8871                 /*
8872                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8873                  * Here we create an empty update on each plane.
8874                  * To fix this, DC should permit updating only stream properties.
8875                  */
8876                 for (j = 0; j < status->plane_count; j++)
8877                         dummy_updates[j].surface = status->plane_states[0];
8878
8879
8880                 mutex_lock(&dm->dc_lock);
8881                 dc_commit_updates_for_stream(dm->dc,
8882                                                      dummy_updates,
8883                                                      status->plane_count,
8884                                                      dm_new_crtc_state->stream,
8885                                                      &stream_update,
8886                                                      dc_state);
8887                 mutex_unlock(&dm->dc_lock);
8888         }
8889
8890         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8891         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8892                                       new_crtc_state, i) {
8893                 if (old_crtc_state->active && !new_crtc_state->active)
8894                         crtc_disable_count++;
8895
8896                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8897                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8898
8899                 /* For freesync config update on crtc state and params for irq */
8900                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8901
8902                 /* Handle vrr on->off / off->on transitions */
8903                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8904                                                 dm_new_crtc_state);
8905         }
8906
        /*
         * Enable interrupts for CRTCs that are newly enabled or went through
         * a modeset. This is intentionally deferred until after the front end
         * state has been modified, so that the OTG is on before the IRQ
         * handlers run and they never access stale or invalid state.
         */
8913         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8914                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8915 #ifdef CONFIG_DEBUG_FS
8916                 bool configure_crc = false;
8917                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8918 #endif
8919                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8920
8921                 if (new_crtc_state->active &&
8922                     (!old_crtc_state->active ||
8923                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8924                         dc_stream_retain(dm_new_crtc_state->stream);
8925                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8926                         manage_dm_interrupts(adev, acrtc, true);
8927
8928 #ifdef CONFIG_DEBUG_FS
                        /*
8930                          * Frontend may have changed so reapply the CRC capture
8931                          * settings for the stream.
8932                          */
8933                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8934                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8935                         cur_crc_src = acrtc->dm_irq_params.crc_src;
8936                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8937
8938                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8939                                 configure_crc = true;
8940 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8941                                 if (amdgpu_dm_crc_window_is_activated(crtc))
8942                                         configure_crc = false;
8943 #endif
8944                         }
8945
8946                         if (configure_crc)
8947                                 amdgpu_dm_crtc_configure_crc_source(
8948                                         crtc, dm_new_crtc_state, cur_crc_src);
8949 #endif
8950                 }
8951         }
8952
8953         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8954                 if (new_crtc_state->async_flip)
8955                         wait_for_vblank = false;
8956
8957         /* update planes when needed per crtc*/
8958         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8959                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8960
8961                 if (dm_new_crtc_state->stream)
8962                         amdgpu_dm_commit_planes(state, dc_state, dev,
8963                                                 dm, crtc, wait_for_vblank);
8964         }
8965
8966         /* Update audio instances for each connector. */
8967         amdgpu_dm_commit_audio(dev, state);
8968
8969         /*
         * Send vblank events for all CRTC events not handled in the flip
         * path, and mark them consumed for drm_atomic_helper_commit_hw_done().
8972          */
8973         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8974         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8975
8976                 if (new_crtc_state->event)
8977                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8978
8979                 new_crtc_state->event = NULL;
8980         }
8981         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8982
8983         /* Signal HW programming completion */
8984         drm_atomic_helper_commit_hw_done(state);
8985
8986         if (wait_for_vblank)
8987                 drm_atomic_helper_wait_for_flip_done(dev, state);
8988
8989         drm_atomic_helper_cleanup_planes(dev, state);
8990
8991         /* return the stolen vga memory back to VRAM */
8992         if (!adev->mman.keep_stolen_vga_memory)
8993                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8994         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8995
8996         /*
8997          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8998          * so we can put the GPU into runtime suspend if we're not driving any
8999          * displays anymore
9000          */
9001         for (i = 0; i < crtc_disable_count; i++)
9002                 pm_runtime_put_autosuspend(dev->dev);
9003         pm_runtime_mark_last_busy(dev->dev);
9004
9005         if (dc_state_temp)
9006                 dc_release_state(dc_state_temp);
9007 }
9008
9009
9010 static int dm_force_atomic_commit(struct drm_connector *connector)
9011 {
9012         int ret = 0;
9013         struct drm_device *ddev = connector->dev;
9014         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9015         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9016         struct drm_plane *plane = disconnected_acrtc->base.primary;
9017         struct drm_connector_state *conn_state;
9018         struct drm_crtc_state *crtc_state;
9019         struct drm_plane_state *plane_state;
9020
9021         if (!state)
9022                 return -ENOMEM;
9023
9024         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9025
9026         /* Construct an atomic state to restore previous display setting */
9027
        /* Attach connectors to drm_atomic_state */
9031         conn_state = drm_atomic_get_connector_state(state, connector);
9032
9033         ret = PTR_ERR_OR_ZERO(conn_state);
9034         if (ret)
9035                 goto out;
9036
        /* Attach crtc to drm_atomic_state */
9038         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9039
9040         ret = PTR_ERR_OR_ZERO(crtc_state);
9041         if (ret)
9042                 goto out;
9043
9044         /* force a restore */
9045         crtc_state->mode_changed = true;
9046
9047         /* Attach plane to drm_atomic_state */
9048         plane_state = drm_atomic_get_plane_state(state, plane);
9049
9050         ret = PTR_ERR_OR_ZERO(plane_state);
9051         if (ret)
9052                 goto out;
9053
9054         /* Call commit internally with the state we just constructed */
9055         ret = drm_atomic_commit(state);
9056
9057 out:
9058         drm_atomic_state_put(state);
9059         if (ret)
9060                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9061
9062         return ret;
9063 }
9064
9065 /*
9066  * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
9069  */
9070 void dm_restore_drm_connector_state(struct drm_device *dev,
9071                                     struct drm_connector *connector)
9072 {
9073         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9074         struct amdgpu_crtc *disconnected_acrtc;
9075         struct dm_crtc_state *acrtc_state;
9076
9077         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9078                 return;
9079
9080         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9081         if (!disconnected_acrtc)
9082                 return;
9083
9084         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9085         if (!acrtc_state->stream)
9086                 return;
9087
9088         /*
9089          * If the previous sink is not released and different from the current,
9090          * we deduce we are in a state where we can not rely on usermode call
9091          * to turn on the display, so we do it here
9092          */
9093         if (acrtc_state->stream->sink != aconnector->dc_sink)
9094                 dm_force_atomic_commit(&aconnector->base);
9095 }
9096
9097 /*
 * Grab all modesetting locks to serialize against any blocking commits, and
 * wait for the completion of all non-blocking commits.
9100  */
9101 static int do_aquire_global_lock(struct drm_device *dev,
9102                                  struct drm_atomic_state *state)
9103 {
9104         struct drm_crtc *crtc;
9105         struct drm_crtc_commit *commit;
9106         long ret;
9107
9108         /*
         * Adding all modeset locks to acquire_ctx ensures that when the
         * framework releases the context, the extra locks we take here
         * are released too.
9112          */
9113         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9114         if (ret)
9115                 return ret;
9116
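        /*
         * Take a reference on the first pending commit of each CRTC (under
         * commit_lock, so it cannot be freed underneath us) and wait for
         * its hw_done and flip_done completions.
         */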
9117         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9118                 spin_lock(&crtc->commit_lock);
9119                 commit = list_first_entry_or_null(&crtc->commit_list,
9120                                 struct drm_crtc_commit, commit_entry);
9121                 if (commit)
9122                         drm_crtc_commit_get(commit);
9123                 spin_unlock(&crtc->commit_lock);
9124
9125                 if (!commit)
9126                         continue;
9127
9128                 /*
9129                  * Make sure all pending HW programming completed and
9130                  * page flips done
9131                  */
9132                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9133
9134                 if (ret > 0)
9135                         ret = wait_for_completion_interruptible_timeout(
9136                                         &commit->flip_done, 10*HZ);
9137
9138                 if (ret == 0)
                        DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
                                  crtc->base.id, crtc->name);
9141
9142                 drm_crtc_commit_put(commit);
9143         }
9144
9145         return ret < 0 ? ret : 0;
9146 }
9147
9148 static void get_freesync_config_for_crtc(
9149         struct dm_crtc_state *new_crtc_state,
9150         struct dm_connector_state *new_con_state)
9151 {
9152         struct mod_freesync_config config = {0};
9153         struct amdgpu_dm_connector *aconnector =
9154                         to_amdgpu_dm_connector(new_con_state->base.connector);
9155         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9156         int vrefresh = drm_mode_vrefresh(mode);
9157         bool fs_vid_mode = false;
9158
9159         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9160                                         vrefresh >= aconnector->min_vfreq &&
9161                                         vrefresh <= aconnector->max_vfreq;
9162
9163         if (new_crtc_state->vrr_supported) {
9164                 new_crtc_state->stream->ignore_msa_timing_param = true;
9165                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9166
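                /*
                 * aconnector->min/max_vfreq are in Hz; mod_freesync expects
                 * refresh rates in micro-Hz, hence the 1000000 scaling.
                 */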
9167                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9168                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9169                 config.vsif_supported = true;
9170                 config.btr = true;
9171
9172                 if (fs_vid_mode) {
9173                         config.state = VRR_STATE_ACTIVE_FIXED;
9174                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9175                         goto out;
9176                 } else if (new_crtc_state->base.vrr_enabled) {
9177                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9178                 } else {
9179                         config.state = VRR_STATE_INACTIVE;
9180                 }
9181         }
9182 out:
9183         new_crtc_state->freesync_config = config;
9184 }
9185
9186 static void reset_freesync_config_for_crtc(
9187         struct dm_crtc_state *new_crtc_state)
9188 {
9189         new_crtc_state->vrr_supported = false;
9190
9191         memset(&new_crtc_state->vrr_infopacket, 0,
9192                sizeof(new_crtc_state->vrr_infopacket));
9193 }
9194
9195 static bool
9196 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9197                                  struct drm_crtc_state *new_crtc_state)
9198 {
9199         struct drm_display_mode old_mode, new_mode;
9200
9201         if (!old_crtc_state || !new_crtc_state)
9202                 return false;
9203
9204         old_mode = old_crtc_state->mode;
9205         new_mode = new_crtc_state->mode;
9206
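        /*
         * Freesync video modes keep the pixel clock and horizontal timing
         * identical and vary the refresh rate only through vtotal and the
         * vsync placement, so the horizontal fields must match exactly
         * while the vertical ones are allowed to differ; the vsync pulse
         * width (vsync_end - vsync_start) must still match.
         */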
9207         if (old_mode.clock       == new_mode.clock &&
9208             old_mode.hdisplay    == new_mode.hdisplay &&
9209             old_mode.vdisplay    == new_mode.vdisplay &&
9210             old_mode.htotal      == new_mode.htotal &&
9211             old_mode.vtotal      != new_mode.vtotal &&
9212             old_mode.hsync_start == new_mode.hsync_start &&
9213             old_mode.vsync_start != new_mode.vsync_start &&
9214             old_mode.hsync_end   == new_mode.hsync_end &&
9215             old_mode.vsync_end   != new_mode.vsync_end &&
9216             old_mode.hskew       == new_mode.hskew &&
9217             old_mode.vscan       == new_mode.vscan &&
9218             (old_mode.vsync_end - old_mode.vsync_start) ==
9219             (new_mode.vsync_end - new_mode.vsync_start))
9220                 return true;
9221
9222         return false;
9223 }
9224
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9226         uint64_t num, den, res;
9227         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9228
9229         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9230
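        /*
         * fixed_refresh_in_uhz = pixel_clock / (htotal * vtotal), scaled to
         * micro-Hz. E.g. for CEA 1080p60 (clock = 148500 kHz, htotal = 2200,
         * vtotal = 1125): 148500 * 1000 * 1000000 / (2200 * 1125) =
         * 60,000,000 uHz = 60 Hz.
         */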
9231         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9232         den = (unsigned long long)new_crtc_state->mode.htotal *
9233               (unsigned long long)new_crtc_state->mode.vtotal;
9234
9235         res = div_u64(num, den);
9236         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9237 }
9238
9239 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9240                                 struct drm_atomic_state *state,
9241                                 struct drm_crtc *crtc,
9242                                 struct drm_crtc_state *old_crtc_state,
9243                                 struct drm_crtc_state *new_crtc_state,
9244                                 bool enable,
9245                                 bool *lock_and_validation_needed)
9246 {
9247         struct dm_atomic_state *dm_state = NULL;
9248         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9249         struct dc_stream_state *new_stream;
9250         int ret = 0;
9251
9252         /*
9253          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9254          * update changed items
9255          */
9256         struct amdgpu_crtc *acrtc = NULL;
9257         struct amdgpu_dm_connector *aconnector = NULL;
9258         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9259         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9260
9261         new_stream = NULL;
9262
9263         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9264         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9265         acrtc = to_amdgpu_crtc(crtc);
9266         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9267
9268         /* TODO This hack should go away */
9269         if (aconnector && enable) {
9270                 /* Make sure fake sink is created in plug-in scenario */
9271                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9272                                                             &aconnector->base);
9273                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9274                                                             &aconnector->base);
9275
9276                 if (IS_ERR(drm_new_conn_state)) {
                        ret = PTR_ERR(drm_new_conn_state);
9278                         goto fail;
9279                 }
9280
9281                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9282                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9283
9284                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9285                         goto skip_modeset;
9286
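                /*
                 * Create a dc stream for the new mode and validate it
                 * against the sink attached to this connector.
                 */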
9287                 new_stream = create_validate_stream_for_sink(aconnector,
9288                                                              &new_crtc_state->mode,
9289                                                              dm_new_conn_state,
9290                                                              dm_old_crtc_state->stream);
9291
                /*
                 * We can have no stream on ACTION_SET if a display
                 * was disconnected during S3. In this case it is not an
                 * error: the OS will be updated after detection and
                 * will do the right thing on the next atomic commit.
                 */
9298
9299                 if (!new_stream) {
9300                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9301                                         __func__, acrtc->base.base.id);
9302                         ret = -ENOMEM;
9303                         goto fail;
9304                 }
9305
9306                 /*
9307                  * TODO: Check VSDB bits to decide whether this should
9308                  * be enabled or not.
9309                  */
9310                 new_stream->triggered_crtc_reset.enabled =
9311                         dm->force_timing_sync;
9312
9313                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9314
9315                 ret = fill_hdr_info_packet(drm_new_conn_state,
9316                                            &new_stream->hdr_static_metadata);
9317                 if (ret)
9318                         goto fail;
9319
9320                 /*
9321                  * If we already removed the old stream from the context
9322                  * (and set the new stream to NULL) then we can't reuse
9323                  * the old stream even if the stream and scaling are unchanged.
9324                  * We'll hit the BUG_ON and black screen.
9325                  *
9326                  * TODO: Refactor this function to allow this check to work
9327                  * in all conditions.
9328                  */
9329                 if (amdgpu_freesync_vid_mode &&
9330                     dm_new_crtc_state->stream &&
9331                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9332                         goto skip_modeset;
9333
9334                 if (dm_new_crtc_state->stream &&
9335                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9336                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9337                         new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9339                                          new_crtc_state->mode_changed);
9340                 }
9341         }
9342
9343         /* mode_changed flag may get updated above, need to check again */
9344         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9345                 goto skip_modeset;
9346
9347         DRM_DEBUG_ATOMIC(
9348                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9350                 "connectors_changed:%d\n",
9351                 acrtc->crtc_id,
9352                 new_crtc_state->enable,
9353                 new_crtc_state->active,
9354                 new_crtc_state->planes_changed,
9355                 new_crtc_state->mode_changed,
9356                 new_crtc_state->active_changed,
9357                 new_crtc_state->connectors_changed);
9358
9359         /* Remove stream for any changed/disabled CRTC */
9360         if (!enable) {
                if (!dm_old_crtc_state->stream)
9363                         goto skip_modeset;
9364
9365                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9366                     is_timing_unchanged_for_freesync(new_crtc_state,
9367                                                      old_crtc_state)) {
9368                         new_crtc_state->mode_changed = false;
9369                         DRM_DEBUG_DRIVER(
9370                                 "Mode change not required for front porch change, "
                                "setting mode_changed to %d\n",
9372                                 new_crtc_state->mode_changed);
9373
9374                         set_freesync_fixed_config(dm_new_crtc_state);
9375
9376                         goto skip_modeset;
9377                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9378                            is_freesync_video_mode(&new_crtc_state->mode,
9379                                                   aconnector)) {
9380                         set_freesync_fixed_config(dm_new_crtc_state);
9381                 }
9382
9383                 ret = dm_atomic_get_state(state, &dm_state);
9384                 if (ret)
9385                         goto fail;
9386
9387                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9388                                 crtc->base.id);
9389
9390                 /* i.e. reset mode */
9391                 if (dc_remove_stream_from_ctx(
9392                                 dm->dc,
9393                                 dm_state->context,
9394                                 dm_old_crtc_state->stream) != DC_OK) {
9395                         ret = -EINVAL;
9396                         goto fail;
9397                 }
9398
9399                 dc_stream_release(dm_old_crtc_state->stream);
9400                 dm_new_crtc_state->stream = NULL;
9401
9402                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9403
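                /*
                 * Removing a stream changes the global bandwidth picture, so
                 * require full DC validation under the global lock later in
                 * atomic check.
                 */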
9404                 *lock_and_validation_needed = true;
9405
        } else { /* Add stream for any updated/enabled CRTC */
                /*
                 * Quick fix to prevent a NULL pointer deref on new_stream when
                 * newly added MST connectors are not found in the existing
                 * crtc_state in chained (daisy-chain) mode.
                 * TODO: dig out the root cause of this.
                 */
9412                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9413                         goto skip_modeset;
9414
9415                 if (modereset_required(new_crtc_state))
9416                         goto skip_modeset;
9417
9418                 if (modeset_required(new_crtc_state, new_stream,
9419                                      dm_old_crtc_state->stream)) {
9420
9421                         WARN_ON(dm_new_crtc_state->stream);
9422
9423                         ret = dm_atomic_get_state(state, &dm_state);
9424                         if (ret)
9425                                 goto fail;
9426
9427                         dm_new_crtc_state->stream = new_stream;
9428
9429                         dc_stream_retain(new_stream);
9430
9431                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9432                                          crtc->base.id);
9433
9434                         if (dc_add_stream_to_ctx(
9435                                         dm->dc,
9436                                         dm_state->context,
9437                                         dm_new_crtc_state->stream) != DC_OK) {
9438                                 ret = -EINVAL;
9439                                 goto fail;
9440                         }
9441
9442                         *lock_and_validation_needed = true;
9443                 }
9444         }
9445
9446 skip_modeset:
        /* Release the extra reference taken by create_validate_stream_for_sink() */
        if (new_stream)
                dc_stream_release(new_stream);
9450
9451         /*
9452          * We want to do dc stream updates that do not require a
9453          * full modeset below.
9454          */
9455         if (!(enable && aconnector && new_crtc_state->active))
9456                 return 0;
        /*
         * Given the conditions above, the dc stream cannot be NULL because:
         * 1. We're in the process of enabling the CRTC (its stream has just
         *    been added to the dc context, or is already on it),
         * 2. it has a valid connector attached, and
         * 3. it is currently active and enabled.
         * => The dc stream state currently exists.
         */
9465         BUG_ON(dm_new_crtc_state->stream == NULL);
9466
9467         /* Scaling or underscan settings */
9468         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9469                 update_stream_scaling_settings(
9470                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9471
9472         /* ABM settings */
9473         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9474
9475         /*
9476          * Color management settings. We also update color properties
9477          * when a modeset is needed, to ensure it gets reprogrammed.
9478          */
9479         if (dm_new_crtc_state->base.color_mgmt_changed ||
9480             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9481                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9482                 if (ret)
9483                         goto fail;
9484         }
9485
9486         /* Update Freesync settings. */
9487         get_freesync_config_for_crtc(dm_new_crtc_state,
9488                                      dm_new_conn_state);
9489
9490         return ret;
9491
9492 fail:
9493         if (new_stream)
9494                 dc_stream_release(new_stream);
9495         return ret;
9496 }
9497
9498 static bool should_reset_plane(struct drm_atomic_state *state,
9499                                struct drm_plane *plane,
9500                                struct drm_plane_state *old_plane_state,
9501                                struct drm_plane_state *new_plane_state)
9502 {
9503         struct drm_plane *other;
9504         struct drm_plane_state *old_other_state, *new_other_state;
9505         struct drm_crtc_state *new_crtc_state;
9506         int i;
9507
        /*
         * TODO: Remove this hack once the checks below are sufficient
         * to determine when we need to reset all the planes on
         * the stream.
         */
9513         if (state->allow_modeset)
9514                 return true;
9515
9516         /* Exit early if we know that we're adding or removing the plane. */
9517         if (old_plane_state->crtc != new_plane_state->crtc)
9518                 return true;
9519
        /* Both old and new crtc are NULL: the plane is not in the context. */
9521         if (!new_plane_state->crtc)
9522                 return false;
9523
9524         new_crtc_state =
9525                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9526
9527         if (!new_crtc_state)
9528                 return true;
9529
9530         /* CRTC Degamma changes currently require us to recreate planes. */
9531         if (new_crtc_state->color_mgmt_changed)
9532                 return true;
9533
9534         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9535                 return true;
9536
9537         /*
9538          * If there are any new primary or overlay planes being added or
9539          * removed then the z-order can potentially change. To ensure
9540          * correct z-order and pipe acquisition the current DC architecture
9541          * requires us to remove and recreate all existing planes.
9542          *
9543          * TODO: Come up with a more elegant solution for this.
9544          */
9545         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                struct amdgpu_framebuffer *old_afb, *new_afb;

                if (other->type == DRM_PLANE_TYPE_CURSOR)
9548                         continue;
9549
9550                 if (old_other_state->crtc != new_plane_state->crtc &&
9551                     new_other_state->crtc != new_plane_state->crtc)
9552                         continue;
9553
9554                 if (old_other_state->crtc != new_other_state->crtc)
9555                         return true;
9556
9557                 /* Src/dst size and scaling updates. */
9558                 if (old_other_state->src_w != new_other_state->src_w ||
9559                     old_other_state->src_h != new_other_state->src_h ||
9560                     old_other_state->crtc_w != new_other_state->crtc_w ||
9561                     old_other_state->crtc_h != new_other_state->crtc_h)
9562                         return true;
9563
9564                 /* Rotation / mirroring updates. */
9565                 if (old_other_state->rotation != new_other_state->rotation)
9566                         return true;
9567
9568                 /* Blending updates. */
9569                 if (old_other_state->pixel_blend_mode !=
9570                     new_other_state->pixel_blend_mode)
9571                         return true;
9572
9573                 /* Alpha updates. */
9574                 if (old_other_state->alpha != new_other_state->alpha)
9575                         return true;
9576
9577                 /* Colorspace changes. */
9578                 if (old_other_state->color_range != new_other_state->color_range ||
9579                     old_other_state->color_encoding != new_other_state->color_encoding)
9580                         return true;
9581
9582                 /* Framebuffer checks fall at the end. */
9583                 if (!old_other_state->fb || !new_other_state->fb)
9584                         continue;
9585
9586                 /* Pixel format changes can require bandwidth updates. */
9587                 if (old_other_state->fb->format != new_other_state->fb->format)
9588                         return true;
9589
9590                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9591                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9592
9593                 /* Tiling and DCC changes also require bandwidth updates. */
9594                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9595                     old_afb->base.modifier != new_afb->base.modifier)
9596                         return true;
9597         }
9598
9599         return false;
9600 }
9601
9602 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9603                               struct drm_plane_state *new_plane_state,
9604                               struct drm_framebuffer *fb)
9605 {
9606         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9607         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9608         unsigned int pitch;
9609         bool linear;
9610
9611         if (fb->width > new_acrtc->max_cursor_width ||
9612             fb->height > new_acrtc->max_cursor_height) {
                DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
                                 fb->width, fb->height);
9616                 return -EINVAL;
9617         }
9618         if (new_plane_state->src_w != fb->width << 16 ||
9619             new_plane_state->src_h != fb->height << 16) {
9620                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9621                 return -EINVAL;
9622         }
9623
9624         /* Pitch in pixels */
9625         pitch = fb->pitches[0] / fb->format->cpp[0];
9626
9627         if (fb->width != pitch) {
                DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9629                                  fb->width, pitch);
9630                 return -EINVAL;
9631         }
9632
9633         switch (pitch) {
9634         case 64:
9635         case 128:
9636         case 256:
9637                 /* FB pitch is supported by cursor plane */
9638                 break;
9639         default:
9640                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9641                 return -EINVAL;
9642         }
9643
        /*
         * Core DRM takes care of checking FB modifiers, so we only need to
         * check tiling flags when the FB doesn't have a modifier.
         */
9646         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9647                 if (adev->family < AMDGPU_FAMILY_AI) {
9648                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9649                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9650                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9651                 } else {
9652                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9653                 }
9654                 if (!linear) {
                        DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9656                         return -EINVAL;
9657                 }
9658         }
9659
9660         return 0;
9661 }
9662
9663 static int dm_update_plane_state(struct dc *dc,
9664                                  struct drm_atomic_state *state,
9665                                  struct drm_plane *plane,
9666                                  struct drm_plane_state *old_plane_state,
9667                                  struct drm_plane_state *new_plane_state,
9668                                  bool enable,
9669                                  bool *lock_and_validation_needed)
{
9672         struct dm_atomic_state *dm_state = NULL;
9673         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9674         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9675         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9676         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9677         struct amdgpu_crtc *new_acrtc;
9678         bool needs_reset;
9679         int ret = 0;
9680
        new_plane_crtc = new_plane_state->crtc;
9683         old_plane_crtc = old_plane_state->crtc;
9684         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9685         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9686
9687         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9688                 if (!enable || !new_plane_crtc ||
9689                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9690                         return 0;
9691
9692                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9693
9694                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9695                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9696                         return -EINVAL;
9697                 }
9698
9699                 if (new_plane_state->fb) {
9700                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9701                                                  new_plane_state->fb);
9702                         if (ret)
9703                                 return ret;
9704                 }
9705
9706                 return 0;
9707         }
9708
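        /*
         * Cursor planes were fully handled above; from here on we only deal
         * with primary and overlay planes in the DC context.
         */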
9709         needs_reset = should_reset_plane(state, plane, old_plane_state,
9710                                          new_plane_state);
9711
9712         /* Remove any changed/removed planes */
9713         if (!enable) {
9714                 if (!needs_reset)
9715                         return 0;
9716
9717                 if (!old_plane_crtc)
9718                         return 0;
9719
9720                 old_crtc_state = drm_atomic_get_old_crtc_state(
9721                                 state, old_plane_crtc);
9722                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9723
9724                 if (!dm_old_crtc_state->stream)
9725                         return 0;
9726
9727                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9728                                 plane->base.id, old_plane_crtc->base.id);
9729
9730                 ret = dm_atomic_get_state(state, &dm_state);
9731                 if (ret)
9732                         return ret;
9733
9734                 if (!dc_remove_plane_from_context(
9735                                 dc,
9736                                 dm_old_crtc_state->stream,
9737                                 dm_old_plane_state->dc_state,
                                dm_state->context)) {
                        return -EINVAL;
                }

                dc_plane_state_release(dm_old_plane_state->dc_state);
9745                 dm_new_plane_state->dc_state = NULL;
9746
9747                 *lock_and_validation_needed = true;
9748
9749         } else { /* Add new planes */
9750                 struct dc_plane_state *dc_new_plane_state;
9751
9752                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9753                         return 0;
9754
9755                 if (!new_plane_crtc)
9756                         return 0;
9757
9758                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9759                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9760
9761                 if (!dm_new_crtc_state->stream)
9762                         return 0;
9763
9764                 if (!needs_reset)
9765                         return 0;
9766
9767                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9768                 if (ret)
9769                         return ret;
9770
9771                 WARN_ON(dm_new_plane_state->dc_state);
9772
9773                 dc_new_plane_state = dc_create_plane_state(dc);
9774                 if (!dc_new_plane_state)
9775                         return -ENOMEM;
9776
9777                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9778                                  plane->base.id, new_plane_crtc->base.id);
9779
9780                 ret = fill_dc_plane_attributes(
9781                         drm_to_adev(new_plane_crtc->dev),
9782                         dc_new_plane_state,
9783                         new_plane_state,
9784                         new_crtc_state);
9785                 if (ret) {
9786                         dc_plane_state_release(dc_new_plane_state);
9787                         return ret;
9788                 }
9789
9790                 ret = dm_atomic_get_state(state, &dm_state);
9791                 if (ret) {
9792                         dc_plane_state_release(dc_new_plane_state);
9793                         return ret;
9794                 }
9795
9796                 /*
9797                  * Any atomic check errors that occur after this will
9798                  * not need a release. The plane state will be attached
9799                  * to the stream, and therefore part of the atomic
9800                  * state. It'll be released when the atomic state is
9801                  * cleaned.
9802                  */
9803                 if (!dc_add_plane_to_context(
9804                                 dc,
9805                                 dm_new_crtc_state->stream,
9806                                 dc_new_plane_state,
                                dm_state->context)) {
                        dc_plane_state_release(dc_new_plane_state);
9810                         return -EINVAL;
9811                 }
9812
9813                 dm_new_plane_state->dc_state = dc_new_plane_state;
9814
                /*
                 * Tell DC to do a full surface update every time there
                 * is a plane change. Inefficient, but works for now.
                 */
9818                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9819
9820                 *lock_and_validation_needed = true;
9821         }
9822
        return ret;
9825 }
9826
9827 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9828                                 struct drm_crtc *crtc,
9829                                 struct drm_crtc_state *new_crtc_state)
9830 {
9831         struct drm_plane_state *new_cursor_state, *new_primary_state;
9832         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9833
        /*
         * On DCE and DCN there is no dedicated hardware cursor plane. We get
         * a cursor per pipe, but it is going to inherit the scaling and
         * positioning from the underlying pipe. Check that the cursor plane's
         * blending properties match the primary plane's.
         */
9838
9839         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9840         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9841         if (!new_cursor_state || !new_primary_state ||
9842             !new_cursor_state->fb || !new_primary_state->fb) {
9843                 return 0;
9844         }
9845
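        /*
         * crtc_w/h are in whole pixels while src_w/h are 16.16 fixed point;
         * shift the source size down and scale by 1000 so the ratios can be
         * compared as integers (per-mille).
         */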
9846         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9847                          (new_cursor_state->src_w >> 16);
9848         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9849                          (new_cursor_state->src_h >> 16);
9850
9851         primary_scale_w = new_primary_state->crtc_w * 1000 /
9852                          (new_primary_state->src_w >> 16);
9853         primary_scale_h = new_primary_state->crtc_h * 1000 /
9854                          (new_primary_state->src_h >> 16);
9855
9856         if (cursor_scale_w != primary_scale_w ||
9857             cursor_scale_h != primary_scale_h) {
9858                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9859                 return -EINVAL;
9860         }
9861
9862         return 0;
9863 }
9864
9865 #if defined(CONFIG_DRM_AMD_DC_DCN)
9866 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9867 {
9868         struct drm_connector *connector;
9869         struct drm_connector_state *conn_state;
9870         struct amdgpu_dm_connector *aconnector = NULL;
        int i;

        for_each_new_connector_in_state(state, connector, conn_state, i) {
9873                 if (conn_state->crtc != crtc)
9874                         continue;
9875
9876                 aconnector = to_amdgpu_dm_connector(connector);
9877                 if (!aconnector->port || !aconnector->mst_port)
9878                         aconnector = NULL;
9879                 else
9880                         break;
9881         }
9882
9883         if (!aconnector)
9884                 return 0;
9885
9886         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9887 }
9888 #endif
9889
9890 static int validate_overlay(struct drm_atomic_state *state)
9891 {
9892         int i;
9893         struct drm_plane *plane;
9894         struct drm_plane_state *old_plane_state, *new_plane_state;
9895         struct drm_plane_state *primary_state, *overlay_state = NULL;
9896
        /* Find the new state of the overlay plane, if any */
9898         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9899                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9900                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9901                                 return 0;
9902
9903                         overlay_state = new_plane_state;
9904                         continue;
9905                 }
9906         }
9907
9908         /* check if we're making changes to the overlay plane */
9909         if (!overlay_state)
9910                 return 0;
9911
9912         /* check if overlay plane is enabled */
9913         if (!overlay_state->crtc)
9914                 return 0;
9915
9916         /* find the primary plane for the CRTC that the overlay is enabled on */
9917         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
9918         if (IS_ERR(primary_state))
9919                 return PTR_ERR(primary_state);
9920
9921         /* check if primary plane is enabled */
9922         if (!primary_state->crtc)
9923                 return 0;
9924
9925         /* Perform the bounds check to ensure the overlay plane covers the primary */
9926         if (primary_state->crtc_x < overlay_state->crtc_x ||
9927             primary_state->crtc_y < overlay_state->crtc_y ||
9928             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
9929             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
9930                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
9931                 return -EINVAL;
9932         }
9933
9934         return 0;
9935 }
9936
9937 /**
9938  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9939  * @dev: The DRM device
9940  * @state: The atomic state to commit
9941  *
9942  * Validate that the given atomic state is programmable by DC into hardware.
9943  * This involves constructing a &struct dc_state reflecting the new hardware
9944  * state we wish to commit, then querying DC to see if it is programmable. It's
9945  * important not to modify the existing DC state. Otherwise, atomic_check
9946  * may unexpectedly commit hardware changes.
9947  *
9948  * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
9953  *
9954  * Note that DM adds the affected connectors for all CRTCs in state, when that
9955  * might not seem necessary. This is because DC stream creation requires the
9956  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9957  * be possible but non-trivial - a possible TODO item.
9958  *
 * Return: 0 on success, negative error code on failure.
9960  */
9961 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9962                                   struct drm_atomic_state *state)
9963 {
9964         struct amdgpu_device *adev = drm_to_adev(dev);
9965         struct dm_atomic_state *dm_state = NULL;
9966         struct dc *dc = adev->dm.dc;
9967         struct drm_connector *connector;
9968         struct drm_connector_state *old_con_state, *new_con_state;
9969         struct drm_crtc *crtc;
9970         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9971         struct drm_plane *plane;
9972         struct drm_plane_state *old_plane_state, *new_plane_state;
9973         enum dc_status status;
9974         int ret, i;
9975         bool lock_and_validation_needed = false;
9976         struct dm_crtc_state *dm_old_crtc_state;
9977
9978         trace_amdgpu_dm_atomic_check_begin(state);
9979
9980         ret = drm_atomic_helper_check_modeset(dev, state);
9981         if (ret)
9982                 goto fail;
9983
9984         /* Check connector changes */
9985         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9986                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9987                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9988
9989                 /* Skip connectors that are disabled or part of modeset already. */
9990                 if (!old_con_state->crtc && !new_con_state->crtc)
9991                         continue;
9992
9993                 if (!new_con_state->crtc)
9994                         continue;
9995
9996                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9997                 if (IS_ERR(new_crtc_state)) {
9998                         ret = PTR_ERR(new_crtc_state);
9999                         goto fail;
10000                 }
10001
10002                 if (dm_old_con_state->abm_level !=
10003                     dm_new_con_state->abm_level)
10004                         new_crtc_state->connectors_changed = true;
10005         }
10006
10007 #if defined(CONFIG_DRM_AMD_DC_DCN)
10008         if (dc_resource_is_dsc_encoding_supported(dc)) {
10009                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10010                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10011                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10012                                 if (ret)
10013                                         goto fail;
10014                         }
10015                 }
10016         }
10017 #endif
10018         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10019                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10020
10021                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10022                     !new_crtc_state->color_mgmt_changed &&
10023                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
                    !dm_old_crtc_state->dsc_force_changed)
10025                         continue;
10026
10027                 if (!new_crtc_state->enable)
10028                         continue;
10029
10030                 ret = drm_atomic_add_affected_connectors(state, crtc);
10031                 if (ret)
                        goto fail;
10033
10034                 ret = drm_atomic_add_affected_planes(state, crtc);
10035                 if (ret)
10036                         goto fail;
10037
10038                 if (dm_old_crtc_state->dsc_force_changed)
10039                         new_crtc_state->mode_changed = true;
10040         }
10041
10042         /*
10043          * Add all primary and overlay planes on the CRTC to the state
10044          * whenever a plane is enabled to maintain correct z-ordering
10045          * and to enable fast surface updates.
10046          */
10047         drm_for_each_crtc(crtc, dev) {
10048                 bool modified = false;
10049
10050                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10051                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10052                                 continue;
10053
10054                         if (new_plane_state->crtc == crtc ||
10055                             old_plane_state->crtc == crtc) {
10056                                 modified = true;
10057                                 break;
10058                         }
10059                 }
10060
10061                 if (!modified)
10062                         continue;
10063
10064                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10065                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10066                                 continue;
10067
10068                         new_plane_state =
10069                                 drm_atomic_get_plane_state(state, plane);
10070
10071                         if (IS_ERR(new_plane_state)) {
10072                                 ret = PTR_ERR(new_plane_state);
10073                                 goto fail;
10074                         }
10075                 }
10076         }
10077
        /* Remove existing planes if they are modified */
10079         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10080                 ret = dm_update_plane_state(dc, state, plane,
10081                                             old_plane_state,
10082                                             new_plane_state,
10083                                             false,
10084                                             &lock_and_validation_needed);
10085                 if (ret)
10086                         goto fail;
10087         }
10088
10089         /* Disable all crtcs which require disable */
10090         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10091                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10092                                            old_crtc_state,
10093                                            new_crtc_state,
10094                                            false,
10095                                            &lock_and_validation_needed);
10096                 if (ret)
10097                         goto fail;
10098         }
10099
10100         /* Enable all crtcs which require enable */
10101         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10102                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10103                                            old_crtc_state,
10104                                            new_crtc_state,
10105                                            true,
10106                                            &lock_and_validation_needed);
10107                 if (ret)
10108                         goto fail;
10109         }
10110
10111         ret = validate_overlay(state);
10112         if (ret)
10113                 goto fail;
10114
10115         /* Add new/modified planes */
10116         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10117                 ret = dm_update_plane_state(dc, state, plane,
10118                                             old_plane_state,
10119                                             new_plane_state,
10120                                             true,
10121                                             &lock_and_validation_needed);
10122                 if (ret)
10123                         goto fail;
10124         }
10125
10126         /* Run this here since we want to validate the streams we created */
10127         ret = drm_atomic_helper_check_planes(dev, state);
10128         if (ret)
10129                 goto fail;
10130
10131         /* Check cursor planes scaling */
10132         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10133                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10134                 if (ret)
10135                         goto fail;
10136         }
10137
10138         if (state->legacy_cursor_update) {
10139                 /*
10140                  * This is a fast cursor update coming from the plane update
10141                  * helper, check if it can be done asynchronously for better
10142                  * performance.
10143                  */
10144                 state->async_update =
10145                         !drm_atomic_helper_async_check(dev, state);
10146
10147                 /*
10148                  * Skip the remaining global validation if this is an async
10149                  * update. Cursor updates can be done without affecting
10150                  * state or bandwidth calcs and this avoids the performance
10151                  * penalty of locking the private state object and
10152                  * allocating a new dc_state.
10153                  */
10154                 if (state->async_update)
10155                         return 0;
10156         }
10157
        /* Check scaling and underscan changes */
        /*
         * TODO: Scaling-change validation was removed due to the inability to
         * commit a new stream into the context w/o causing a full reset. Need
         * to decide how to handle this.
         */
10163         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10164                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10165                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10166                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10167
10168                 /* Skip any modesets/resets */
10169                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10170                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10171                         continue;
10172
                /* Skip anything that is not a scaling or underscan change */
10174                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10175                         continue;
10176
10177                 lock_and_validation_needed = true;
10178         }
10179
        /*
10181          * Streams and planes are reset when there are changes that affect
10182          * bandwidth. Anything that affects bandwidth needs to go through
10183          * DC global validation to ensure that the configuration can be applied
10184          * to hardware.
10185          *
10186          * We have to currently stall out here in atomic_check for outstanding
10187          * commits to finish in this case because our IRQ handlers reference
10188          * DRM state directly - we can end up disabling interrupts too early
10189          * if we don't.
10190          *
10191          * TODO: Remove this stall and drop DM state private objects.
10192          */
10193         if (lock_and_validation_needed) {
10194                 ret = dm_atomic_get_state(state, &dm_state);
10195                 if (ret)
10196                         goto fail;
10197
10198                 ret = do_aquire_global_lock(dev, state);
10199                 if (ret)
10200                         goto fail;
10201
10202 #if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
                        ret = -EINVAL;
                        goto fail;
                }
10205
10206                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10207                 if (ret)
10208                         goto fail;
10209 #endif
10210
10211                 /*
10212                  * Perform validation of MST topology in the state:
10213                  * We need to perform MST atomic check before calling
10214                  * dc_validate_global_state(), or there is a chance
10215                  * to get stuck in an infinite loop and hang eventually.
10216                  */
10217                 ret = drm_dp_mst_atomic_check(state);
10218                 if (ret)
10219                         goto fail;
10220                 status = dc_validate_global_state(dc, dm_state->context, false);
10221                 if (status != DC_OK) {
10222                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10223                                        dc_status_to_str(status), status);
10224                         ret = -EINVAL;
10225                         goto fail;
10226                 }
10227         } else {
                /*
                 * The commit is a fast update. Fast updates shouldn't change
                 * the DC context or affect global validation, and they can
                 * have their commit work done in parallel with other commits
                 * not touching the same resource. If we have a new DC context
                 * as part of the DM atomic state from validation, we need to
                 * free it and retain the existing one instead.
                 *
                 * Furthermore, since the DM atomic state only contains the DC
                 * context and can safely be annulled, we can free the state
                 * and clear the associated private object now to free
                 * some memory and avoid a possible use-after-free later.
                 */
10241
10242                 for (i = 0; i < state->num_private_objs; i++) {
10243                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10244
10245                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
                                int j = state->num_private_objs - 1;
10247
10248                                 dm_atomic_destroy_state(obj,
10249                                                 state->private_objs[i].state);
10250
                                /*
                                 * If i is not at the end of the array, the
                                 * last element needs to be moved to where i
                                 * was before the array can safely be
                                 * truncated.
                                 */
10255                                 if (i != j)
10256                                         state->private_objs[i] =
10257                                                 state->private_objs[j];
10258
10259                                 state->private_objs[j].ptr = NULL;
10260                                 state->private_objs[j].state = NULL;
10261                                 state->private_objs[j].old_state = NULL;
10262                                 state->private_objs[j].new_state = NULL;
10263
10264                                 state->num_private_objs = j;
10265                                 break;
10266                         }
10267                 }
10268         }
10269
10270         /* Store the overall update type for use later in atomic check. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10272                 struct dm_crtc_state *dm_new_crtc_state =
10273                         to_dm_crtc_state(new_crtc_state);
10274
10275                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10276                                                          UPDATE_TYPE_FULL :
10277                                                          UPDATE_TYPE_FAST;
10278         }
10279
        /* ret must be 0 (success) at this point */
10281         WARN_ON(ret);
10282
10283         trace_amdgpu_dm_atomic_check_finish(state, ret);
10284
10285         return ret;
10286
10287 fail:
10288         if (ret == -EDEADLK)
10289                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10290         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10291                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10292         else
                DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10294
10295         trace_amdgpu_dm_atomic_check_finish(state, ret);
10296
10297         return ret;
10298 }
10299
10300 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10301                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10302 {
10303         uint8_t dpcd_data;
10304         bool capable = false;
10305
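        /*
         * Bit DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT means
         * the sink can ignore the MSA timing parameters, a prerequisite for
         * adaptive sync (FreeSync) over DP.
         */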
10306         if (amdgpu_dm_connector->dc_link &&
10307                 dm_helpers_dp_read_dpcd(
10308                                 NULL,
10309                                 amdgpu_dm_connector->dc_link,
10310                                 DP_DOWN_STREAM_PORT_COUNT,
10311                                 &dpcd_data,
10312                                 sizeof(dpcd_data))) {
                capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10314         }
10315
10316         return capable;
10317 }
10318
10319 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10320                 uint8_t *edid_ext, int len,
10321                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10322 {
10323         int i;
10324         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10325         struct dc *dc = adev->dm.dc;
10326
10327         /* send extension block to DMCU for parsing */
10328         for (i = 0; i < len; i += 8) {
10329                 bool res;
10330                 int offset;
10331
                /* send 8 bytes at a time */
10333                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10334                         return false;
10335
                if (i + 8 == len) {
                        /* entire EDID block sent, expect the result */
10338                         int version, min_rate, max_rate;
10339
10340                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10341                         if (res) {
10342                                 /* amd vsdb found */
10343                                 vsdb_info->freesync_supported = 1;
10344                                 vsdb_info->amd_vsdb_version = version;
10345                                 vsdb_info->min_refresh_rate_hz = min_rate;
10346                                 vsdb_info->max_refresh_rate_hz = max_rate;
10347                                 return true;
10348                         }
10349                         /* not amd vsdb */
10350                         return false;
10351                 }
10352
                /* check for ack */
10354                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10355                 if (!res)
10356                         return false;
10357         }
10358
10359         return false;
10360 }
10361
10362 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10363                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10364 {
10365         uint8_t *edid_ext = NULL;
10366         int i;
10367         bool valid_vsdb_found = false;
10368
10369         /*----- drm_find_cea_extension() -----*/
10370         /* No EDID or EDID extensions */
10371         if (edid == NULL || edid->extensions == 0)
10372                 return -ENODEV;
10373
10374         /* Find CEA extension */
10375         for (i = 0; i < edid->extensions; i++) {
10376                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10377                 if (edid_ext[0] == CEA_EXT)
10378                         break;
10379         }
10380
10381         if (i == edid->extensions)
10382                 return -ENODEV;
10383
10384         /*----- cea_db_offsets() -----*/
10385         if (edid_ext[0] != CEA_EXT)
10386                 return -ENODEV;
10387
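        /* Hand the CEA extension block to the DMCU-based parser. */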
10388         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10389
10390         return valid_vsdb_found ? i : -ENODEV;
10391 }
10392
10393 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10394                                         struct edid *edid)
10395 {
10396         int i = 0;
10397         struct detailed_timing *timing;
10398         struct detailed_non_pixel *data;
10399         struct detailed_data_monitor_range *range;
10400         struct amdgpu_dm_connector *amdgpu_dm_connector =
10401                         to_amdgpu_dm_connector(connector);
10402         struct dm_connector_state *dm_con_state = NULL;
10403
10404         struct drm_device *dev = connector->dev;
10405         struct amdgpu_device *adev = drm_to_adev(dev);
10406         bool freesync_capable = false;
10407         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10408
10409         if (!connector->state) {
                DRM_ERROR("%s - Connector has no state\n", __func__);
10411                 goto update;
10412         }
10413
10414         if (!edid) {
10415                 dm_con_state = to_dm_connector_state(connector->state);
10416
10417                 amdgpu_dm_connector->min_vfreq = 0;
10418                 amdgpu_dm_connector->max_vfreq = 0;
10419                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10420
10421                 goto update;
10422         }
10423
10424         dm_con_state = to_dm_connector_state(connector->state);
10425
10426         if (!amdgpu_dm_connector->dc_sink) {
                DRM_ERROR("dc_sink NULL, could not add the FreeSync module.\n");
10428                 goto update;
10429         }
10430         if (!adev->dm.freesync_module)
10431                 goto update;
10432
        if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10435                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10436                 bool edid_check_required = false;
10437
                /* edid is guaranteed non-NULL here; see the check above */
                edid_check_required = is_dp_capable_without_timing_msa(
                                        adev->dm.dc, amdgpu_dm_connector);
10443
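                /*
                 * Only look at the detailed monitor range descriptors below
                 * on EDID 1.2 or newer.
                 */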
                if (edid_check_required && (edid->version > 1 ||
10445                    (edid->version == 1 && edid->revision > 1))) {
10446                         for (i = 0; i < 4; i++) {
                                timing  = &edid->detailed_timings[i];
10449                                 data    = &timing->data.other_data;
10450                                 range   = &data->data.range;
10451                                 /*
10452                                  * Check if monitor has continuous frequency mode
10453                                  */
10454                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10455                                         continue;
10456                                 /*
10457                                  * Check for flag range limits only. If flag == 1 then
10458                                  * no additional timing information provided.
10459                                  * Default GTF, GTF Secondary curve and CVT are not
10460                                  * supported
10461                                  */
10462                                 if (range->flags != 1)
10463                                         continue;
10464
10465                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10466                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10467                                 amdgpu_dm_connector->pixel_clock_mhz =
10468                                         range->pixel_clock_mhz * 10;
10469
10470                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10471                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10472
10473                                 break;
10474                         }
10475
                        if (amdgpu_dm_connector->max_vfreq -
                            amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;
10481                 }
10482         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10483                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10484                 if (i >= 0 && vsdb_info.freesync_supported) {
10485                         timing  = &edid->detailed_timings[i];
10486                         data    = &timing->data.other_data;
10487
10488                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10489                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10490                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10491                                 freesync_capable = true;
10492
10493                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10494                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10495                 }
10496         }

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
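
/*
 * Worked example for the FreeSync gate above, illustrative only: the driver
 * treats a VRR window as usable when it spans more than 10 Hz. A panel
 * advertising a 48-144 Hz range gives 144 - 48 = 96 > 10 and is marked
 * freesync_capable; a fixed 60 Hz panel gives 60 - 60 = 0 and is not. The
 * same check as a stand-alone sketch, assuming only the two range fields:
 *
 *	static inline bool vrr_window_usable(unsigned int min_vfreq,
 *					     unsigned int max_vfreq)
 *	{
 *		return max_vfreq - min_vfreq > 10;
 *	}
 */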

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	/* read the sink's PSR capability block, starting at DP_PSR_SUPPORT */
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
	}
}
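
/*
 * For reference, the first byte read above is the sink's PSR version
 * register (DP_PSR_SUPPORT, DPCD address 0x070). Values defined in
 * <drm/drm_dp_helper.h> include DP_PSR_IS_SUPPORTED (1) for PSR1 and
 * DP_PSR2_IS_SUPPORTED (2) for PSR2; the code above collapses every
 * non-zero value to DC_PSR_VERSION_1, i.e. only PSR1 handling is wired up.
 */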

/**
 * amdgpu_dm_link_setup_psr() - configure the PSR link for a stream
 * @stream: stream state
 *
 * Return: true if the link was successfully configured for PSR
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (!stream)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
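
/*
 * Hypothetical call sequence, as a sketch only (the real call sites live
 * elsewhere in this file): PSR is a two-step setup, first configure the
 * link for the stream, then ask the firmware to engage:
 *
 *	if (amdgpu_dm_link_setup_psr(stream))
 *		amdgpu_dm_psr_enable(stream);
 */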

/**
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true if PSR was successfully enabled
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Number of consecutive static frames before the interrupt that
	 * enters PSR is generated; start from a fail-safe default of two.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	/* refresh rate = pixel clock / (h_total * v_total) */
	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Calculate the number of frames such that at least 30 ms of
	 * static time have passed, rounding up.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}
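
/*
 * Worked example for the static-frame math above: a 60 Hz stream gives
 * frame_time_microsec = 1000000 / 60 = 16666, so num_frames_static =
 * 30000 / 16666 + 1 = 2, and two static frames (~33 ms) cover the 30 ms
 * requirement. At 240 Hz the frame time is 4166 us and the count becomes
 * 30000 / 4166 + 1 = 8 frames.
 */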

/**
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true if PSR was successfully disabled
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}

/**
 * amdgpu_dm_psr_disable_all() - disable the PSR firmware on any stream
 * that currently has it enabled
 * @dm: display manager state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}
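
/*
 * Note the asymmetry with amdgpu_dm_psr_disable() above: that variant
 * releases PSR on a single stream's link, while this one asks DC to drop
 * PSR across the whole device in one call.
 */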

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
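
/*
 * Usage sketch, assuming a debugfs-style caller (an assumption; the actual
 * call sites are outside this excerpt): set the flag first, then let this
 * helper propagate it to every active stream under the DC lock:
 *
 *	adev->dm.force_timing_sync = true;
 *	amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
 */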

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read. address = 0\n");
		return 0;
	}
#endif

	/*
	 * Reads are rejected while the DMUB register helper is gathering an
	 * offload sequence (unless it is burst-writing), since the queued
	 * writes have not reached the hardware yet and the value read back
	 * would be stale.
	 */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
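
/*
 * Both accessors above funnel DC register traffic through CGS and emit the
 * amdgpu_dc_rreg/amdgpu_dc_wreg tracepoints from amdgpu_dm_trace.h, so a
 * trace capture can reconstruct the exact MMIO sequence. Example of turning
 * them on from userspace via the standard ftrace interface (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/amdgpu_dm/amdgpu_dc_rreg/enable
 *	echo 1 > /sys/kernel/tracing/events/amdgpu_dm/amdgpu_dc_wreg/enable
 */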