/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

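/* Map the dongle type from the link's DPCD caps to the DRM subconnector type. */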
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

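/*
 * Keep the DP subconnector property in sync with the dongle type of the
 * currently attached sink.
 */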
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: [in] desired amdgpu device
 * @crtc: [in] which CRTC to get the counter from
 *
 * Return: counter for vertical blanks, or 0 if the CRTC index is out of
 * range or no stream is attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        struct amdgpu_crtc *acrtc;

        if (crtc >= adev->mode_info.num_crtc)
                return 0;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
                return 0;
        }

        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
                return 0;
        }

        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
         */
        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                 &v_blank_start,
                                 &v_blank_end,
                                 &h_position,
                                 &v_position);

        *position = v_position | (h_position << 16);
        *vbl = v_blank_start | (v_blank_end << 16);

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

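/*
 * Look up the amdgpu_crtc driven by the given OTG instance; warns and falls
 * back to the first CRTC if the instance is invalid.
 */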
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

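/*
 * VRR is considered active whenever FreeSync is in variable or
 * fixed-rate mode.
 */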
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters; used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of the
         * vblank in which the pageflip completed, so last_flip_vblank is the
         * forbidden count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after the end of
                 * front-porch in VRR mode, as vblank timestamping only gives
                 * valid results once scanout is past the front-porch. This
                 * also delivers any page-flip completion events that were
                 * queued to us because the flip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at the start of front-porch is only possible
         * in non-VRR mode, since only then does vblank timestamping give
         * valid results while inside the front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after the end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at the start of vblank, for CRC
         * computation and below-the-range (BTR) support in VRR mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

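/*
 * drm_audio_component get_eld callback: find the connector backing the given
 * audio pin and copy its cached ELD into the HDA driver's buffer.
 */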
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

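/*
 * Validate the DMUB firmware, copy its sections into the reserved framebuffer
 * windows, and hand control to the DMUB service.
 */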
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
         * DMUB firmware into CW0; otherwise, the firmware backdoor load is
         * done here in dm_dmub_hw_init().
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_trace_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count,
                                         entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        ASSERT(count <= DMUB_TRACE_MAX_READ);
}

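/*
 * Translate the GMC framebuffer/AGP apertures and GART page table layout into
 * DC's physical address space configuration.
 */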
static void mmhub_read_system_context(struct amdgpu_device *adev,
                                      struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue where it cannot use vram that lies
                 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is to
                 * increase the system aperture high address (by 1) to get rid
                 * of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
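/*
 * Deferred vblank work: track how many CRTCs have vblank IRQs enabled and
 * allow DC idle optimizations (MALL) only when none do.
 */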
static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
#endif
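/*
 * Main DM entry point: creates the DC core, initializes DMUB and the
 * freesync/HDCP modules, and registers the DRM display structures.
 */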
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
        }

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++)
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

        amdgpu_dm_audio_fini(adev);

        amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        if (adev->dm.crc_rd_wrk) {
                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                kfree(adev->dm.crc_rd_wrk);
                adev->dm.crc_rd_wrk = NULL;
        }
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_workqueue) {
                adev->dm.vblank_workqueue->dm = NULL;
                kfree(adev->dm.vblank_workqueue);
                adev->dm.vblank_workqueue = NULL;
        }
#endif

        if (adev->dm.dc->ctx->dmub_srv) {
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
                adev->dm.dc->ctx->dmub_srv = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

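/*
 * Request and validate DMCU firmware for ASICs that need it; a missing DMCU
 * image is tolerated, since most ASICs do not require one.
 */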
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}
1423
1424 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1425 {
1426         struct amdgpu_device *adev = ctx;
1427
1428         return dm_read_reg(adev->dm.dc->ctx, address);
1429 }
1430
1431 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1432                                      uint32_t value)
1433 {
1434         struct amdgpu_device *adev = ctx;
1435
1436         return dm_write_reg(adev->dm.dc->ctx, address, value);
1437 }
1438
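/*
 * dm_dmub_sw_init() - Software-side setup for the DMUB service.
 *
 * Requests and validates the per-ASIC DMUB firmware, registers it with the
 * PSP loader when applicable, creates the DMUB service, and allocates a VRAM
 * buffer carved into the regions the firmware expects. Firmware load
 * failures are treated as non-fatal here (the function returns 0), leaving
 * the driver to run without DMUB.
 */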
1439 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1440 {
1441         struct dmub_srv_create_params create_params;
1442         struct dmub_srv_region_params region_params;
1443         struct dmub_srv_region_info region_info;
1444         struct dmub_srv_fb_params fb_params;
1445         struct dmub_srv_fb_info *fb_info;
1446         struct dmub_srv *dmub_srv;
1447         const struct dmcub_firmware_header_v1_0 *hdr;
1448         const char *fw_name_dmub;
1449         enum dmub_asic dmub_asic;
1450         enum dmub_status status;
1451         int r;
1452
1453         switch (adev->asic_type) {
1454         case CHIP_RENOIR:
1455                 dmub_asic = DMUB_ASIC_DCN21;
1456                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1457                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1458                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1459                 break;
1460         case CHIP_SIENNA_CICHLID:
1461                 dmub_asic = DMUB_ASIC_DCN30;
1462                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1463                 break;
1464         case CHIP_NAVY_FLOUNDER:
1465                 dmub_asic = DMUB_ASIC_DCN30;
1466                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1467                 break;
1468         case CHIP_VANGOGH:
1469                 dmub_asic = DMUB_ASIC_DCN301;
1470                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1471                 break;
1472         case CHIP_DIMGREY_CAVEFISH:
1473                 dmub_asic = DMUB_ASIC_DCN302;
1474                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1475                 break;
1477         default:
1478                 /* ASIC doesn't support DMUB. */
1479                 return 0;
1480         }
1481
1482         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1483         if (r) {
1484                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1485                 return 0;
1486         }
1487
1488         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1489         if (r) {
1490                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1491                 return 0;
1492         }
1493
1494         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1495
1496         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1497
1498         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1499                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1500                         AMDGPU_UCODE_ID_DMCUB;
1501                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1502                         adev->dm.dmub_fw;
1503                 adev->firmware.fw_size +=
1504                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1505
1506                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1507                          adev->dm.dmcub_fw_version);
1508         }
1509
1510         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1511         dmub_srv = adev->dm.dmub_srv;
1512
1513         if (!dmub_srv) {
1514                 DRM_ERROR("Failed to allocate DMUB service!\n");
1515                 return -ENOMEM;
1516         }
1517
1518         memset(&create_params, 0, sizeof(create_params));
1519         create_params.user_ctx = adev;
1520         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1521         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1522         create_params.asic = dmub_asic;
1523
1524         /* Create the DMUB service. */
1525         status = dmub_srv_create(dmub_srv, &create_params);
1526         if (status != DMUB_STATUS_OK) {
1527                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1528                 return -EINVAL;
1529         }
1530
1531         /* Calculate the size of all the regions for the DMUB service. */
1532         memset(&region_params, 0, sizeof(region_params));
1533
1534         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1535                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1536         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1537         region_params.vbios_size = adev->bios_size;
1538         region_params.fw_bss_data = region_params.bss_data_size ?
1539                 adev->dm.dmub_fw->data +
1540                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1541                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1542         region_params.fw_inst_const =
1543                 adev->dm.dmub_fw->data +
1544                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1545                 PSP_HEADER_BYTES;
1546
1547         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1548                                            &region_info);
1549
1550         if (status != DMUB_STATUS_OK) {
1551                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1552                 return -EINVAL;
1553         }
1554
1555         /*
1556          * Allocate a framebuffer based on the total size of all the regions.
1557          * TODO: Move this into GART.
1558          */
1559         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1560                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1561                                     &adev->dm.dmub_bo_gpu_addr,
1562                                     &adev->dm.dmub_bo_cpu_addr);
1563         if (r)
1564                 return r;
1565
1566         /* Rebase the regions on the framebuffer address. */
1567         memset(&fb_params, 0, sizeof(fb_params));
1568         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1569         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1570         fb_params.region_info = &region_info;
1571
1572         adev->dm.dmub_fb_info =
1573                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1574         fb_info = adev->dm.dmub_fb_info;
1575
1576         if (!fb_info) {
1577                 DRM_ERROR(
1578                         "Failed to allocate framebuffer info for DMUB service!\n");
1579                 return -ENOMEM;
1580         }
1581
1582         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1583         if (status != DMUB_STATUS_OK) {
1584                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1585                 return -EINVAL;
1586         }
1587
1588         return 0;
1589 }
1590
1591 static int dm_sw_init(void *handle)
1592 {
1593         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1594         int r;
1595
1596         r = dm_dmub_sw_init(adev);
1597         if (r)
1598                 return r;
1599
1600         return load_dmcu_fw(adev);
1601 }
1602
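/*
 * dm_sw_fini() - Undo dm_sw_init(): destroy the DMUB service and release the
 * DMUB framebuffer info and the DMUB/DMCU firmwares.
 */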
1603 static int dm_sw_fini(void *handle)
1604 {
1605         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1606
1607         kfree(adev->dm.dmub_fb_info);
1608         adev->dm.dmub_fb_info = NULL;
1609
1610         if (adev->dm.dmub_srv) {
1611                 dmub_srv_destroy(adev->dm.dmub_srv);
1612                 adev->dm.dmub_srv = NULL;
1613         }
1614
1615         release_firmware(adev->dm.dmub_fw);
1616         adev->dm.dmub_fw = NULL;
1617
1618         release_firmware(adev->dm.fw_dmcu);
1619         adev->dm.fw_dmcu = NULL;
1620
1621         return 0;
1622 }
1623
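/*
 * Walk every connector and start MST topology management on each link that
 * detection reported as an MST branch. On failure, the link is downgraded to
 * a single-display connection.
 */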
1624 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1625 {
1626         struct amdgpu_dm_connector *aconnector;
1627         struct drm_connector *connector;
1628         struct drm_connector_list_iter iter;
1629         int ret = 0;
1630
1631         drm_connector_list_iter_begin(dev, &iter);
1632         drm_for_each_connector_iter(connector, &iter) {
1633                 aconnector = to_amdgpu_dm_connector(connector);
1634                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1635                     aconnector->mst_mgr.aux) {
1636                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1637                                          aconnector,
1638                                          aconnector->base.base.id);
1639
1640                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1641                         if (ret < 0) {
1642                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1643                                 aconnector->dc_link->type =
1644                                         dc_connection_single;
1645                                 break;
1646                         }
1647                 }
1648         }
1649         drm_connector_list_iter_end(&iter);
1650
1651         return ret;
1652 }
1653
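/*
 * dm_late_init() - Program ABM backlight parameters and detect MST links.
 *
 * Builds the dmcu_iram_parameters (linear backlight LUT, ramping values and
 * the 1% minimum backlight floor) and loads them into the DMCU IRAM, or into
 * the DMUB ABM config on ASICs where ABM runs on the DMUB, then kicks off
 * MST topology detection on all connectors.
 */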
1654 static int dm_late_init(void *handle)
1655 {
1656         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657
1658         struct dmcu_iram_parameters params;
1659         unsigned int linear_lut[16];
1660         int i;
1661         struct dmcu *dmcu = NULL;
1662         bool ret = true;
1663
1664         dmcu = adev->dm.dc->res_pool->dmcu;
1665
1666         for (i = 0; i < 16; i++)
1667                 linear_lut[i] = 0xFFFF * i / 15;
1668
1669         params.set = 0;
1670         params.backlight_ramping_start = 0xCCCC;
1671         params.backlight_ramping_reduction = 0xCCCCCCCC;
1672         params.backlight_lut_array_size = 16;
1673         params.backlight_lut_array = linear_lut;
1674
1675         /* Min backlight level after ABM reduction; don't allow below 1%:
1676          * 0xFFFF * 0.01 = 0x28F
1677          */
1678         params.min_abm_backlight = 0x28F;
1679
1680         /* In the case where abm is implemented on dmcub,
1681          * dmcu object will be null.
1682          * ABM 2.4 and up are implemented on dmcub.
1683          */
1684         if (dmcu)
1685                 ret = dmcu_load_iram(dmcu, params);
1686         else if (adev->dm.dc->ctx->dmub_srv)
1687                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1688
1689         if (!ret)
1690                 return -EINVAL;
1691
1692         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1693 }
1694
1695 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1696 {
1697         struct amdgpu_dm_connector *aconnector;
1698         struct drm_connector *connector;
1699         struct drm_connector_list_iter iter;
1700         struct drm_dp_mst_topology_mgr *mgr;
1701         int ret;
1702         bool need_hotplug = false;
1703
1704         drm_connector_list_iter_begin(dev, &iter);
1705         drm_for_each_connector_iter(connector, &iter) {
1706                 aconnector = to_amdgpu_dm_connector(connector);
1707                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1708                     aconnector->mst_port)
1709                         continue;
1710
1711                 mgr = &aconnector->mst_mgr;
1712
1713                 if (suspend) {
1714                         drm_dp_mst_topology_mgr_suspend(mgr);
1715                 } else {
1716                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1717                         if (ret < 0) {
1718                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1719                                 need_hotplug = true;
1720                         }
1721                 }
1722         }
1723         drm_connector_list_iter_end(&iter);
1724
1725         if (need_hotplug)
1726                 drm_kms_helper_hotplug_event(dev);
1727 }
1728
1729 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1730 {
1731         struct smu_context *smu = &adev->smu;
1732         int ret = 0;
1733
1734         if (!is_support_sw_smu(adev))
1735                 return 0;
1736
1737         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1738          * depends on the Windows driver's dc implementation.
1739          * For Navi1x, the clock settings of the dcn watermarks are fixed and
1740          * should be passed to smu during boot up and on resume from s3.
1741          * Boot up: dc calculates the dcn watermark clock settings within
1742          * dc_create and dcn20_resource_construct, then calls the pplib
1743          * functions below to pass the settings to smu:
1744          * smu_set_watermarks_for_clock_ranges
1745          * smu_set_watermarks_table
1746          * navi10_set_watermarks_table
1747          * smu_write_watermarks_table
1748          *
1749          * For Renoir, the clock settings of the dcn watermarks are also fixed
1750          * values. dc has implemented a different flow for the Windows driver:
1751          * dc_hardware_init / dc_set_power_state
1752          * dcn10_init_hw
1753          * notify_wm_ranges
1754          * set_wm_ranges
1755          * -- Linux
1756          * smu_set_watermarks_for_clock_ranges
1757          * renoir_set_watermarks_table
1758          * smu_write_watermarks_table
1759          *
1760          * For Linux,
1761          * dc_hardware_init -> amdgpu_dm_init
1762          * dc_set_power_state --> dm_resume
1763          *
1764          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1765          */
1766         switch (adev->asic_type) {
1768         case CHIP_NAVI10:
1769         case CHIP_NAVI14:
1770         case CHIP_NAVI12:
1771                 break;
1772         default:
1773                 return 0;
1774         }
1775
1776         ret = smu_write_watermarks_table(smu);
1777         if (ret) {
1778                 DRM_ERROR("Failed to update WMTABLE!\n");
1779                 return ret;
1780         }
1781
1782         return 0;
1783 }
1784
1785 /**
1786  * dm_hw_init() - Initialize DC device
1787  * @handle: The base driver device containing the amdgpu_dm device.
1788  *
1789  * Initialize the &struct amdgpu_display_manager device. This involves calling
1790  * the initializers of each DM component, then populating the struct with them.
1791  *
1792  * Although the function implies hardware initialization, both hardware and
1793  * software are initialized here. Splitting them out to their relevant init
1794  * hooks is a future TODO item.
1795  *
1796  * Some notable things that are initialized here:
1797  *
1798  * - Display Core, both software and hardware
1799  * - DC modules that we need (freesync and color management)
1800  * - DRM software states
1801  * - Interrupt sources and handlers
1802  * - Vblank support
1803  * - Debug FS entries, if enabled
1804  */
1805 static int dm_hw_init(void *handle)
1806 {
1807         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1808         /* Create DAL display manager */
1809         amdgpu_dm_init(adev);
1810         amdgpu_dm_hpd_init(adev);
1811
1812         return 0;
1813 }
1814
1815 /**
1816  * dm_hw_fini() - Teardown DC device
1817  * @handle: The base driver device containing the amdgpu_dm device.
1818  *
1819  * Teardown components within &struct amdgpu_display_manager that require
1820  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1821  * were loaded. Also flush IRQ workqueues and disable them.
1822  */
1823 static int dm_hw_fini(void *handle)
1824 {
1825         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1826
1827         amdgpu_dm_hpd_fini(adev);
1828
1829         amdgpu_dm_irq_fini(adev);
1830         amdgpu_dm_fini(adev);
1831         return 0;
1832 }
1833
1835 static int dm_enable_vblank(struct drm_crtc *crtc);
1836 static void dm_disable_vblank(struct drm_crtc *crtc);
1837
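/*
 * Enable or disable the pflip and vblank interrupts for every CRTC that is
 * backed by a stream with active planes. Used to quiesce and restore display
 * interrupts around a GPU reset.
 */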
1838 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1839                                  struct dc_state *state, bool enable)
1840 {
1841         enum dc_irq_source irq_source;
1842         struct amdgpu_crtc *acrtc;
1843         int rc = -EBUSY;
1844         int i = 0;
1845
1846         for (i = 0; i < state->stream_count; i++) {
1847                 acrtc = get_crtc_by_otg_inst(
1848                                 adev, state->stream_status[i].primary_otg_inst);
1849
1850                 if (acrtc && state->stream_status[i].plane_count != 0) {
1851                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1852                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1853                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1854                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
1855                         if (rc)
1856                                 DRM_WARN("Failed to %s pflip interrupts\n",
1857                                          enable ? "enable" : "disable");
1858
1859                         if (enable) {
1860                                 rc = dm_enable_vblank(&acrtc->base);
1861                                 if (rc)
1862                                         DRM_WARN("Failed to enable vblank interrupts\n");
1863                         } else {
1864                                 dm_disable_vblank(&acrtc->base);
1865                         }
1866
1867                 }
1868         }
1870 }
1871
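/*
 * Commit an empty display state: copy the current DC state, strip all planes
 * and streams from the copy, validate the result and commit it, leaving the
 * hardware with zero active streams.
 */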
1872 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1873 {
1874         struct dc_state *context = NULL;
1875         enum dc_status res = DC_ERROR_UNEXPECTED;
1876         int i;
1877         struct dc_stream_state *del_streams[MAX_PIPES];
1878         int del_streams_count = 0;
1879
1880         memset(del_streams, 0, sizeof(del_streams));
1881
1882         context = dc_create_state(dc);
1883         if (context == NULL)
1884                 goto context_alloc_fail;
1885
1886         dc_resource_state_copy_construct_current(dc, context);
1887
1888         /* First remove from context all streams */
1889         for (i = 0; i < context->stream_count; i++) {
1890                 struct dc_stream_state *stream = context->streams[i];
1891
1892                 del_streams[del_streams_count++] = stream;
1893         }
1894
1895         /* Remove all planes for removed streams and then remove the streams */
1896         for (i = 0; i < del_streams_count; i++) {
1897                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1898                         res = DC_FAIL_DETACH_SURFACES;
1899                         goto fail;
1900                 }
1901
1902                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1903                 if (res != DC_OK)
1904                         goto fail;
1905         }
1906
1908         res = dc_validate_global_state(dc, context, false);
1909
1910         if (res != DC_OK) {
1911                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1912                 goto fail;
1913         }
1914
1915         res = dc_commit_state(dc, context);
1916
1917 fail:
1918         dc_release_state(context);
1919
1920 context_alloc_fail:
1921         return res;
1922 }
1923
1924 static int dm_suspend(void *handle)
1925 {
1926         struct amdgpu_device *adev = handle;
1927         struct amdgpu_display_manager *dm = &adev->dm;
1928         int ret = 0;
1929
1930         if (amdgpu_in_reset(adev)) {
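                /*
                 * Note: dc_lock is intentionally left held on this path;
                 * the matching unlock happens in dm_resume() once the
                 * cached DC state has been re-committed after the GPU reset.
                 */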
1931                 mutex_lock(&dm->dc_lock);
1932
1933 #if defined(CONFIG_DRM_AMD_DC_DCN)
1934                 dc_allow_idle_optimizations(adev->dm.dc, false);
1935 #endif
1936
1937                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1938
1939                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1940
1941                 amdgpu_dm_commit_zero_streams(dm->dc);
1942
1943                 amdgpu_dm_irq_suspend(adev);
1944
1945                 return ret;
1946         }
1947
1948 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1949         amdgpu_dm_crtc_secure_display_suspend(adev);
1950 #endif
1951         WARN_ON(adev->dm.cached_state);
1952         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1953
1954         s3_handle_mst(adev_to_drm(adev), true);
1955
1956         amdgpu_dm_irq_suspend(adev);
1957
1958         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959
1960         return 0;
1961 }
1962
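/* Return the first connector in @state whose new state targets @crtc. */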
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965                                              struct drm_crtc *crtc)
1966 {
1967         uint32_t i;
1968         struct drm_connector_state *new_con_state;
1969         struct drm_connector *connector;
1970         struct drm_crtc *crtc_from_state;
1971
1972         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973                 crtc_from_state = new_con_state->crtc;
1974
1975                 if (crtc_from_state == crtc)
1976                         return to_amdgpu_dm_connector(connector);
1977         }
1978
1979         return NULL;
1980 }
1981
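/*
 * Fake a link detection for connectors that are forced on but where real
 * sink detection reported nothing: create a sink matching the connector's
 * signal type and attempt to read a local EDID for it.
 */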
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984         struct dc_sink_init_data sink_init_data = { 0 };
1985         struct display_sink_capability sink_caps = { 0 };
1986         enum dc_edid_status edid_status;
1987         struct dc_context *dc_ctx = link->ctx;
1988         struct dc_sink *sink = NULL;
1989         struct dc_sink *prev_sink = NULL;
1990
1991         link->type = dc_connection_none;
1992         prev_sink = link->local_sink;
1993
1994         if (prev_sink)
1995                 dc_sink_release(prev_sink);
1996
1997         switch (link->connector_signal) {
1998         case SIGNAL_TYPE_HDMI_TYPE_A: {
1999                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001                 break;
2002         }
2003
2004         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007                 break;
2008         }
2009
2010         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013                 break;
2014         }
2015
2016         case SIGNAL_TYPE_LVDS: {
2017                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2019                 break;
2020         }
2021
2022         case SIGNAL_TYPE_EDP: {
2023                 sink_caps.transaction_type =
2024                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025                 sink_caps.signal = SIGNAL_TYPE_EDP;
2026                 break;
2027         }
2028
2029         case SIGNAL_TYPE_DISPLAY_PORT: {
2030                 sink_caps.transaction_type =
2031                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033                 break;
2034         }
2035
2036         default:
2037                 DC_ERROR("Invalid connector type! signal:%d\n",
2038                         link->connector_signal);
2039                 return;
2040         }
2041
2042         sink_init_data.link = link;
2043         sink_init_data.sink_signal = sink_caps.signal;
2044
2045         sink = dc_sink_create(&sink_init_data);
2046         if (!sink) {
2047                 DC_ERROR("Failed to create sink!\n");
2048                 return;
2049         }
2050
2051         /* dc_sink_create returns a new reference */
2052         link->local_sink = sink;
2053
2054         edid_status = dm_helpers_read_local_edid(
2055                         link->ctx,
2056                         link,
2057                         sink);
2058
2059         if (edid_status != EDID_OK)
2060                 DC_ERROR("Failed to read EDID\n");
2061 }
2063
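/*
 * After a GPU reset, re-commit the cached state stream by stream with every
 * surface flagged for a full update, so the hardware is reprogrammed from
 * scratch rather than diffed against stale state.
 */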
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065                                      struct amdgpu_display_manager *dm)
2066 {
2067         struct {
2068                 struct dc_surface_update surface_updates[MAX_SURFACES];
2069                 struct dc_plane_info plane_infos[MAX_SURFACES];
2070                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072                 struct dc_stream_update stream_update;
2073         } *bundle;
2074         int k, m;
2075
2076         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077
2078         if (!bundle) {
2079                 dm_error("Failed to allocate update bundle\n");
2080                 goto cleanup;
2081         }
2082
2083         for (k = 0; k < dc_state->stream_count; k++) {
2084                 bundle->stream_update.stream = dc_state->streams[k];
2085
2086                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2087                         bundle->surface_updates[m].surface =
2088                                 dc_state->stream_status[k].plane_states[m];
2089                         bundle->surface_updates[m].surface->force_full_update =
2090                                 true;
2091                 }
2092                 dc_commit_updates_for_stream(
2093                         dm->dc, bundle->surface_updates,
2094                         dc_state->stream_status[k].plane_count,
2095                         dc_state->streams[k], &bundle->stream_update, dc_state);
2096         }
2097
2098 cleanup:
2099         kfree(bundle);
2102 }
2103
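/* Find the stream currently driven by @link and commit it with dpms_off set. */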
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106         struct dc_stream_state *stream_state;
2107         struct amdgpu_dm_connector *aconnector = link->priv;
2108         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109         struct dc_stream_update stream_update;
2110         bool dpms_off = true;
2111
2112         memset(&stream_update, 0, sizeof(stream_update));
2113         stream_update.dpms_off = &dpms_off;
2114
2115         mutex_lock(&adev->dm.dc_lock);
2116         stream_state = dc_stream_find_from_link(link);
2117
2118         if (stream_state == NULL) {
2119                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120                 mutex_unlock(&adev->dm.dc_lock);
2121                 return;
2122         }
2123
2124         stream_update.stream = stream_state;
2125         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126                                      stream_state, &stream_update,
2127                                      stream_state->ctx->dc->current_state);
2128         mutex_unlock(&adev->dm.dc_lock);
2129 }
2130
2131 static int dm_resume(void *handle)
2132 {
2133         struct amdgpu_device *adev = handle;
2134         struct drm_device *ddev = adev_to_drm(adev);
2135         struct amdgpu_display_manager *dm = &adev->dm;
2136         struct amdgpu_dm_connector *aconnector;
2137         struct drm_connector *connector;
2138         struct drm_connector_list_iter iter;
2139         struct drm_crtc *crtc;
2140         struct drm_crtc_state *new_crtc_state;
2141         struct dm_crtc_state *dm_new_crtc_state;
2142         struct drm_plane *plane;
2143         struct drm_plane_state *new_plane_state;
2144         struct dm_plane_state *dm_new_plane_state;
2145         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146         enum dc_connection_type new_connection_type = dc_connection_none;
2147         struct dc_state *dc_state;
2148         int i, r, j;
2149
2150         if (amdgpu_in_reset(adev)) {
2151                 dc_state = dm->cached_dc_state;
2152
2153                 r = dm_dmub_hw_init(adev);
2154                 if (r)
2155                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156
2157                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158                 dc_resume(dm->dc);
2159
2160                 amdgpu_dm_irq_resume_early(adev);
2161
2162                 for (i = 0; i < dc_state->stream_count; i++) {
2163                         dc_state->streams[i]->mode_changed = true;
2164                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2165                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2166                                         = 0xffffffff;
2167                         }
2168                 }
2169
2170                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171
2172                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173
2174                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175
2176                 dc_release_state(dm->cached_dc_state);
2177                 dm->cached_dc_state = NULL;
2178
2179                 amdgpu_dm_irq_resume_late(adev);
2180
2181                 mutex_unlock(&dm->dc_lock);
2182
2183                 return 0;
2184         }
2185         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186         dc_release_state(dm_state->context);
2187         dm_state->context = dc_create_state(dm->dc);
2188         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189         dc_resource_state_construct(dm->dc, dm_state->context);
2190
2191         /* Before powering on DC we need to re-initialize DMUB. */
2192         r = dm_dmub_hw_init(adev);
2193         if (r)
2194                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195
2196         /* power on hardware */
2197         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198
2199         /* program HPD filter */
2200         dc_resume(dm->dc);
2201
2202         /*
2203          * early enable HPD Rx IRQ, should be done before set mode as short
2204          * pulse interrupts are used for MST
2205          */
2206         amdgpu_dm_irq_resume_early(adev);
2207
2208         /* On resume we need to rewrite the MSTM control bits to enable MST*/
2209         s3_handle_mst(ddev, false);
2210
2211         /* Do detection*/
2212         drm_connector_list_iter_begin(ddev, &iter);
2213         drm_for_each_connector_iter(connector, &iter) {
2214                 aconnector = to_amdgpu_dm_connector(connector);
2215
2216                 /*
2217                  * this is the case when traversing through already created
2218                  * MST connectors, should be skipped
2219                  */
2220                 if (aconnector->mst_port)
2221                         continue;
2222
2223                 mutex_lock(&aconnector->hpd_lock);
2224                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225                         DRM_ERROR("KMS: Failed to detect connector\n");
2226
2227                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2228                         emulated_link_detect(aconnector->dc_link);
2229                 else
2230                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231
2232                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233                         aconnector->fake_enable = false;
2234
2235                 if (aconnector->dc_sink)
2236                         dc_sink_release(aconnector->dc_sink);
2237                 aconnector->dc_sink = NULL;
2238                 amdgpu_dm_update_connector_after_detect(aconnector);
2239                 mutex_unlock(&aconnector->hpd_lock);
2240         }
2241         drm_connector_list_iter_end(&iter);
2242
2243         /* Force mode set in atomic commit */
2244         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245                 new_crtc_state->active_changed = true;
2246
2247         /*
2248          * atomic_check is expected to create the dc states. We need to release
2249          * them here, since they were duplicated as part of the suspend
2250          * procedure.
2251          */
2252         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254                 if (dm_new_crtc_state->stream) {
2255                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256                         dc_stream_release(dm_new_crtc_state->stream);
2257                         dm_new_crtc_state->stream = NULL;
2258                 }
2259         }
2260
2261         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263                 if (dm_new_plane_state->dc_state) {
2264                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265                         dc_plane_state_release(dm_new_plane_state->dc_state);
2266                         dm_new_plane_state->dc_state = NULL;
2267                 }
2268         }
2269
2270         drm_atomic_helper_resume(ddev, dm->cached_state);
2271
2272         dm->cached_state = NULL;
2273
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275         amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277
2278         amdgpu_dm_irq_resume_late(adev);
2279
2280         amdgpu_dm_smu_write_watermarks_table(adev);
2281
2282         return 0;
2283 }
2284
2285 /**
2286  * DOC: DM Lifecycle
2287  *
2288  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296         .name = "dm",
2297         .early_init = dm_early_init,
2298         .late_init = dm_late_init,
2299         .sw_init = dm_sw_init,
2300         .sw_fini = dm_sw_fini,
2301         .hw_init = dm_hw_init,
2302         .hw_fini = dm_hw_fini,
2303         .suspend = dm_suspend,
2304         .resume = dm_resume,
2305         .is_idle = dm_is_idle,
2306         .wait_for_idle = dm_wait_for_idle,
2307         .check_soft_reset = dm_check_soft_reset,
2308         .soft_reset = dm_soft_reset,
2309         .set_clockgating_state = dm_set_clockgating_state,
2310         .set_powergating_state = dm_set_powergating_state,
2311 };
2312
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315         .type = AMD_IP_BLOCK_TYPE_DCE,
2316         .major = 1,
2317         .minor = 0,
2318         .rev = 0,
2319         .funcs = &amdgpu_dm_funcs,
2320 };
2321
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330         .fb_create = amdgpu_display_user_framebuffer_create,
2331         .get_format_info = amd_get_format_info,
2332         .output_poll_changed = drm_fb_helper_output_poll_changed,
2333         .atomic_check = amdgpu_dm_atomic_check,
2334         .atomic_commit = drm_atomic_helper_commit,
2335 };
2336
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340
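/*
 * Refresh the eDP backlight caps for @aconnector: decide whether AUX
 * backlight control is usable (from the sink's extended caps and the
 * amdgpu_backlight module parameter) and derive the min/max AUX input
 * signal from the sink's reported max_cll/min_cll luminance values.
 */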
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343         u32 max_cll, min_cll, max, min, q, r;
2344         struct amdgpu_dm_backlight_caps *caps;
2345         struct amdgpu_display_manager *dm;
2346         struct drm_connector *conn_base;
2347         struct amdgpu_device *adev;
2348         struct dc_link *link = NULL;
2349         static const u8 pre_computed_values[] = {
2350                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352
2353         if (!aconnector || !aconnector->dc_link)
2354                 return;
2355
2356         link = aconnector->dc_link;
2357         if (link->connector_signal != SIGNAL_TYPE_EDP)
2358                 return;
2359
2360         conn_base = &aconnector->base;
2361         adev = drm_to_adev(conn_base->dev);
2362         dm = &adev->dm;
2363         caps = &dm->backlight_caps;
2364         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365         caps->aux_support = false;
2366         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368
2369         if (caps->ext_caps->bits.oled == 1 ||
2370             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372                 caps->aux_support = true;
2373
2374         if (amdgpu_backlight == 0)
2375                 caps->aux_support = false;
2376         else if (amdgpu_backlight == 1)
2377                 caps->aux_support = true;
2378
2379         /* From the specification (CTA-861-G), the maximum luminance is
2380          * calculated as:
2381          *      Luminance = 50 * 2**(CV/32)
2382          * where CV is a one-byte value.
2383          * Evaluating this expression directly would need floating-point
2384          * precision; to avoid that complexity, we take advantage of the fact
2385          * that CV is divided by a constant. By Euclid's division algorithm,
2386          * CV can be written as CV = 32*q + r. Substituting this into the
2387          * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2388          * to pre-compute the values of 2**(r/32). The pre-computed values
2389          * were generated with the following Ruby line:
2390          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391          * and the results are stored in pre_computed_values.
2392          */
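        /*
         * Worked example (sanity check of the scheme above): max_cll = 65
         * gives q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
         * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
         * matching 50 * 2**(65/32) ~= 204.
         */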
2394         q = max_cll >> 5;
2395         r = max_cll % 32;
2396         max = (1 << q) * pre_computed_values[r];
2397
2398         /* min luminance: maxLum * (CV/255)^2 / 100 */
2399         q = DIV_ROUND_CLOSEST(min_cll, 255);
2400         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2401
2402         caps->aux_max_input_signal = max;
2403         caps->aux_min_input_signal = min;
2404 }
2405
2406 void amdgpu_dm_update_connector_after_detect(
2407                 struct amdgpu_dm_connector *aconnector)
2408 {
2409         struct drm_connector *connector = &aconnector->base;
2410         struct drm_device *dev = connector->dev;
2411         struct dc_sink *sink;
2412
2413         /* MST handled by drm_mst framework */
2414         if (aconnector->mst_mgr.mst_state)
2415                 return;
2416
2417         sink = aconnector->dc_link->local_sink;
2418         if (sink)
2419                 dc_sink_retain(sink);
2420
2421         /*
2422          * An EDID-managed connector gets its first update only in the
2423          * mode_valid hook; the connector sink is then set to either a fake
2424          * or a physical sink, depending on link status.
2425          * Skip if this was already done during boot.
2426          */
2426         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427                         && aconnector->dc_em_sink) {
2428
2429                 /*
2430                  * For S3 resume with a headless configuration, use the
2431                  * emulated sink to fake a stream, because connector->sink
2432                  * is set to NULL on resume.
2433                  */
2433                 mutex_lock(&dev->mode_config.mutex);
2434
2435                 if (sink) {
2436                         if (aconnector->dc_sink) {
2437                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2438                                 /*
2439                                  * The retain and release below bump up the
2440                                  * sink's refcount, because the link no longer
2441                                  * points to it after disconnect; otherwise the
2442                                  * next crtc-to-connector reshuffle by the UMD
2443                                  * would trigger an unwanted dc_sink release.
2444                                  */
2444                                 dc_sink_release(aconnector->dc_sink);
2445                         }
2446                         aconnector->dc_sink = sink;
2447                         dc_sink_retain(aconnector->dc_sink);
2448                         amdgpu_dm_update_freesync_caps(connector,
2449                                         aconnector->edid);
2450                 } else {
2451                         amdgpu_dm_update_freesync_caps(connector, NULL);
2452                         if (!aconnector->dc_sink) {
2453                                 aconnector->dc_sink = aconnector->dc_em_sink;
2454                                 dc_sink_retain(aconnector->dc_sink);
2455                         }
2456                 }
2457
2458                 mutex_unlock(&dev->mode_config.mutex);
2459
2460                 if (sink)
2461                         dc_sink_release(sink);
2462                 return;
2463         }
2464
2465         /*
2466          * TODO: temporary guard to look for proper fix
2467          * if this sink is MST sink, we should not do anything
2468          */
2469         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470                 dc_sink_release(sink);
2471                 return;
2472         }
2473
2474         if (aconnector->dc_sink == sink) {
2475                 /*
2476                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477                  * Do nothing!!
2478                  */
2479                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480                                 aconnector->connector_id);
2481                 if (sink)
2482                         dc_sink_release(sink);
2483                 return;
2484         }
2485
2486         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487                 aconnector->connector_id, aconnector->dc_sink, sink);
2488
2489         mutex_lock(&dev->mode_config.mutex);
2490
2491         /*
2492          * 1. Update status of the drm connector
2493          * 2. Send an event and let userspace tell us what to do
2494          */
2495         if (sink) {
2496                 /*
2497                  * TODO: check if we still need the S3 mode update workaround.
2498                  * If yes, put it here.
2499                  */
2500                 if (aconnector->dc_sink) {
2501                         amdgpu_dm_update_freesync_caps(connector, NULL);
2502                         dc_sink_release(aconnector->dc_sink);
2503                 }
2504
2505                 aconnector->dc_sink = sink;
2506                 dc_sink_retain(aconnector->dc_sink);
2507                 if (sink->dc_edid.length == 0) {
2508                         aconnector->edid = NULL;
2509                         if (aconnector->dc_link->aux_mode) {
2510                                 drm_dp_cec_unset_edid(
2511                                         &aconnector->dm_dp_aux.aux);
2512                         }
2513                 } else {
2514                         aconnector->edid =
2515                                 (struct edid *)sink->dc_edid.raw_edid;
2516
2517                         drm_connector_update_edid_property(connector,
2518                                                            aconnector->edid);
2519                         if (aconnector->dc_link->aux_mode)
2520                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521                                                     aconnector->edid);
2522                 }
2523
2524                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525                 update_connector_ext_caps(aconnector);
2526         } else {
2527                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528                 amdgpu_dm_update_freesync_caps(connector, NULL);
2529                 drm_connector_update_edid_property(connector, NULL);
2530                 aconnector->num_modes = 0;
2531                 dc_sink_release(aconnector->dc_sink);
2532                 aconnector->dc_sink = NULL;
2533                 aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539         }
2540
2541         mutex_unlock(&dev->mode_config.mutex);
2542
2543         update_subconnector_property(aconnector);
2544
2545         if (sink)
2546                 dc_sink_release(sink);
2547 }
2548
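/*
 * Handle a long HPD pulse: re-run sink detection on the link (emulated for
 * forced connectors), update the DRM connector state accordingly and send a
 * hotplug event to userspace. HDCP state is reset, when enabled, so it can
 * be renegotiated after the plug event.
 */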
2549 static void handle_hpd_irq(void *param)
2550 {
2551         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552         struct drm_connector *connector = &aconnector->base;
2553         struct drm_device *dev = connector->dev;
2554         enum dc_connection_type new_connection_type = dc_connection_none;
2555         struct amdgpu_device *adev = drm_to_adev(dev);
2556 #ifdef CONFIG_DRM_AMD_DC_HDCP
2557         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559
2560         if (adev->dm.disable_hpd_irq)
2561                 return;
2562
2563         /*
2564          * In case of failure or MST no need to update connector status or notify the OS
2565          * since (for MST case) MST does this in its own context.
2566          */
2567         mutex_lock(&aconnector->hpd_lock);
2568
2569 #ifdef CONFIG_DRM_AMD_DC_HDCP
2570         if (adev->dm.hdcp_workqueue) {
2571                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2572                 dm_con_state->update_hdcp = true;
2573         }
2574 #endif
2575         if (aconnector->fake_enable)
2576                 aconnector->fake_enable = false;
2577
2578         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2579                 DRM_ERROR("KMS: Failed to detect connector\n");
2580
2581         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2582                 emulated_link_detect(aconnector->dc_link);
2583
2585                 drm_modeset_lock_all(dev);
2586                 dm_restore_drm_connector_state(dev, connector);
2587                 drm_modeset_unlock_all(dev);
2588
2589                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2590                         drm_kms_helper_hotplug_event(dev);
2591
2592         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2593                 if (new_connection_type == dc_connection_none &&
2594                     aconnector->dc_link->type == dc_connection_none)
2595                         dm_set_dpms_off(aconnector->dc_link);
2596
2597                 amdgpu_dm_update_connector_after_detect(aconnector);
2598
2599                 drm_modeset_lock_all(dev);
2600                 dm_restore_drm_connector_state(dev, connector);
2601                 drm_modeset_unlock_all(dev);
2602
2603                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2604                         drm_kms_helper_hotplug_event(dev);
2605         }
2606         mutex_unlock(&aconnector->hpd_lock);
2608 }
2609
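/*
 * Service MST sideband messages for a short HPD pulse: read the ESI (or
 * legacy DPCD 0x200) vector, let the MST manager process it, ACK the handled
 * bits back to the sink, and repeat while new IRQs keep arriving (bounded by
 * max_process_count).
 */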
2610 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2611 {
2612         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2613         uint8_t dret;
2614         bool new_irq_handled = false;
2615         int dpcd_addr;
2616         int dpcd_bytes_to_read;
2617
2618         const int max_process_count = 30;
2619         int process_count = 0;
2620
2621         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2622
2623         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2624                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2625                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2626                 dpcd_addr = DP_SINK_COUNT;
2627         } else {
2628                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2629                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2630                 dpcd_addr = DP_SINK_COUNT_ESI;
2631         }
2632
2633         dret = drm_dp_dpcd_read(
2634                 &aconnector->dm_dp_aux.aux,
2635                 dpcd_addr,
2636                 esi,
2637                 dpcd_bytes_to_read);
2638
2639         while (dret == dpcd_bytes_to_read &&
2640                 process_count < max_process_count) {
2641                 uint8_t retry;
2642                 dret = 0;
2643
2644                 process_count++;
2645
2646                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2647                 /* handle HPD short pulse irq */
2648                 if (aconnector->mst_mgr.mst_state)
2649                         drm_dp_mst_hpd_irq(
2650                                 &aconnector->mst_mgr,
2651                                 esi,
2652                                 &new_irq_handled);
2653
2654                 if (new_irq_handled) {
2655                         /* ACK at DPCD to notify downstream */
2656                         const int ack_dpcd_bytes_to_write =
2657                                 dpcd_bytes_to_read - 1;
2658
2659                         for (retry = 0; retry < 3; retry++) {
2660                                 uint8_t wret;
2661
2662                                 wret = drm_dp_dpcd_write(
2663                                         &aconnector->dm_dp_aux.aux,
2664                                         dpcd_addr + 1,
2665                                         &esi[1],
2666                                         ack_dpcd_bytes_to_write);
2667                                 if (wret == ack_dpcd_bytes_to_write)
2668                                         break;
2669                         }
2670
2671                         /* check if there is new irq to be handled */
2672                         dret = drm_dp_dpcd_read(
2673                                 &aconnector->dm_dp_aux.aux,
2674                                 dpcd_addr,
2675                                 esi,
2676                                 dpcd_bytes_to_read);
2677
2678                         new_irq_handled = false;
2679                 } else {
2680                         break;
2681                 }
2682         }
2683
2684         if (process_count == max_process_count)
2685                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2686 }
2687
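/*
 * Handle a short HPD pulse (HPD_RX): for MST this dispatches sideband
 * messages, otherwise the IRQ data is handed to DC. If DC reports a
 * downstream port change, re-run sink detection and notify userspace.
 */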
2688 static void handle_hpd_rx_irq(void *param)
2689 {
2690         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2691         struct drm_connector *connector = &aconnector->base;
2692         struct drm_device *dev = connector->dev;
2693         struct dc_link *dc_link = aconnector->dc_link;
2694         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2695         bool result = false;
2696         enum dc_connection_type new_connection_type = dc_connection_none;
2697         struct amdgpu_device *adev = drm_to_adev(dev);
2698         union hpd_irq_data hpd_irq_data;
2699
2700         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2701
2702         if (adev->dm.disable_hpd_irq)
2703                 return;
2704
2705         /*
2706          * TODO: Temporarily take this mutex so the HPD interrupt does not
2707          * race on the GPIO; once an i2c helper is implemented, this mutex
2708          * should be retired.
2709          */
2711         if (dc_link->type != dc_connection_mst_branch)
2712                 mutex_lock(&aconnector->hpd_lock);
2713
2714         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2715
2716         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2717                 (dc_link->type == dc_connection_mst_branch)) {
2718                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2719                         result = true;
2720                         dm_handle_hpd_rx_irq(aconnector);
2721                         goto out;
2722                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2723                         result = false;
2724                         dm_handle_hpd_rx_irq(aconnector);
2725                         goto out;
2726                 }
2727         }
2728
2729         mutex_lock(&adev->dm.dc_lock);
2730 #ifdef CONFIG_DRM_AMD_DC_HDCP
2731         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2732 #else
2733         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2734 #endif
2735         mutex_unlock(&adev->dm.dc_lock);
2736
2737 out:
2738         if (result && !is_mst_root_connector) {
2739                 /* Downstream Port status changed. */
2740                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2741                         DRM_ERROR("KMS: Failed to detect connector\n");
2742
2743                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2744                         emulated_link_detect(dc_link);
2745
2746                         if (aconnector->fake_enable)
2747                                 aconnector->fake_enable = false;
2748
2749                         amdgpu_dm_update_connector_after_detect(aconnector);
2750
2752                         drm_modeset_lock_all(dev);
2753                         dm_restore_drm_connector_state(dev, connector);
2754                         drm_modeset_unlock_all(dev);
2755
2756                         drm_kms_helper_hotplug_event(dev);
2757                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2758
2759                         if (aconnector->fake_enable)
2760                                 aconnector->fake_enable = false;
2761
2762                         amdgpu_dm_update_connector_after_detect(aconnector);
2763
2765                         drm_modeset_lock_all(dev);
2766                         dm_restore_drm_connector_state(dev, connector);
2767                         drm_modeset_unlock_all(dev);
2768
2769                         drm_kms_helper_hotplug_event(dev);
2770                 }
2771         }
2772 #ifdef CONFIG_DRM_AMD_DC_HDCP
2773         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2774                 if (adev->dm.hdcp_workqueue)
2775                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2776         }
2777 #endif
2778
2779         if (dc_link->type != dc_connection_mst_branch) {
2780                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2781                 mutex_unlock(&aconnector->hpd_lock);
2782         }
2783 }
2784
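/*
 * Wire up the HPD and HPD_RX (DP short pulse) interrupt sources of every
 * connector to their low-context handlers above.
 */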
2785 static void register_hpd_handlers(struct amdgpu_device *adev)
2786 {
2787         struct drm_device *dev = adev_to_drm(adev);
2788         struct drm_connector *connector;
2789         struct amdgpu_dm_connector *aconnector;
2790         const struct dc_link *dc_link;
2791         struct dc_interrupt_params int_params = {0};
2792
2793         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2794         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2795
2796         list_for_each_entry(connector,
2797                         &dev->mode_config.connector_list, head) {
2798
2799                 aconnector = to_amdgpu_dm_connector(connector);
2800                 dc_link = aconnector->dc_link;
2801
2802                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2803                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2804                         int_params.irq_source = dc_link->irq_source_hpd;
2805
2806                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2807                                         handle_hpd_irq,
2808                                         (void *) aconnector);
2809                 }
2810
2811         if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2813                 /* Also register for DP short pulse (hpd_rx). */
2814                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2815                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2816
2817                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2818                                         handle_hpd_rx_irq,
2819                                         (void *) aconnector);
2820                 }
2821         }
2822 }
2823
2824 #if defined(CONFIG_DRM_AMD_DC_SI)
2825 /* Register IRQ sources and initialize IRQ callbacks */
2826 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2827 {
2828         struct dc *dc = adev->dm.dc;
2829         struct common_irq_params *c_irq_params;
2830         struct dc_interrupt_params int_params = {0};
2831         int r;
2832         int i;
2833         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2834
2835         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2836         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2837
2838         /*
2839          * Actions of amdgpu_irq_add_id():
2840          * 1. Register a set() function with base driver.
2841          *    Base driver will call set() function to enable/disable an
2842          *    interrupt in DC hardware.
2843          * 2. Register amdgpu_dm_irq_handler().
2844          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2845          *    coming from DC hardware.
2846          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2847          *    for acknowledging and handling. */
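
        /*
         * Illustrative dispatch sketch (a simplified assumption for
         * illustration; the exact call chain lives in amdgpu_dm_irq.c):
         *
         *   hardware IRQ
         *     -> amdgpu_dm_irq_handler()      registered in step 2 above
         *       -> dc_interrupt_ack()         DC acknowledges the source
         *         -> registered DM handler    e.g. dm_crtc_high_irq()
         */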
2848
2849         /* Use VBLANK interrupt */
2850         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2851                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2852                 if (r) {
2853                         DRM_ERROR("Failed to add crtc irq id!\n");
2854                         return r;
2855                 }
2856
2857                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2858                 int_params.irq_source =
2859                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2860
2861                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2862
2863                 c_irq_params->adev = adev;
2864                 c_irq_params->irq_src = int_params.irq_source;
2865
2866                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2867                                 dm_crtc_high_irq, c_irq_params);
2868         }
2869
2870         /* Use GRPH_PFLIP interrupt */
2871         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2872                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2873                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2874                 if (r) {
2875                         DRM_ERROR("Failed to add page flip irq id!\n");
2876                         return r;
2877                 }
2878
2879                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2880                 int_params.irq_source =
2881                         dc_interrupt_to_irq_source(dc, i, 0);
2882
2883                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2884
2885                 c_irq_params->adev = adev;
2886                 c_irq_params->irq_src = int_params.irq_source;
2887
2888                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2889                                 dm_pflip_high_irq, c_irq_params);
2891         }
2892
2893         /* HPD */
2894         r = amdgpu_irq_add_id(adev, client_id,
2895                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2896         if (r) {
2897                 DRM_ERROR("Failed to add hpd irq id!\n");
2898                 return r;
2899         }
2900
2901         register_hpd_handlers(adev);
2902
2903         return 0;
2904 }
2905 #endif
2906
2907 /* Register IRQ sources and initialize IRQ callbacks */
2908 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2909 {
2910         struct dc *dc = adev->dm.dc;
2911         struct common_irq_params *c_irq_params;
2912         struct dc_interrupt_params int_params = {0};
2913         int r;
2914         int i;
2915         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2916
2917         if (adev->asic_type >= CHIP_VEGA10)
2918                 client_id = SOC15_IH_CLIENTID_DCE;
2919
2920         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2921         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2922
2923         /*
2924          * Actions of amdgpu_irq_add_id():
2925          * 1. Register a set() function with base driver.
2926          *    Base driver will call set() function to enable/disable an
2927          *    interrupt in DC hardware.
2928          * 2. Register amdgpu_dm_irq_handler().
2929          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2930          *    coming from DC hardware.
2931          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2932          *    for acknowledging and handling. */
2933
2934         /* Use VBLANK interrupt */
2935         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2936                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2937                 if (r) {
2938                         DRM_ERROR("Failed to add crtc irq id!\n");
2939                         return r;
2940                 }
2941
2942                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2943                 int_params.irq_source =
2944                         dc_interrupt_to_irq_source(dc, i, 0);
2945
2946                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2947
2948                 c_irq_params->adev = adev;
2949                 c_irq_params->irq_src = int_params.irq_source;
2950
2951                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2952                                 dm_crtc_high_irq, c_irq_params);
2953         }
2954
2955         /* Use VUPDATE interrupt */
2956         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2957                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2958                 if (r) {
2959                         DRM_ERROR("Failed to add vupdate irq id!\n");
2960                         return r;
2961                 }
2962
2963                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2964                 int_params.irq_source =
2965                         dc_interrupt_to_irq_source(dc, i, 0);
2966
2967                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2968
2969                 c_irq_params->adev = adev;
2970                 c_irq_params->irq_src = int_params.irq_source;
2971
2972                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2973                                 dm_vupdate_high_irq, c_irq_params);
2974         }
2975
2976         /* Use GRPH_PFLIP interrupt */
2977         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2978                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2979                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2980                 if (r) {
2981                         DRM_ERROR("Failed to add page flip irq id!\n");
2982                         return r;
2983                 }
2984
2985                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2986                 int_params.irq_source =
2987                         dc_interrupt_to_irq_source(dc, i, 0);
2988
2989                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2990
2991                 c_irq_params->adev = adev;
2992                 c_irq_params->irq_src = int_params.irq_source;
2993
2994                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2995                                 dm_pflip_high_irq, c_irq_params);
2997         }
2998
2999         /* HPD */
3000         r = amdgpu_irq_add_id(adev, client_id,
3001                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3002         if (r) {
3003                 DRM_ERROR("Failed to add hpd irq id!\n");
3004                 return r;
3005         }
3006
3007         register_hpd_handlers(adev);
3008
3009         return 0;
3010 }
3011
3012 #if defined(CONFIG_DRM_AMD_DC_DCN)
3013 /* Register IRQ sources and initialize IRQ callbacks */
3014 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3015 {
3016         struct dc *dc = adev->dm.dc;
3017         struct common_irq_params *c_irq_params;
3018         struct dc_interrupt_params int_params = {0};
3019         int r;
3020         int i;
3021 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3022         static const unsigned int vrtl_int_srcid[] = {
3023                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3024                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3025                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3026                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3027                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3028                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3029         };
3030 #endif
3031
3032         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3033         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3034
3035         /*
3036          * Actions of amdgpu_irq_add_id():
3037          * 1. Register a set() function with base driver.
3038          *    Base driver will call set() function to enable/disable an
3039          *    interrupt in DC hardware.
3040          * 2. Register amdgpu_dm_irq_handler().
3041          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3042          *    coming from DC hardware.
3043          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3044          *    for acknowledging and handling.
3045          */
3046
3047         /* Use VSTARTUP interrupt */
3048         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3049                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3050                         i++) {
3051                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3052
3053                 if (r) {
3054                         DRM_ERROR("Failed to add crtc irq id!\n");
3055                         return r;
3056                 }
3057
3058                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3059                 int_params.irq_source =
3060                         dc_interrupt_to_irq_source(dc, i, 0);
3061
3062                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3063
3064                 c_irq_params->adev = adev;
3065                 c_irq_params->irq_src = int_params.irq_source;
3066
3067                 amdgpu_dm_irq_register_interrupt(
3068                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3069         }
3070
3071         /* Use otg vertical line interrupt */
3072 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3073         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3074                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3075                                 vrtl_int_srcid[i], &adev->vline0_irq);
3076
3077                 if (r) {
3078                         DRM_ERROR("Failed to add vline0 irq id!\n");
3079                         return r;
3080                 }
3081
3082                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3083                 int_params.irq_source =
3084                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3085
3086                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3087                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3088                         break;
3089                 }
3090
3091                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3092                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3093
3094                 c_irq_params->adev = adev;
3095                 c_irq_params->irq_src = int_params.irq_source;
3096
3097                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3098                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3099         }
3100 #endif
3101
3102         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3103          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3104          * to trigger at end of each vblank, regardless of state of the lock,
3105          * matching DCE behaviour.
3106          */
3107         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3108              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3109              i++) {
3110                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3111
3112                 if (r) {
3113                         DRM_ERROR("Failed to add vupdate irq id!\n");
3114                         return r;
3115                 }
3116
3117                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3118                 int_params.irq_source =
3119                         dc_interrupt_to_irq_source(dc, i, 0);
3120
3121                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3122
3123                 c_irq_params->adev = adev;
3124                 c_irq_params->irq_src = int_params.irq_source;
3125
3126                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3127                                 dm_vupdate_high_irq, c_irq_params);
3128         }
3129
3130         /* Use GRPH_PFLIP interrupt */
3131         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3132                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3133                         i++) {
3134                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3135                 if (r) {
3136                         DRM_ERROR("Failed to add page flip irq id!\n");
3137                         return r;
3138                 }
3139
3140                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3141                 int_params.irq_source =
3142                         dc_interrupt_to_irq_source(dc, i, 0);
3143
3144                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3145
3146                 c_irq_params->adev = adev;
3147                 c_irq_params->irq_src = int_params.irq_source;
3148
3149                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3150                                 dm_pflip_high_irq, c_irq_params);
3152         }
3153
3154         if (dc->ctx->dmub_srv) {
3155                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3156                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3157
3158                 if (r) {
3159                         DRM_ERROR("Failed to add dmub trace irq id!\n");
3160                         return r;
3161                 }
3162
3163                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3164                 int_params.irq_source =
3165                         dc_interrupt_to_irq_source(dc, i, 0);
3166
3167                 c_irq_params = &adev->dm.dmub_trace_params[0];
3168
3169                 c_irq_params->adev = adev;
3170                 c_irq_params->irq_src = int_params.irq_source;
3171
3172                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3173                                 dm_dmub_trace_high_irq, c_irq_params);
3174         }
3175
3176         /* HPD */
3177         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3178                         &adev->hpd_irq);
3179         if (r) {
3180                 DRM_ERROR("Failed to add hpd irq id!\n");
3181                 return r;
3182         }
3183
3184         register_hpd_handlers(adev);
3185
3186         return 0;
3187 }
3188 #endif
3189
3190 /*
3191  * Acquires the lock for the atomic state object and returns
3192  * the new atomic state.
3193  *
3194  * This should only be called during atomic check.
3195  */
3196 static int dm_atomic_get_state(struct drm_atomic_state *state,
3197                                struct dm_atomic_state **dm_state)
3198 {
3199         struct drm_device *dev = state->dev;
3200         struct amdgpu_device *adev = drm_to_adev(dev);
3201         struct amdgpu_display_manager *dm = &adev->dm;
3202         struct drm_private_state *priv_state;
3203
3204         if (*dm_state)
3205                 return 0;
3206
3207         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3208         if (IS_ERR(priv_state))
3209                 return PTR_ERR(priv_state);
3210
3211         *dm_state = to_dm_atomic_state(priv_state);
3212
3213         return 0;
3214 }
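
/*
 * Minimal usage sketch (hypothetical caller, for illustration only; as
 * noted above it must run during atomic check):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... dm_state->context is now safe to inspect or modify ...
 */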
3215
3216 static struct dm_atomic_state *
3217 dm_atomic_get_new_state(struct drm_atomic_state *state)
3218 {
3219         struct drm_device *dev = state->dev;
3220         struct amdgpu_device *adev = drm_to_adev(dev);
3221         struct amdgpu_display_manager *dm = &adev->dm;
3222         struct drm_private_obj *obj;
3223         struct drm_private_state *new_obj_state;
3224         int i;
3225
3226         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3227                 if (obj->funcs == dm->atomic_obj.funcs)
3228                         return to_dm_atomic_state(new_obj_state);
3229         }
3230
3231         return NULL;
3232 }
3233
3234 static struct drm_private_state *
3235 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3236 {
3237         struct dm_atomic_state *old_state, *new_state;
3238
3239         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3240         if (!new_state)
3241                 return NULL;
3242
3243         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3244
3245         old_state = to_dm_atomic_state(obj->state);
3246
3247         if (old_state && old_state->context)
3248                 new_state->context = dc_copy_state(old_state->context);
3249
3250         if (!new_state->context) {
3251                 kfree(new_state);
3252                 return NULL;
3253         }
3254
3255         return &new_state->base;
3256 }
3257
3258 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3259                                     struct drm_private_state *state)
3260 {
3261         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3262
3263         if (dm_state && dm_state->context)
3264                 dc_release_state(dm_state->context);
3265
3266         kfree(dm_state);
3267 }
3268
3269 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3270         .atomic_duplicate_state = dm_atomic_duplicate_state,
3271         .atomic_destroy_state = dm_atomic_destroy_state,
3272 };
3273
3274 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3275 {
3276         struct dm_atomic_state *state;
3277         int r;
3278
3279         adev->mode_info.mode_config_initialized = true;
3280
3281         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3282         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3283
3284         adev_to_drm(adev)->mode_config.max_width = 16384;
3285         adev_to_drm(adev)->mode_config.max_height = 16384;
3286
3287         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3288         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3289         /* indicates support for immediate flip */
3290         adev_to_drm(adev)->mode_config.async_page_flip = true;
3291
3292         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3293
3294         state = kzalloc(sizeof(*state), GFP_KERNEL);
3295         if (!state)
3296                 return -ENOMEM;
3297
3298         state->context = dc_create_state(adev->dm.dc);
3299         if (!state->context) {
3300                 kfree(state);
3301                 return -ENOMEM;
3302         }
3303
3304         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3305
3306         drm_atomic_private_obj_init(adev_to_drm(adev),
3307                                     &adev->dm.atomic_obj,
3308                                     &state->base,
3309                                     &dm_atomic_state_funcs);
3310
3311         r = amdgpu_display_modeset_create_props(adev);
3312         if (r) {
3313                 dc_release_state(state->context);
3314                 kfree(state);
3315                 return r;
3316         }
3317
3318         r = amdgpu_dm_audio_init(adev);
3319         if (r) {
3320                 dc_release_state(state->context);
3321                 kfree(state);
3322                 return r;
3323         }
3324
3325         return 0;
3326 }
3327
3328 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3329 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3330 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3331
3332 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3333         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3334
3335 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3336 {
3337 #if defined(CONFIG_ACPI)
3338         struct amdgpu_dm_backlight_caps caps;
3339
3340         memset(&caps, 0, sizeof(caps));
3341
3342         if (dm->backlight_caps.caps_valid)
3343                 return;
3344
3345         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3346         if (caps.caps_valid) {
3347                 dm->backlight_caps.caps_valid = true;
3348                 if (caps.aux_support)
3349                         return;
3350                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3351                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3352         } else {
3353                 dm->backlight_caps.min_input_signal =
3354                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3355                 dm->backlight_caps.max_input_signal =
3356                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3357         }
3358 #else
3359         if (dm->backlight_caps.aux_support)
3360                 return;
3361
3362         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3363         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3364 #endif
3365 }
3366
3367 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3368                                 unsigned *min, unsigned *max)
3369 {
3370         if (!caps)
3371                 return 0;
3372
3373         if (caps->aux_support) {
3374                 // Firmware limits are in nits, DC API wants millinits.
3375                 *max = 1000 * caps->aux_max_input_signal;
3376                 *min = 1000 * caps->aux_min_input_signal;
3377         } else {
3378                 // Firmware limits are 8-bit, PWM control is 16-bit.
3379                 *max = 0x101 * caps->max_input_signal;
3380                 *min = 0x101 * caps->min_input_signal;
3381         }
3382         return 1;
3383 }
3384
3385 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3386                                         uint32_t brightness)
3387 {
3388         unsigned min, max;
3389
3390         if (!get_brightness_range(caps, &min, &max))
3391                 return brightness;
3392
3393         // Rescale 0..255 to min..max
3394         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3395                                        AMDGPU_MAX_BL_LEVEL);
3396 }
3397
3398 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3399                                       uint32_t brightness)
3400 {
3401         unsigned min, max;
3402
3403         if (!get_brightness_range(caps, &min, &max))
3404                 return brightness;
3405
3406         if (brightness < min)
3407                 return 0;
3408         // Rescale min..max to 0..255
3409         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3410                                  max - min);
3411 }
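
/*
 * Worked example for the PWM path above (illustrative numbers, assuming
 * AMDGPU_MAX_BL_LEVEL == 255): with min_input_signal = 12 and
 * max_input_signal = 255, get_brightness_range() yields
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user
 * brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432, and
 * converting 34432 back gives
 * DIV_ROUND_CLOSEST(255 * (34432 - 3084), 65535 - 3084) = 128.
 */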
3412
3413 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3414 {
3415         struct amdgpu_display_manager *dm = bl_get_data(bd);
3416         struct amdgpu_dm_backlight_caps caps;
3417         struct dc_link *link = NULL;
3418         u32 brightness;
3419         bool rc;
3420
3421         amdgpu_dm_update_backlight_caps(dm);
3422         caps = dm->backlight_caps;
3423
3424         link = (struct dc_link *)dm->backlight_link;
3425
3426         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3427         // Change brightness based on AUX property
3428         if (caps.aux_support)
3429                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3430                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3431         else
3432                 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3433
3434         return rc ? 0 : 1;
3435 }
3436
3437 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3438 {
3439         struct amdgpu_display_manager *dm = bl_get_data(bd);
3440         struct amdgpu_dm_backlight_caps caps;
3441
3442         amdgpu_dm_update_backlight_caps(dm);
3443         caps = dm->backlight_caps;
3444
3445         if (caps.aux_support) {
3446                 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3447                 u32 avg, peak;
3448                 bool rc;
3449
3450                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3451                 if (!rc)
3452                         return bd->props.brightness;
3453                 return convert_brightness_to_user(&caps, avg);
3454         } else {
3455                 int ret = dc_link_get_backlight_level(dm->backlight_link);
3456
3457                 if (ret == DC_ERROR_UNEXPECTED)
3458                         return bd->props.brightness;
3459                 return convert_brightness_to_user(&caps, ret);
3460         }
3461 }
3462
3463 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3464         .options = BL_CORE_SUSPENDRESUME,
3465         .get_brightness = amdgpu_dm_backlight_get_brightness,
3466         .update_status  = amdgpu_dm_backlight_update_status,
3467 };
3468
3469 static void
3470 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3471 {
3472         char bl_name[16];
3473         struct backlight_properties props = { 0 };
3474
3475         amdgpu_dm_update_backlight_caps(dm);
3476
3477         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3478         props.brightness = AMDGPU_MAX_BL_LEVEL;
3479         props.type = BACKLIGHT_RAW;
3480
3481         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3482                  adev_to_drm(dm->adev)->primary->index);
3483
3484         dm->backlight_dev = backlight_device_register(bl_name,
3485                                                       adev_to_drm(dm->adev)->dev,
3486                                                       dm,
3487                                                       &amdgpu_dm_backlight_ops,
3488                                                       &props);
3489
3490         if (IS_ERR(dm->backlight_dev))
3491                 DRM_ERROR("DM: Backlight registration failed!\n");
3492         else
3493                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3494 }
3495
3496 #endif
3497
3498 static int initialize_plane(struct amdgpu_display_manager *dm,
3499                             struct amdgpu_mode_info *mode_info, int plane_id,
3500                             enum drm_plane_type plane_type,
3501                             const struct dc_plane_cap *plane_cap)
3502 {
3503         struct drm_plane *plane;
3504         unsigned long possible_crtcs;
3505         int ret = 0;
3506
3507         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3508         if (!plane) {
3509                 DRM_ERROR("KMS: Failed to allocate plane\n");
3510                 return -ENOMEM;
3511         }
3512         plane->type = plane_type;
3513
3514         /*
3515          * HACK: IGT tests expect that the primary plane for a CRTC
3516          * can only have one possible CRTC. Only expose support for
3517          * any CRTC if they're not going to be used as a primary plane
3518          * for a CRTC - like overlay or underlay planes.
3519          */
3520         possible_crtcs = 1 << plane_id;
3521         if (plane_id >= dm->dc->caps.max_streams)
3522                 possible_crtcs = 0xff;
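                /*
                 * E.g. plane_id 0 yields possible_crtcs = 0x1, binding the
                 * primary plane to CRTC 0 only, while an overlay plane past
                 * max_streams gets 0xff and may be placed on any CRTC.
                 */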
3523
3524         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3525
3526         if (ret) {
3527                 DRM_ERROR("KMS: Failed to initialize plane\n");
3528                 kfree(plane);
3529                 return ret;
3530         }
3531
3532         if (mode_info)
3533                 mode_info->planes[plane_id] = plane;
3534
3535         return ret;
3536 }
3537
3539 static void register_backlight_device(struct amdgpu_display_manager *dm,
3540                                       struct dc_link *link)
3541 {
3542 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3543         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3544
3545         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3546             link->type != dc_connection_none) {
3547                 /*
3548                  * Even if registration fails, we should continue with
3549                  * DM initialization because not having a backlight control
3550                  * is better than a black screen.
3551                  */
3552                 amdgpu_dm_register_backlight_device(dm);
3553
3554                 if (dm->backlight_dev)
3555                         dm->backlight_link = link;
3556         }
3557 #endif
3558 }
3559
3561 /*
3562  * In this architecture, the association
3563  * connector -> encoder -> crtc
3564  * is not really required. The crtc and connector will hold the
3565  * display_index as an abstraction to use with the DAL component.
3566  *
3567  * Returns 0 on success
3568  */
3569 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3570 {
3571         struct amdgpu_display_manager *dm = &adev->dm;
3572         int32_t i;
3573         struct amdgpu_dm_connector *aconnector = NULL;
3574         struct amdgpu_encoder *aencoder = NULL;
3575         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3576         uint32_t link_cnt;
3577         int32_t primary_planes;
3578         enum dc_connection_type new_connection_type = dc_connection_none;
3579         const struct dc_plane_cap *plane;
3580
3581         dm->display_indexes_num = dm->dc->caps.max_streams;
3582         /* Update the actually used number of CRTCs */
3583         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3584
3585         link_cnt = dm->dc->caps.max_links;
3586         if (amdgpu_dm_mode_config_init(dm->adev)) {
3587                 DRM_ERROR("DM: Failed to initialize mode config\n");
3588                 return -EINVAL;
3589         }
3590
3591         /* There is one primary plane per CRTC */
3592         primary_planes = dm->dc->caps.max_streams;
3593         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3594
3595         /*
3596          * Initialize primary planes, implicit planes for legacy IOCTLs.
3597          * Order is reversed to match iteration order in atomic check.
3598          */
3599         for (i = (primary_planes - 1); i >= 0; i--) {
3600                 plane = &dm->dc->caps.planes[i];
3601
3602                 if (initialize_plane(dm, mode_info, i,
3603                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3604                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3605                         goto fail;
3606                 }
3607         }
3608
3609         /*
3610          * Initialize overlay planes, index starting after primary planes.
3611          * These planes have a higher DRM index than the primary planes since
3612          * they should be considered as having a higher z-order.
3613          * Order is reversed to match iteration order in atomic check.
3614          *
3615          * Only support DCN for now, and only expose one so we don't encourage
3616          * userspace to use up all the pipes.
3617          */
3618         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3619                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3620
3621                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3622                         continue;
3623
3624                 if (!plane->blends_with_above || !plane->blends_with_below)
3625                         continue;
3626
3627                 if (!plane->pixel_format_support.argb8888)
3628                         continue;
3629
3630                 if (initialize_plane(dm, NULL, primary_planes + i,
3631                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3632                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3633                         goto fail;
3634                 }
3635
3636                 /* Only create one overlay plane. */
3637                 break;
3638         }
3639
3640         for (i = 0; i < dm->dc->caps.max_streams; i++)
3641                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3642                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3643                         goto fail;
3644                 }
3645
3646         /* Loop over all connectors on the board */
3647         for (i = 0; i < link_cnt; i++) {
3648                 struct dc_link *link = NULL;
3649
3650                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3651                         DRM_ERROR(
3652                                 "KMS: Cannot support more than %d display indexes\n",
3653                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3654                         continue;
3655                 }
3656
3657                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3658                 if (!aconnector)
3659                         goto fail;
3660
3661                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3662                 if (!aencoder)
3663                         goto fail;
3664
3665                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3666                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3667                         goto fail;
3668                 }
3669
3670                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3671                         DRM_ERROR("KMS: Failed to initialize connector\n");
3672                         goto fail;
3673                 }
3674
3675                 link = dc_get_link_at_index(dm->dc, i);
3676
3677                 if (!dc_link_detect_sink(link, &new_connection_type))
3678                         DRM_ERROR("KMS: Failed to detect connector\n");
3679
3680                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3681                         emulated_link_detect(link);
3682                         amdgpu_dm_update_connector_after_detect(aconnector);
3683
3684                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3685                         amdgpu_dm_update_connector_after_detect(aconnector);
3686                         register_backlight_device(dm, link);
3687                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3688                                 amdgpu_dm_set_psr_caps(link);
3689                 }
3690
3692         }
3693
3694         /* Software is initialized. Now we can register interrupt handlers. */
3695         switch (adev->asic_type) {
3696 #if defined(CONFIG_DRM_AMD_DC_SI)
3697         case CHIP_TAHITI:
3698         case CHIP_PITCAIRN:
3699         case CHIP_VERDE:
3700         case CHIP_OLAND:
3701                 if (dce60_register_irq_handlers(dm->adev)) {
3702                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3703                         goto fail;
3704                 }
3705                 break;
3706 #endif
3707         case CHIP_BONAIRE:
3708         case CHIP_HAWAII:
3709         case CHIP_KAVERI:
3710         case CHIP_KABINI:
3711         case CHIP_MULLINS:
3712         case CHIP_TONGA:
3713         case CHIP_FIJI:
3714         case CHIP_CARRIZO:
3715         case CHIP_STONEY:
3716         case CHIP_POLARIS11:
3717         case CHIP_POLARIS10:
3718         case CHIP_POLARIS12:
3719         case CHIP_VEGAM:
3720         case CHIP_VEGA10:
3721         case CHIP_VEGA12:
3722         case CHIP_VEGA20:
3723                 if (dce110_register_irq_handlers(dm->adev)) {
3724                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3725                         goto fail;
3726                 }
3727                 break;
3728 #if defined(CONFIG_DRM_AMD_DC_DCN)
3729         case CHIP_RAVEN:
3730         case CHIP_NAVI12:
3731         case CHIP_NAVI10:
3732         case CHIP_NAVI14:
3733         case CHIP_RENOIR:
3734         case CHIP_SIENNA_CICHLID:
3735         case CHIP_NAVY_FLOUNDER:
3736         case CHIP_DIMGREY_CAVEFISH:
3737         case CHIP_VANGOGH:
3738                 if (dcn10_register_irq_handlers(dm->adev)) {
3739                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3740                         goto fail;
3741                 }
3742                 break;
3743 #endif
3744         default:
3745                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3746                 goto fail;
3747         }
3748
3749         return 0;
3750 fail:
3751         kfree(aencoder);
3752         kfree(aconnector);
3753
3754         return -EINVAL;
3755 }
3756
3757 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3758 {
3759         drm_mode_config_cleanup(dm->ddev);
3760         drm_atomic_private_obj_fini(&dm->atomic_obj);
3762 }
3763
3764 /******************************************************************************
3765  * amdgpu_display_funcs functions
3766  *****************************************************************************/
3767
3768 /*
3769  * dm_bandwidth_update - program display watermarks
3770  *
3771  * @adev: amdgpu_device pointer
3772  *
3773  * Calculate and program the display watermarks and line buffer allocation.
3774  */
3775 static void dm_bandwidth_update(struct amdgpu_device *adev)
3776 {
3777         /* TODO: implement later */
3778 }
3779
3780 static const struct amdgpu_display_funcs dm_display_funcs = {
3781         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3782         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3783         .backlight_set_level = NULL, /* never called for DC */
3784         .backlight_get_level = NULL, /* never called for DC */
3785         .hpd_sense = NULL,/* called unconditionally */
3786         .hpd_set_polarity = NULL, /* called unconditionally */
3787         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3788         .page_flip_get_scanoutpos =
3789                 dm_crtc_get_scanoutpos,/* called unconditionally */
3790         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3791         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3792 };
3793
3794 #if defined(CONFIG_DEBUG_KERNEL_DC)
3795
3796 static ssize_t s3_debug_store(struct device *device,
3797                               struct device_attribute *attr,
3798                               const char *buf,
3799                               size_t count)
3800 {
3801         int ret;
3802         int s3_state;
3803         struct drm_device *drm_dev = dev_get_drvdata(device);
3804         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3805
3806         ret = kstrtoint(buf, 0, &s3_state);
3807
3808         if (ret == 0) {
3809                 if (s3_state) {
3810                         dm_resume(adev);
3811                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3812                 } else
3813                         dm_suspend(adev);
3814         }
3815
3816         return ret == 0 ? count : 0;
3817 }
3818
3819 DEVICE_ATTR_WO(s3_debug);
3820
3821 #endif
3822
3823 static int dm_early_init(void *handle)
3824 {
3825         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3826
3827         switch (adev->asic_type) {
3828 #if defined(CONFIG_DRM_AMD_DC_SI)
3829         case CHIP_TAHITI:
3830         case CHIP_PITCAIRN:
3831         case CHIP_VERDE:
3832                 adev->mode_info.num_crtc = 6;
3833                 adev->mode_info.num_hpd = 6;
3834                 adev->mode_info.num_dig = 6;
3835                 break;
3836         case CHIP_OLAND:
3837                 adev->mode_info.num_crtc = 2;
3838                 adev->mode_info.num_hpd = 2;
3839                 adev->mode_info.num_dig = 2;
3840                 break;
3841 #endif
3842         case CHIP_BONAIRE:
3843         case CHIP_HAWAII:
3844                 adev->mode_info.num_crtc = 6;
3845                 adev->mode_info.num_hpd = 6;
3846                 adev->mode_info.num_dig = 6;
3847                 break;
3848         case CHIP_KAVERI:
3849                 adev->mode_info.num_crtc = 4;
3850                 adev->mode_info.num_hpd = 6;
3851                 adev->mode_info.num_dig = 7;
3852                 break;
3853         case CHIP_KABINI:
3854         case CHIP_MULLINS:
3855                 adev->mode_info.num_crtc = 2;
3856                 adev->mode_info.num_hpd = 6;
3857                 adev->mode_info.num_dig = 6;
3858                 break;
3859         case CHIP_FIJI:
3860         case CHIP_TONGA:
3861                 adev->mode_info.num_crtc = 6;
3862                 adev->mode_info.num_hpd = 6;
3863                 adev->mode_info.num_dig = 7;
3864                 break;
3865         case CHIP_CARRIZO:
3866                 adev->mode_info.num_crtc = 3;
3867                 adev->mode_info.num_hpd = 6;
3868                 adev->mode_info.num_dig = 9;
3869                 break;
3870         case CHIP_STONEY:
3871                 adev->mode_info.num_crtc = 2;
3872                 adev->mode_info.num_hpd = 6;
3873                 adev->mode_info.num_dig = 9;
3874                 break;
3875         case CHIP_POLARIS11:
3876         case CHIP_POLARIS12:
3877                 adev->mode_info.num_crtc = 5;
3878                 adev->mode_info.num_hpd = 5;
3879                 adev->mode_info.num_dig = 5;
3880                 break;
3881         case CHIP_POLARIS10:
3882         case CHIP_VEGAM:
3883                 adev->mode_info.num_crtc = 6;
3884                 adev->mode_info.num_hpd = 6;
3885                 adev->mode_info.num_dig = 6;
3886                 break;
3887         case CHIP_VEGA10:
3888         case CHIP_VEGA12:
3889         case CHIP_VEGA20:
3890                 adev->mode_info.num_crtc = 6;
3891                 adev->mode_info.num_hpd = 6;
3892                 adev->mode_info.num_dig = 6;
3893                 break;
3894 #if defined(CONFIG_DRM_AMD_DC_DCN)
3895         case CHIP_RAVEN:
3896         case CHIP_RENOIR:
3897         case CHIP_VANGOGH:
3898                 adev->mode_info.num_crtc = 4;
3899                 adev->mode_info.num_hpd = 4;
3900                 adev->mode_info.num_dig = 4;
3901                 break;
3902         case CHIP_NAVI10:
3903         case CHIP_NAVI12:
3904         case CHIP_SIENNA_CICHLID:
3905         case CHIP_NAVY_FLOUNDER:
3906                 adev->mode_info.num_crtc = 6;
3907                 adev->mode_info.num_hpd = 6;
3908                 adev->mode_info.num_dig = 6;
3909                 break;
3910         case CHIP_NAVI14:
3911         case CHIP_DIMGREY_CAVEFISH:
3912                 adev->mode_info.num_crtc = 5;
3913                 adev->mode_info.num_hpd = 5;
3914                 adev->mode_info.num_dig = 5;
3915                 break;
3916 #endif
3917         default:
3918                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3919                 return -EINVAL;
3920         }
3921
3922         amdgpu_dm_set_irq_funcs(adev);
3923
3924         if (adev->mode_info.funcs == NULL)
3925                 adev->mode_info.funcs = &dm_display_funcs;
3926
3927         /*
3928          * Note: Do NOT change adev->audio_endpt_rreg and
3929          * adev->audio_endpt_wreg because they are initialised in
3930          * amdgpu_device_init()
3931          */
3932 #if defined(CONFIG_DEBUG_KERNEL_DC)
3933         device_create_file(
3934                 adev_to_drm(adev)->dev,
3935                 &dev_attr_s3_debug);
3936 #endif
3937
3938         return 0;
3939 }
3940
3941 static bool modeset_required(struct drm_crtc_state *crtc_state,
3942                              struct dc_stream_state *new_stream,
3943                              struct dc_stream_state *old_stream)
3944 {
3945         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3946 }
3947
3948 static bool modereset_required(struct drm_crtc_state *crtc_state)
3949 {
3950         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3951 }
3952
3953 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3954 {
3955         drm_encoder_cleanup(encoder);
3956         kfree(encoder);
3957 }
3958
3959 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3960         .destroy = amdgpu_dm_encoder_destroy,
3961 };
3962
3964 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3965                                          struct drm_framebuffer *fb,
3966                                          int *min_downscale, int *max_upscale)
3967 {
3968         struct amdgpu_device *adev = drm_to_adev(dev);
3969         struct dc *dc = adev->dm.dc;
3970         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3971         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3972
3973         switch (fb->format->format) {
3974         case DRM_FORMAT_P010:
3975         case DRM_FORMAT_NV12:
3976         case DRM_FORMAT_NV21:
3977                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3978                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3979                 break;
3980
3981         case DRM_FORMAT_XRGB16161616F:
3982         case DRM_FORMAT_ARGB16161616F:
3983         case DRM_FORMAT_XBGR16161616F:
3984         case DRM_FORMAT_ABGR16161616F:
3985                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3986                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3987                 break;
3988
3989         default:
3990                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3991                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3992                 break;
3993         }
3994
3995         /*
3996          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3997          * scaling factor of 1.0 == 1000 units.
3998          */
3999         if (*max_upscale == 1)
4000                 *max_upscale = 1000;
4001
4002         if (*min_downscale == 1)
4003                 *min_downscale = 1000;
4004 }
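
/*
 * Worked example (illustrative): scale factors are expressed in units of
 * 0.001, so min_downscale = 250 and max_upscale = 16000 allow scaling
 * between 0.25x and 16x. In fill_dc_scaling_info() below, a 3840 px wide
 * source rect on a 1920 px wide destination gives
 * scale_w = 1920 * 1000 / 3840 = 500, a 0.5x downscale, which passes.
 */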
4005
4007 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4008                                 struct dc_scaling_info *scaling_info)
4009 {
4010         int scale_w, scale_h, min_downscale, max_upscale;
4011
4012         memset(scaling_info, 0, sizeof(*scaling_info));
4013
4014         /* Source is fixed point 16.16, but we ignore the fractional part for now */
4015         scaling_info->src_rect.x = state->src_x >> 16;
4016         scaling_info->src_rect.y = state->src_y >> 16;
4017
4018         /*
4019          * For reasons we don't (yet) fully understand, a non-zero
4020          * src_y coordinate into an NV12 buffer can cause a
4021          * system hang. To avoid hangs (and maybe be overly cautious)
4022          * let's reject both non-zero src_x and src_y.
4023          *
4024          * We currently know of only one use-case to reproduce a
4025          * scenario with non-zero src_x and src_y for NV12, which
4026          * is to gesture the YouTube Android app into full screen
4027          * on ChromeOS.
4028          */
4029         if (state->fb &&
4030             state->fb->format->format == DRM_FORMAT_NV12 &&
4031             (scaling_info->src_rect.x != 0 ||
4032              scaling_info->src_rect.y != 0))
4033                 return -EINVAL;
4034
4035         scaling_info->src_rect.width = state->src_w >> 16;
4036         if (scaling_info->src_rect.width == 0)
4037                 return -EINVAL;
4038
4039         scaling_info->src_rect.height = state->src_h >> 16;
4040         if (scaling_info->src_rect.height == 0)
4041                 return -EINVAL;
4042
4043         scaling_info->dst_rect.x = state->crtc_x;
4044         scaling_info->dst_rect.y = state->crtc_y;
4045
4046         if (state->crtc_w == 0)
4047                 return -EINVAL;
4048
4049         scaling_info->dst_rect.width = state->crtc_w;
4050
4051         if (state->crtc_h == 0)
4052                 return -EINVAL;
4053
4054         scaling_info->dst_rect.height = state->crtc_h;
4055
4056         /* DRM doesn't specify clipping on destination output. */
4057         scaling_info->clip_rect = scaling_info->dst_rect;
4058
4059         /* Validate scaling per-format with DC plane caps */
4060         if (state->plane && state->plane->dev && state->fb) {
4061                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4062                                              &min_downscale, &max_upscale);
4063         } else {
4064                 min_downscale = 250;
4065                 max_upscale = 16000;
4066         }
4067
4068         scale_w = scaling_info->dst_rect.width * 1000 /
4069                   scaling_info->src_rect.width;
4070
4071         if (scale_w < min_downscale || scale_w > max_upscale)
4072                 return -EINVAL;
4073
4074         scale_h = scaling_info->dst_rect.height * 1000 /
4075                   scaling_info->src_rect.height;
4076
4077         if (scale_h < min_downscale || scale_h > max_upscale)
4078                 return -EINVAL;
4079
4080         /*
4081          * The "scaling_quality" can be ignored for now; quality = 0 lets DC
4082          * assume reasonable defaults based on the format.
4083          */
4084
4085         return 0;
4086 }
4087
4088 static void
4089 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4090                                  uint64_t tiling_flags)
4091 {
4092         /* Fill GFX8 params */
4093         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4094                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4095
4096                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4097                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4098                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4099                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4100                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4101
4102                 /* XXX fix me for VI */
4103                 tiling_info->gfx8.num_banks = num_banks;
4104                 tiling_info->gfx8.array_mode =
4105                                 DC_ARRAY_2D_TILED_THIN1;
4106                 tiling_info->gfx8.tile_split = tile_split;
4107                 tiling_info->gfx8.bank_width = bankw;
4108                 tiling_info->gfx8.bank_height = bankh;
4109                 tiling_info->gfx8.tile_aspect = mtaspect;
4110                 tiling_info->gfx8.tile_mode =
4111                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4112         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4113                         == DC_ARRAY_1D_TILED_THIN1) {
4114                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4115         }
4116
4117         tiling_info->gfx8.pipe_config =
4118                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4119 }
4120
4121 static void
4122 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4123                                   union dc_tiling_info *tiling_info)
4124 {
4125         tiling_info->gfx9.num_pipes =
4126                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4127         tiling_info->gfx9.num_banks =
4128                 adev->gfx.config.gb_addr_config_fields.num_banks;
4129         tiling_info->gfx9.pipe_interleave =
4130                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4131         tiling_info->gfx9.num_shader_engines =
4132                 adev->gfx.config.gb_addr_config_fields.num_se;
4133         tiling_info->gfx9.max_compressed_frags =
4134                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4135         tiling_info->gfx9.num_rb_per_se =
4136                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4137         tiling_info->gfx9.shaderEnable = 1;
4138         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4139             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4140             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4141             adev->asic_type == CHIP_VANGOGH)
4142                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4143 }
4144
4145 static int
4146 validate_dcc(struct amdgpu_device *adev,
4147              const enum surface_pixel_format format,
4148              const enum dc_rotation_angle rotation,
4149              const union dc_tiling_info *tiling_info,
4150              const struct dc_plane_dcc_param *dcc,
4151              const struct dc_plane_address *address,
4152              const struct plane_size *plane_size)
4153 {
4154         struct dc *dc = adev->dm.dc;
4155         struct dc_dcc_surface_param input;
4156         struct dc_surface_dcc_cap output;
4157
4158         memset(&input, 0, sizeof(input));
4159         memset(&output, 0, sizeof(output));
4160
4161         if (!dcc->enable)
4162                 return 0;
4163
4164         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4165             !dc->cap_funcs.get_dcc_compression_cap)
4166                 return -EINVAL;
4167
4168         input.format = format;
4169         input.surface_size.width = plane_size->surface_size.width;
4170         input.surface_size.height = plane_size->surface_size.height;
4171         input.swizzle_mode = tiling_info->gfx9.swizzle;
4172
4173         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4174                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4175         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4176                 input.scan = SCAN_DIRECTION_VERTICAL;
4177
4178         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4179                 return -EINVAL;
4180
4181         if (!output.capable)
4182                 return -EINVAL;
4183
4184         if (dcc->independent_64b_blks == 0 &&
4185             output.grph.rgb.independent_64b_blks != 0)
4186                 return -EINVAL;
4187
4188         return 0;
4189 }
4190
4191 static bool
4192 modifier_has_dcc(uint64_t modifier)
4193 {
4194         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4195 }
4196
4197 static unsigned
4198 modifier_gfx9_swizzle_mode(uint64_t modifier)
4199 {
4200         if (modifier == DRM_FORMAT_MOD_LINEAR)
4201                 return 0;
4202
4203         return AMD_FMT_MOD_GET(TILE, modifier);
4204 }
4205
4206 static const struct drm_format_info *
4207 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4208 {
4209         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4210 }
4211
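/*
 * Derive GFX9+ tiling parameters from a framebuffer modifier: start from the
 * device defaults, then override the pipe/SE counts (and packers or banks,
 * depending on the GPU family) encoded in AMD modifiers.
 */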
4212 static void
4213 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4214                                     union dc_tiling_info *tiling_info,
4215                                     uint64_t modifier)
4216 {
4217         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4218         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4219         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4220         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4221
4222         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4223
4224         if (!IS_AMD_FMT_MOD(modifier))
4225                 return;
4226
4227         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4228         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4229
4230         if (adev->family >= AMDGPU_FAMILY_NV) {
4231                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4232         } else {
4233                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4234
4235                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4236         }
4237 }
4238
4239 enum dm_micro_swizzle {
4240         MICRO_SWIZZLE_Z = 0,
4241         MICRO_SWIZZLE_S = 1,
4242         MICRO_SWIZZLE_D = 2,
4243         MICRO_SWIZZLE_R = 3
4244 };
4245
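/*
 * Check whether a plane supports a given format/modifier pair. LINEAR and
 * INVALID are always allowed; anything else must be on the plane's modifier
 * list and pass the per-family micro-swizzle and DCC restrictions below.
 */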
4246 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4247                                           uint32_t format,
4248                                           uint64_t modifier)
4249 {
4250         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4251         const struct drm_format_info *info = drm_format_info(format);
4252         int i;
4253
4254         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4255
4256         if (!info)
4257                 return false;
4258
4259         /*
4260          * We always have to allow these modifiers:
4261          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4262          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4263          */
4264         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4265             modifier == DRM_FORMAT_MOD_INVALID) {
4266                 return true;
4267         }
4268
4269         /* Check that the modifier is on the list of the plane's supported modifiers. */
4270         for (i = 0; i < plane->modifier_count; i++) {
4271                 if (modifier == plane->modifiers[i])
4272                         break;
4273         }
4274         if (i == plane->modifier_count)
4275                 return false;
4276
4277         /*
4278          * For D swizzle the canonical modifier depends on the bpp, so check
4279          * it here.
4280          */
4281         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4282             adev->family >= AMDGPU_FAMILY_NV) {
4283                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4284                         return false;
4285         }
4286
4287         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4288             info->cpp[0] < 8)
4289                 return false;
4290
4291         if (modifier_has_dcc(modifier)) {
4292                 /* Per the radeonsi comments, 16/64 bpp are more complicated. */
4293                 if (info->cpp[0] != 4)
4294                         return false;
4295                 /* We support multi-planar formats, but not when combined with
4296                  * additional DCC metadata planes. */
4297                 if (info->num_planes > 1)
4298                         return false;
4299         }
4300
4301         return true;
4302 }
4303
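/*
 * Append a modifier to a dynamically grown array, doubling the capacity as
 * needed. On allocation failure the array is freed and *mods is set to NULL,
 * which callers report as -ENOMEM.
 */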
4304 static void
4305 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4306 {
4307         if (!*mods)
4308                 return;
4309
4310         if (*cap - *size < 1) {
4311                 uint64_t new_cap = *cap * 2;
4312                 uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4313
4314                 if (!new_mods) {
4315                         kfree(*mods);
4316                         *mods = NULL;
4317                         return;
4318                 }
4319
4320                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4321                 kfree(*mods);
4322                 *mods = new_mods;
4323                 *cap = new_cap;
4324         }
4325
4326         (*mods)[*size] = mod;
4327         *size += 1;
4328 }
4329
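/* Advertise the modifiers supported by GFX9 (Vega/Raven) display hardware. */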
4330 static void
4331 add_gfx9_modifiers(const struct amdgpu_device *adev,
4332                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4333 {
4334         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4335         int pipe_xor_bits = min(8, pipes +
4336                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4337         int bank_xor_bits = min(8 - pipe_xor_bits,
4338                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4339         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4340                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4341
4343         if (adev->family == AMDGPU_FAMILY_RV) {
4344                 /* Raven2 and later */
4345                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4346
4347                 /*
4348                  * No _D DCC swizzles yet because we only allow 32bpp, which
4349                  * doesn't support _D on DCN
4350                  */
4351
4352                 if (has_constant_encode) {
4353                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4354                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4355                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4356                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4357                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4358                                     AMD_FMT_MOD_SET(DCC, 1) |
4359                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4360                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4361                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4362                 }
4363
4364                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4365                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4366                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4367                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4368                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4369                             AMD_FMT_MOD_SET(DCC, 1) |
4370                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4371                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4372                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4373
4374                 if (has_constant_encode) {
4375                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4376                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4377                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4378                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4379                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4380                                     AMD_FMT_MOD_SET(DCC, 1) |
4381                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4382                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4383                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4385                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4386                                     AMD_FMT_MOD_SET(RB, rb) |
4387                                     AMD_FMT_MOD_SET(PIPE, pipes));
4388                 }
4389
4390                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4391                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4392                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4393                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4394                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4395                             AMD_FMT_MOD_SET(DCC, 1) |
4396                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4397                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4398                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4399                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4400                             AMD_FMT_MOD_SET(RB, rb) |
4401                             AMD_FMT_MOD_SET(PIPE, pipes));
4402         }
4403
4404         /*
4405          * Only supported for 64bpp on Raven, will be filtered on format in
4406          * dm_plane_format_mod_supported.
4407          */
4408         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4409                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4410                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4411                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4412                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4413
4414         if (adev->family == AMDGPU_FAMILY_RV) {
4415                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4416                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4417                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4418                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4419                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4420         }
4421
4422         /*
4423          * Only supported for 64bpp on Raven, will be filtered on format in
4424          * dm_plane_format_mod_supported.
4425          */
4426         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4427                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4428                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4429
4430         if (adev->family == AMDGPU_FAMILY_RV) {
4431                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4432                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4433                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4434         }
4435 }
4436
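/* Advertise the modifiers supported by GFX10.1 (Navi 1x) display hardware. */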
4437 static void
4438 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4439                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4440 {
4441         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4442
4443         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4444                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4445                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4446                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4447                     AMD_FMT_MOD_SET(DCC, 1) |
4448                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4449                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4450                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4451
4452         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4453                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4454                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4455                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4456                     AMD_FMT_MOD_SET(DCC, 1) |
4457                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4458                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4459                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4460                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4461
4462         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4463                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4464                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4465                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4466
4467         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4468                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4469                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4470                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4471
4473         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4474         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4475                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4476                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4477
4478         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4479                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4480                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4481 }
4482
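/* Advertise the modifiers supported by GFX10.3 (Sienna Cichlid and newer). */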
4483 static void
4484 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4485                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4486 {
4487         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4488         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4489
4490         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4491                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4492                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4493                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4494                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4495                     AMD_FMT_MOD_SET(DCC, 1) |
4496                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4497                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4498                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4499                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4500
4501         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4502                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4503                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4504                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4505                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4506                     AMD_FMT_MOD_SET(DCC, 1) |
4507                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4508                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4509                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4510                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4511                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4512
4513         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4514                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4515                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4516                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4517                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4518
4519         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4520                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4521                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4522                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4523                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4524
4525         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4526         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4527                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4528                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4529
4530         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4531                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4532                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4533 }
4534
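/*
 * Build the modifier list advertised for a plane. Cursors only get LINEAR
 * (plus the INVALID terminator); other planes get the family-specific list,
 * also terminated by LINEAR and INVALID.
 */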
4535 static int
4536 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4537 {
4538         uint64_t size = 0, capacity = 128;
4539         *mods = NULL;
4540
4541         /* We have not hooked up any pre-GFX9 modifiers. */
4542         if (adev->family < AMDGPU_FAMILY_AI)
4543                 return 0;
4544
4545         *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4546
4547         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4548                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4549                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4550                 return *mods ? 0 : -ENOMEM;
4551         }
4552
4553         switch (adev->family) {
4554         case AMDGPU_FAMILY_AI:
4555         case AMDGPU_FAMILY_RV:
4556                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4557                 break;
4558         case AMDGPU_FAMILY_NV:
4559         case AMDGPU_FAMILY_VGH:
4560                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4561                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4562                 else
4563                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4564                 break;
4565         }
4566
4567         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4568
4569         /* INVALID marks the end of the list. */
4570         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4571
4572         if (!*mods)
4573                 return -ENOMEM;
4574
4575         return 0;
4576 }
4577
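/*
 * Fill GFX9+ tiling and DCC parameters for a plane from its framebuffer
 * modifier, then validate the resulting DCC setup against DC capabilities.
 */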
4578 static int
4579 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4580                                           const struct amdgpu_framebuffer *afb,
4581                                           const enum surface_pixel_format format,
4582                                           const enum dc_rotation_angle rotation,
4583                                           const struct plane_size *plane_size,
4584                                           union dc_tiling_info *tiling_info,
4585                                           struct dc_plane_dcc_param *dcc,
4586                                           struct dc_plane_address *address,
4587                                           const bool force_disable_dcc)
4588 {
4589         const uint64_t modifier = afb->base.modifier;
4590         int ret;
4591
4592         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4593         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4594
4595         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4596                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4597
4598                 dcc->enable = 1;
4599                 dcc->meta_pitch = afb->base.pitches[1];
4600                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4601
4602                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4603                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4604         }
4605
4606         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4607         if (ret)
4608                 return ret;
4609
4610         return 0;
4611 }
4612
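/*
 * Translate a DRM framebuffer into DC plane size, tiling, DCC and address
 * information, covering both single-plane graphics surfaces and two-plane
 * (luma + chroma) video surfaces.
 */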
4613 static int
4614 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4615                              const struct amdgpu_framebuffer *afb,
4616                              const enum surface_pixel_format format,
4617                              const enum dc_rotation_angle rotation,
4618                              const uint64_t tiling_flags,
4619                              union dc_tiling_info *tiling_info,
4620                              struct plane_size *plane_size,
4621                              struct dc_plane_dcc_param *dcc,
4622                              struct dc_plane_address *address,
4623                              bool tmz_surface,
4624                              bool force_disable_dcc)
4625 {
4626         const struct drm_framebuffer *fb = &afb->base;
4627         int ret;
4628
4629         memset(tiling_info, 0, sizeof(*tiling_info));
4630         memset(plane_size, 0, sizeof(*plane_size));
4631         memset(dcc, 0, sizeof(*dcc));
4632         memset(address, 0, sizeof(*address));
4633
4634         address->tmz_surface = tmz_surface;
4635
4636         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4637                 uint64_t addr = afb->address + fb->offsets[0];
4638
4639                 plane_size->surface_size.x = 0;
4640                 plane_size->surface_size.y = 0;
4641                 plane_size->surface_size.width = fb->width;
4642                 plane_size->surface_size.height = fb->height;
4643                 plane_size->surface_pitch =
4644                         fb->pitches[0] / fb->format->cpp[0];
4645
4646                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4647                 address->grph.addr.low_part = lower_32_bits(addr);
4648                 address->grph.addr.high_part = upper_32_bits(addr);
4649         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4650                 uint64_t luma_addr = afb->address + fb->offsets[0];
4651                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4652
4653                 plane_size->surface_size.x = 0;
4654                 plane_size->surface_size.y = 0;
4655                 plane_size->surface_size.width = fb->width;
4656                 plane_size->surface_size.height = fb->height;
4657                 plane_size->surface_pitch =
4658                         fb->pitches[0] / fb->format->cpp[0];
4659
4660                 plane_size->chroma_size.x = 0;
4661                 plane_size->chroma_size.y = 0;
4662                 /* TODO: set these based on surface format */
4663                 plane_size->chroma_size.width = fb->width / 2;
4664                 plane_size->chroma_size.height = fb->height / 2;
4665
4666                 plane_size->chroma_pitch =
4667                         fb->pitches[1] / fb->format->cpp[1];
4668
4669                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4670                 address->video_progressive.luma_addr.low_part =
4671                         lower_32_bits(luma_addr);
4672                 address->video_progressive.luma_addr.high_part =
4673                         upper_32_bits(luma_addr);
4674                 address->video_progressive.chroma_addr.low_part =
4675                         lower_32_bits(chroma_addr);
4676                 address->video_progressive.chroma_addr.high_part =
4677                         upper_32_bits(chroma_addr);
4678         }
4679
4680         if (adev->family >= AMDGPU_FAMILY_AI) {
4681                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4682                                                                 rotation, plane_size,
4683                                                                 tiling_info, dcc,
4684                                                                 address,
4685                                                                 force_disable_dcc);
4686                 if (ret)
4687                         return ret;
4688         } else {
4689                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4690         }
4691
4692         return 0;
4693 }
4694
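/*
 * Derive per-pixel and global alpha blending settings from DRM plane state;
 * only overlay planes participate in blending.
 */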
4695 static void
4696 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4697                                bool *per_pixel_alpha, bool *global_alpha,
4698                                int *global_alpha_value)
4699 {
4700         *per_pixel_alpha = false;
4701         *global_alpha = false;
4702         *global_alpha_value = 0xff;
4703
4704         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4705                 return;
4706
4707         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4708                 static const uint32_t alpha_formats[] = {
4709                         DRM_FORMAT_ARGB8888,
4710                         DRM_FORMAT_RGBA8888,
4711                         DRM_FORMAT_ABGR8888,
4712                 };
4713                 uint32_t format = plane_state->fb->format->format;
4714                 unsigned int i;
4715
4716                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4717                         if (format == alpha_formats[i]) {
4718                                 *per_pixel_alpha = true;
4719                                 break;
4720                         }
4721                 }
4722         }
4723
4724         if (plane_state->alpha < 0xffff) {
4725                 *global_alpha = true;
4726                 *global_alpha_value = plane_state->alpha >> 8;
4727         }
4728 }
4729
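/*
 * Map the DRM YCbCr encoding/range properties to a DC colour space. RGB
 * formats always use sRGB; unsupported combinations return -EINVAL.
 */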
4730 static int
4731 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4732                             const enum surface_pixel_format format,
4733                             enum dc_color_space *color_space)
4734 {
4735         bool full_range;
4736
4737         *color_space = COLOR_SPACE_SRGB;
4738
4739         /* DRM color properties only affect non-RGB formats. */
4740         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4741                 return 0;
4742
4743         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4744
4745         switch (plane_state->color_encoding) {
4746         case DRM_COLOR_YCBCR_BT601:
4747                 if (full_range)
4748                         *color_space = COLOR_SPACE_YCBCR601;
4749                 else
4750                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4751                 break;
4752
4753         case DRM_COLOR_YCBCR_BT709:
4754                 if (full_range)
4755                         *color_space = COLOR_SPACE_YCBCR709;
4756                 else
4757                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4758                 break;
4759
4760         case DRM_COLOR_YCBCR_BT2020:
4761                 if (full_range)
4762                         *color_space = COLOR_SPACE_2020_YCBCR;
4763                 else
4764                         return -EINVAL;
4765                 break;
4766
4767         default:
4768                 return -EINVAL;
4769         }
4770
4771         return 0;
4772 }
4773
4774 static int
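/*
 * Fill a dc_plane_info from DRM plane state: pixel format, rotation, tiling,
 * DCC, colour space and blending.
 */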
4775 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4776                             const struct drm_plane_state *plane_state,
4777                             const uint64_t tiling_flags,
4778                             struct dc_plane_info *plane_info,
4779                             struct dc_plane_address *address,
4780                             bool tmz_surface,
4781                             bool force_disable_dcc)
4782 {
4783         const struct drm_framebuffer *fb = plane_state->fb;
4784         const struct amdgpu_framebuffer *afb =
4785                 to_amdgpu_framebuffer(plane_state->fb);
4786         int ret;
4787
4788         memset(plane_info, 0, sizeof(*plane_info));
4789
4790         switch (fb->format->format) {
4791         case DRM_FORMAT_C8:
4792                 plane_info->format =
4793                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4794                 break;
4795         case DRM_FORMAT_RGB565:
4796                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4797                 break;
4798         case DRM_FORMAT_XRGB8888:
4799         case DRM_FORMAT_ARGB8888:
4800                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4801                 break;
4802         case DRM_FORMAT_XRGB2101010:
4803         case DRM_FORMAT_ARGB2101010:
4804                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4805                 break;
4806         case DRM_FORMAT_XBGR2101010:
4807         case DRM_FORMAT_ABGR2101010:
4808                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4809                 break;
4810         case DRM_FORMAT_XBGR8888:
4811         case DRM_FORMAT_ABGR8888:
4812                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4813                 break;
4814         case DRM_FORMAT_NV21:
4815                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4816                 break;
4817         case DRM_FORMAT_NV12:
4818                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4819                 break;
4820         case DRM_FORMAT_P010:
4821                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4822                 break;
4823         case DRM_FORMAT_XRGB16161616F:
4824         case DRM_FORMAT_ARGB16161616F:
4825                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4826                 break;
4827         case DRM_FORMAT_XBGR16161616F:
4828         case DRM_FORMAT_ABGR16161616F:
4829                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4830                 break;
4831         default:
4832                 DRM_ERROR(
4833                         "Unsupported screen format %p4cc\n",
4834                         &fb->format->format);
4835                 return -EINVAL;
4836         }
4837
4838         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4839         case DRM_MODE_ROTATE_0:
4840                 plane_info->rotation = ROTATION_ANGLE_0;
4841                 break;
4842         case DRM_MODE_ROTATE_90:
4843                 plane_info->rotation = ROTATION_ANGLE_90;
4844                 break;
4845         case DRM_MODE_ROTATE_180:
4846                 plane_info->rotation = ROTATION_ANGLE_180;
4847                 break;
4848         case DRM_MODE_ROTATE_270:
4849                 plane_info->rotation = ROTATION_ANGLE_270;
4850                 break;
4851         default:
4852                 plane_info->rotation = ROTATION_ANGLE_0;
4853                 break;
4854         }
4855
4856         plane_info->visible = true;
4857         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4858
4859         plane_info->layer_index = 0;
4860
4861         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4862                                           &plane_info->color_space);
4863         if (ret)
4864                 return ret;
4865
4866         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4867                                            plane_info->rotation, tiling_flags,
4868                                            &plane_info->tiling_info,
4869                                            &plane_info->plane_size,
4870                                            &plane_info->dcc, address, tmz_surface,
4871                                            force_disable_dcc);
4872         if (ret)
4873                 return ret;
4874
4875         fill_blending_from_plane_state(
4876                 plane_state, &plane_info->per_pixel_alpha,
4877                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4878
4879         return 0;
4880 }
4881
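/*
 * Populate a dc_plane_state from the DRM plane and CRTC state: scaling
 * rectangles, plane info, surface addresses and colour management.
 */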
4882 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4883                                     struct dc_plane_state *dc_plane_state,
4884                                     struct drm_plane_state *plane_state,
4885                                     struct drm_crtc_state *crtc_state)
4886 {
4887         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4888         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4889         struct dc_scaling_info scaling_info;
4890         struct dc_plane_info plane_info;
4891         int ret;
4892         bool force_disable_dcc = false;
4893
4894         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4895         if (ret)
4896                 return ret;
4897
4898         dc_plane_state->src_rect = scaling_info.src_rect;
4899         dc_plane_state->dst_rect = scaling_info.dst_rect;
4900         dc_plane_state->clip_rect = scaling_info.clip_rect;
4901         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4902
4903         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4904         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4905                                           afb->tiling_flags,
4906                                           &plane_info,
4907                                           &dc_plane_state->address,
4908                                           afb->tmz_surface,
4909                                           force_disable_dcc);
4910         if (ret)
4911                 return ret;
4912
4913         dc_plane_state->format = plane_info.format;
4914         dc_plane_state->color_space = plane_info.color_space;
4916         dc_plane_state->plane_size = plane_info.plane_size;
4917         dc_plane_state->rotation = plane_info.rotation;
4918         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4919         dc_plane_state->stereo_format = plane_info.stereo_format;
4920         dc_plane_state->tiling_info = plane_info.tiling_info;
4921         dc_plane_state->visible = plane_info.visible;
4922         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4923         dc_plane_state->global_alpha = plane_info.global_alpha;
4924         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4925         dc_plane_state->dcc = plane_info.dcc;
4926         dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
4927         dc_plane_state->flip_int_enabled = true;
4928
4929         /*
4930          * Always set input transfer function, since plane state is refreshed
4931          * every time.
4932          */
4933         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4934         if (ret)
4935                 return ret;
4936
4937         return 0;
4938 }
4939
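/*
 * Compute the stream source/destination rectangles for the requested RMX
 * scaling mode (full, aspect, center) and any underscan borders.
 */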
4940 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4941                                            const struct dm_connector_state *dm_state,
4942                                            struct dc_stream_state *stream)
4943 {
4944         enum amdgpu_rmx_type rmx_type;
4945
4946         struct rect src = { 0 }; /* viewport in composition space */
4947         struct rect dst = { 0 }; /* stream addressable area */
4948
4949         /* no mode. nothing to be done */
4950         if (!mode)
4951                 return;
4952
4953         /* Full screen scaling by default */
4954         src.width = mode->hdisplay;
4955         src.height = mode->vdisplay;
4956         dst.width = stream->timing.h_addressable;
4957         dst.height = stream->timing.v_addressable;
4958
4959         if (dm_state) {
4960                 rmx_type = dm_state->scaling;
4961                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4962                         if (src.width * dst.height <
4963                                         src.height * dst.width) {
4964                                 /* height needs less upscaling/more downscaling */
4965                                 dst.width = src.width *
4966                                                 dst.height / src.height;
4967                         } else {
4968                                 /* width needs less upscaling/more downscaling */
4969                                 dst.height = src.height *
4970                                                 dst.width / src.width;
4971                         }
4972                 } else if (rmx_type == RMX_CENTER) {
4973                         dst = src;
4974                 }
4975
4976                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4977                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4978
4979                 if (dm_state->underscan_enable) {
4980                         dst.x += dm_state->underscan_hborder / 2;
4981                         dst.y += dm_state->underscan_vborder / 2;
4982                         dst.width -= dm_state->underscan_hborder;
4983                         dst.height -= dm_state->underscan_vborder;
4984                 }
4985         }
4986
4987         stream->src = src;
4988         stream->dst = dst;
4989
4990         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4991                       dst.x, dst.y, dst.width, dst.height);
4993 }
4994
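/*
 * Pick a colour depth from the sink's capabilities, honouring the YCbCr 4:2:0
 * deep-colour modes and capping at the bpc requested by userspace.
 */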
4995 static enum dc_color_depth
4996 convert_color_depth_from_display_info(const struct drm_connector *connector,
4997                                       bool is_y420, int requested_bpc)
4998 {
4999         uint8_t bpc;
5000
5001         if (is_y420) {
5002                 bpc = 8;
5003
5004                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5005                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5006                         bpc = 16;
5007                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5008                         bpc = 12;
5009                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5010                         bpc = 10;
5011         } else {
5012                 bpc = (uint8_t)connector->display_info.bpc;
5013                 /* Assume 8 bpc by default if no bpc is specified. */
5014                 bpc = bpc ? bpc : 8;
5015         }
5016
5017         if (requested_bpc > 0) {
5018                 /*
5019                  * Cap display bpc based on the user requested value.
5020                  *
5021                  * The value of state->max_bpc may not be correctly updated
5022                  * depending on when the connector gets added to the state,
5023                  * or if this was called outside of atomic check, so it
5024                  * can't be used directly.
5025                  */
5026                 bpc = min_t(u8, bpc, requested_bpc);
5027
5028                 /* Round down to the nearest even number. */
5029                 bpc = bpc - (bpc & 1);
5030         }
5031
5032         switch (bpc) {
5033         case 0:
5034                 /*
5035                  * Temporary workaround: DRM doesn't parse color depth for
5036                  * EDID revisions before 1.4.
5037                  * TODO: Fix edid parsing
5038                  */
5039                 return COLOR_DEPTH_888;
5040         case 6:
5041                 return COLOR_DEPTH_666;
5042         case 8:
5043                 return COLOR_DEPTH_888;
5044         case 10:
5045                 return COLOR_DEPTH_101010;
5046         case 12:
5047                 return COLOR_DEPTH_121212;
5048         case 14:
5049                 return COLOR_DEPTH_141414;
5050         case 16:
5051                 return COLOR_DEPTH_161616;
5052         default:
5053                 return COLOR_DEPTH_UNDEFINED;
5054         }
5055 }
5056
5057 static enum dc_aspect_ratio
5058 get_aspect_ratio(const struct drm_display_mode *mode_in)
5059 {
5060         /* 1-1 mapping, since both enums follow the HDMI spec. */
5061         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5062 }
5063
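/* Choose the output colour space implied by the timing's pixel encoding. */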
5064 static enum dc_color_space
5065 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5066 {
5067         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5068
5069         switch (dc_crtc_timing->pixel_encoding) {
5070         case PIXEL_ENCODING_YCBCR422:
5071         case PIXEL_ENCODING_YCBCR444:
5072         case PIXEL_ENCODING_YCBCR420:
5073         {
5074                 /*
5075                  * Per the HDMI spec, 27030 kHz is the separation point
5076                  * between HDTV and SDTV, so use YCbCr709 above it and
5077                  * YCbCr601 at or below it.
5078                  */
5079                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5080                         if (dc_crtc_timing->flags.Y_ONLY)
5081                                 color_space =
5082                                         COLOR_SPACE_YCBCR709_LIMITED;
5083                         else
5084                                 color_space = COLOR_SPACE_YCBCR709;
5085                 } else {
5086                         if (dc_crtc_timing->flags.Y_ONLY)
5087                                 color_space =
5088                                         COLOR_SPACE_YCBCR601_LIMITED;
5089                         else
5090                                 color_space = COLOR_SPACE_YCBCR601;
5091                 }
5092
5093         }
5094         break;
5095         case PIXEL_ENCODING_RGB:
5096                 color_space = COLOR_SPACE_SRGB;
5097                 break;
5098
5099         default:
5100                 WARN_ON(1);
5101                 break;
5102         }
5103
5104         return color_space;
5105 }
5106
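/*
 * Step the colour depth down until the normalized pixel clock fits within
 * the sink's max TMDS clock; returns false if no valid HDMI depth fits.
 */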
5107 static bool adjust_colour_depth_from_display_info(
5108         struct dc_crtc_timing *timing_out,
5109         const struct drm_display_info *info)
5110 {
5111         enum dc_color_depth depth = timing_out->display_color_depth;
5112         int normalized_clk;
5113         do {
5114                 normalized_clk = timing_out->pix_clk_100hz / 10;
5115                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5116                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5117                         normalized_clk /= 2;
5118                 /* Adjust the pixel clock per the HDMI spec for the given colour depth. */
5119                 switch (depth) {
5120                 case COLOR_DEPTH_888:
5121                         break;
5122                 case COLOR_DEPTH_101010:
5123                         normalized_clk = (normalized_clk * 30) / 24;
5124                         break;
5125                 case COLOR_DEPTH_121212:
5126                         normalized_clk = (normalized_clk * 36) / 24;
5127                         break;
5128                 case COLOR_DEPTH_161616:
5129                         normalized_clk = (normalized_clk * 48) / 24;
5130                         break;
5131                 default:
5132                         /* The above depths are the only ones valid for HDMI. */
5133                         return false;
5134                 }
5135                 if (normalized_clk <= info->max_tmds_clock) {
5136                         timing_out->display_color_depth = depth;
5137                         return true;
5138                 }
5139         } while (--depth > COLOR_DEPTH_666);
5140         return false;
5141 }
5142
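/*
 * Convert a drm_display_mode plus connector state into DC CRTC timing,
 * choosing the pixel encoding, colour depth, VIC and aspect ratio.
 */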
5143 static void fill_stream_properties_from_drm_display_mode(
5144         struct dc_stream_state *stream,
5145         const struct drm_display_mode *mode_in,
5146         const struct drm_connector *connector,
5147         const struct drm_connector_state *connector_state,
5148         const struct dc_stream_state *old_stream,
5149         int requested_bpc)
5150 {
5151         struct dc_crtc_timing *timing_out = &stream->timing;
5152         const struct drm_display_info *info = &connector->display_info;
5153         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5154         struct hdmi_vendor_infoframe hv_frame;
5155         struct hdmi_avi_infoframe avi_frame;
5156
5157         memset(&hv_frame, 0, sizeof(hv_frame));
5158         memset(&avi_frame, 0, sizeof(avi_frame));
5159
5160         timing_out->h_border_left = 0;
5161         timing_out->h_border_right = 0;
5162         timing_out->v_border_top = 0;
5163         timing_out->v_border_bottom = 0;
5164         /* TODO: un-hardcode */
5165         if (drm_mode_is_420_only(info, mode_in)
5166                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5167                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5168         else if (drm_mode_is_420_also(info, mode_in)
5169                         && aconnector->force_yuv420_output)
5170                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5171         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5172                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5173                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5174         else
5175                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5176
5177         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5178         timing_out->display_color_depth = convert_color_depth_from_display_info(
5179                 connector,
5180                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5181                 requested_bpc);
5182         timing_out->scan_type = SCANNING_TYPE_NODATA;
5183         timing_out->hdmi_vic = 0;
5184
5185         if (old_stream) {
5186                 timing_out->vic = old_stream->timing.vic;
5187                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5188                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5189         } else {
5190                 timing_out->vic = drm_match_cea_mode(mode_in);
5191                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5192                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5193                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5194                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5195         }
5196
5197         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5198                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5199                 timing_out->vic = avi_frame.video_code;
5200                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5201                 timing_out->hdmi_vic = hv_frame.vic;
5202         }
5203
5204         if (is_freesync_video_mode(mode_in, aconnector)) {
5205                 timing_out->h_addressable = mode_in->hdisplay;
5206                 timing_out->h_total = mode_in->htotal;
5207                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5208                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5209                 timing_out->v_total = mode_in->vtotal;
5210                 timing_out->v_addressable = mode_in->vdisplay;
5211                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5212                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5213                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5214         } else {
5215                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5216                 timing_out->h_total = mode_in->crtc_htotal;
5217                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5218                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5219                 timing_out->v_total = mode_in->crtc_vtotal;
5220                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5221                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5222                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5223                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5224         }
5225
5226         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5227
5228         stream->output_color_space = get_output_color_space(timing_out);
5229
5230         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5231         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5232         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5233                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5234                     drm_mode_is_420_also(info, mode_in) &&
5235                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5236                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5237                         adjust_colour_depth_from_display_info(timing_out, info);
5238                 }
5239         }
5240 }
5241
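/* Copy EDID-derived audio capabilities from the DC sink into audio_info. */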
5242 static void fill_audio_info(struct audio_info *audio_info,
5243                             const struct drm_connector *drm_connector,
5244                             const struct dc_sink *dc_sink)
5245 {
5246         int i = 0;
5247         int cea_revision = 0;
5248         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5249
5250         audio_info->manufacture_id = edid_caps->manufacturer_id;
5251         audio_info->product_id = edid_caps->product_id;
5252
5253         cea_revision = drm_connector->display_info.cea_rev;
5254
5255         strscpy(audio_info->display_name,
5256                 edid_caps->display_name,
5257                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5258
5259         if (cea_revision >= 3) {
5260                 audio_info->mode_count = edid_caps->audio_mode_count;
5261
5262                 for (i = 0; i < audio_info->mode_count; ++i) {
5263                         audio_info->modes[i].format_code =
5264                                         (enum audio_format_code)
5265                                         (edid_caps->audio_modes[i].format_code);
5266                         audio_info->modes[i].channel_count =
5267                                         edid_caps->audio_modes[i].channel_count;
5268                         audio_info->modes[i].sample_rates.all =
5269                                         edid_caps->audio_modes[i].sample_rate;
5270                         audio_info->modes[i].sample_size =
5271                                         edid_caps->audio_modes[i].sample_size;
5272                 }
5273         }
5274
5275         audio_info->flags.all = edid_caps->speaker_flags;
5276
5277         /* TODO: We only check the progressive mode; check the interlaced mode too. */
5278         if (drm_connector->latency_present[0]) {
5279                 audio_info->video_latency = drm_connector->video_latency[0];
5280                 audio_info->audio_latency = drm_connector->audio_latency[0];
5281         }
5282
5283         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5285 }
5286
5287 static void
5288 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5289                                       struct drm_display_mode *dst_mode)
5290 {
5291         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5292         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5293         dst_mode->crtc_clock = src_mode->crtc_clock;
5294         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5295         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5296         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5297         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5298         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5299         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5300         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5301         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5302         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5303         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5304         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5305 }
5306
5307 static void
5308 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5309                                         const struct drm_display_mode *native_mode,
5310                                         bool scale_enabled)
5311 {
5312         if (scale_enabled) {
5313                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5314         } else if (native_mode->clock == drm_mode->clock &&
5315                         native_mode->htotal == drm_mode->htotal &&
5316                         native_mode->vtotal == drm_mode->vtotal) {
5317                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5318         } else {
5319                 /* no scaling nor amdgpu inserted, no need to patch */
5320         }
5321 }
5322
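/*
 * Create a virtual sink so a stream can still be constructed when no
 * physical sink is attached to the link.
 */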
5323 static struct dc_sink *
5324 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5325 {
5326         struct dc_sink_init_data sink_init_data = { 0 };
5327         struct dc_sink *sink = NULL;
5328         sink_init_data.link = aconnector->dc_link;
5329         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5330
5331         sink = dc_sink_create(&sink_init_data);
5332         if (!sink) {
5333                 DRM_ERROR("Failed to create sink!\n");
5334                 return NULL;
5335         }
5336         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5337
5338         return sink;
5339 }
5340
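/*
 * Arm a synchronized stream's CRTC-reset trigger on the VSYNC edge of its
 * master stream.
 */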
5341 static void set_multisync_trigger_params(
5342                 struct dc_stream_state *stream)
5343 {
5344         struct dc_stream_state *master = NULL;
5345
5346         if (stream->triggered_crtc_reset.enabled) {
5347                 master = stream->triggered_crtc_reset.event_source;
5348                 stream->triggered_crtc_reset.event =
5349                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5350                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5351                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5352         }
5353 }
5354
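/*
 * Pick the stream with the highest refresh rate as the multisync master and
 * point every stream's reset event source at it.
 */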
5355 static void set_master_stream(struct dc_stream_state *stream_set[],
5356                               int stream_count)
5357 {
5358         int j, highest_rfr = 0, master_stream = 0;
5359
5360         for (j = 0; j < stream_count; j++) {
5361                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5362                         int refresh_rate = 0;
5363
5364                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5365                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5366                         if (refresh_rate > highest_rfr) {
5367                                 highest_rfr = refresh_rate;
5368                                 master_stream = j;
5369                         }
5370                 }
5371         }
5372         for (j = 0; j < stream_count; j++) {
5373                 if (stream_set[j])
5374                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5375         }
5376 }
5377
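/* Set up multisync master/follower relationships across all streams. */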
5378 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5379 {
5380         int i = 0;
5381         struct dc_stream_state *stream;
5382
5383         if (context->stream_count < 2)
5384                 return;
5385         for (i = 0; i < context->stream_count ; i++) {
5386                 if (!context->streams[i])
5387                         continue;
5388                 /*
5389                  * TODO: add a function to read AMD VSDB bits and set
5390                  * crtc_sync_master.multi_sync_enabled flag
5391                  * For now it's set to false
5392                  */
5393         }
5394
5395         set_master_stream(context->streams, context->stream_count);
5396
5397         for (i = 0; i < context->stream_count ; i++) {
5398                 stream = context->streams[i];
5399
5400                 if (!stream)
5401                         continue;
5402
5403                 set_multisync_trigger_params(stream);
5404         }
5405 }
5406
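/*
 * Return the mode with the highest refresh rate at the preferred mode's
 * resolution, scanning either the probed or the current mode list. The
 * result is cached in aconnector->freesync_vid_base so subsequent calls can
 * skip the scan.
 */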
5407 static struct drm_display_mode *
5408 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5409                           bool use_probed_modes)
5410 {
5411         struct drm_display_mode *m, *m_pref = NULL;
5412         u16 current_refresh, highest_refresh;
5413         struct list_head *list_head = use_probed_modes ?
5414                                                     &aconnector->base.probed_modes :
5415                                                     &aconnector->base.modes;
5416
5417         if (aconnector->freesync_vid_base.clock != 0)
5418                 return &aconnector->freesync_vid_base;
5419
5420         /* Find the preferred mode */
5421         list_for_each_entry(m, list_head, head) {
5422                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5423                         m_pref = m;
5424                         break;
5425                 }
5426         }
5427
5428         if (!m_pref) {
5429                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
5430                 m_pref = list_first_entry_or_null(
5431                         &aconnector->base.modes, struct drm_display_mode, head);
5432                 if (!m_pref) {
5433                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5434                         return NULL;
5435                 }
5436         }
5437
5438         highest_refresh = drm_mode_vrefresh(m_pref);
5439
5440         /*
5441          * Find the mode with the highest refresh rate at the same resolution.
5442          * For some monitors, the preferred mode is not the mode with the
5443          * highest supported refresh rate.
5444          */
5445         list_for_each_entry(m, list_head, head) {
5446                 current_refresh  = drm_mode_vrefresh(m);
5447
5448                 if (m->hdisplay == m_pref->hdisplay &&
5449                     m->vdisplay == m_pref->vdisplay &&
5450                     highest_refresh < current_refresh) {
5451                         highest_refresh = current_refresh;
5452                         m_pref = m;
5453                 }
5454         }
5455
5456         aconnector->freesync_vid_base = *m_pref;
5457         return m_pref;
5458 }
5459
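/*
 * A mode is treated as a freesync video mode when it differs from the
 * highest-refresh base mode only in vertical blanking: identical pixel clock
 * and horizontal timing, with vsync_start/vsync_end shifted by exactly the
 * same amount as vtotal, i.e. a pure front-porch variation.
 */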
5460 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5461                                    struct amdgpu_dm_connector *aconnector)
5462 {
5463         struct drm_display_mode *high_mode;
5464         int timing_diff;
5465
5466         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5467         if (!high_mode || !mode)
5468                 return false;
5469
5470         timing_diff = high_mode->vtotal - mode->vtotal;
5471
5472         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5473             high_mode->hdisplay != mode->hdisplay ||
5474             high_mode->vdisplay != mode->vdisplay ||
5475             high_mode->hsync_start != mode->hsync_start ||
5476             high_mode->hsync_end != mode->hsync_end ||
5477             high_mode->htotal != mode->htotal ||
5478             high_mode->hskew != mode->hskew ||
5479             high_mode->vscan != mode->vscan ||
5480             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5481             high_mode->vsync_end - mode->vsync_end != timing_diff)
5482                 return false;
5483         else
5484                 return true;
5485 }
5486
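/*
 * Build a dc_stream_state for @aconnector from @drm_mode. A fake sink is
 * substituted when the connector has no dc_sink. When scaling is active and
 * the refresh rate is unchanged, @old_stream donates its VIC and polarities;
 * @requested_bpc caps the stream's color depth. On DP links, DSC is enabled
 * if the decoder advertises support or it is forced through debugfs.
 */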
5487 static struct dc_stream_state *
5488 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5489                        const struct drm_display_mode *drm_mode,
5490                        const struct dm_connector_state *dm_state,
5491                        const struct dc_stream_state *old_stream,
5492                        int requested_bpc)
5493 {
5494         struct drm_display_mode *preferred_mode = NULL;
5495         struct drm_connector *drm_connector;
5496         const struct drm_connector_state *con_state =
5497                 dm_state ? &dm_state->base : NULL;
5498         struct dc_stream_state *stream = NULL;
5499         struct drm_display_mode mode = *drm_mode;
5500         struct drm_display_mode saved_mode;
5501         struct drm_display_mode *freesync_mode = NULL;
5502         bool native_mode_found = false;
5503         bool recalculate_timing = false;
5504         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5505         int mode_refresh;
5506         int preferred_refresh = 0;
5507 #if defined(CONFIG_DRM_AMD_DC_DCN)
5508         struct dsc_dec_dpcd_caps dsc_caps;
5509         uint32_t link_bandwidth_kbps;
5510 #endif
5511         struct dc_sink *sink = NULL;
5512
5513         memset(&saved_mode, 0, sizeof(saved_mode));
5514
5515         if (aconnector == NULL) {
5516                 DRM_ERROR("aconnector is NULL!\n");
5517                 return stream;
5518         }
5519
5520         drm_connector = &aconnector->base;
5521
5522         if (!aconnector->dc_sink) {
5523                 sink = create_fake_sink(aconnector);
5524                 if (!sink)
5525                         return stream;
5526         } else {
5527                 sink = aconnector->dc_sink;
5528                 dc_sink_retain(sink);
5529         }
5530
5531         stream = dc_create_stream_for_sink(sink);
5532
5533         if (stream == NULL) {
5534                 DRM_ERROR("Failed to create stream for sink!\n");
5535                 goto finish;
5536         }
5537
5538         stream->dm_stream_context = aconnector;
5539
5540         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5541                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5542
5543         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5544                 /* Search for preferred mode */
5545                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5546                         native_mode_found = true;
5547                         break;
5548                 }
5549         }
5550         if (!native_mode_found)
5551                 preferred_mode = list_first_entry_or_null(
5552                                 &aconnector->base.modes,
5553                                 struct drm_display_mode,
5554                                 head);
5555
5556         mode_refresh = drm_mode_vrefresh(&mode);
5557
5558         if (preferred_mode == NULL) {
5559                 /*
5560                  * This may not be an error: the use case is when we have no
5561                  * usermode calls to reset and set the mode upon hotplug. In
5562                  * this case, we call set mode ourselves to restore the previous
5563                  * mode, and the mode list may not be filled in yet.
5564                  */
5565                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5566         } else {
5567                 recalculate_timing = amdgpu_freesync_vid_mode &&
5568                                  is_freesync_video_mode(&mode, aconnector);
5569                 if (recalculate_timing) {
5570                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5571                         saved_mode = mode;
5572                         mode = *freesync_mode;
5573                 } else {
5574                         decide_crtc_timing_for_drm_display_mode(
5575                                 &mode, preferred_mode, scale);
5576
5577                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
5578                 }
5579         }
5580
5581         if (recalculate_timing)
5582                 drm_mode_set_crtcinfo(&saved_mode, 0);
5583         else if (!dm_state)
5584                 drm_mode_set_crtcinfo(&mode, 0);
5585
5586         /*
5587          * If scaling is enabled and the refresh rate didn't change,
5588          * we copy the vic and polarities of the old timings.
5589          */
5590         if (!scale || mode_refresh != preferred_refresh)
5591                 fill_stream_properties_from_drm_display_mode(
5592                         stream, &mode, &aconnector->base, con_state, NULL,
5593                         requested_bpc);
5594         else
5595                 fill_stream_properties_from_drm_display_mode(
5596                         stream, &mode, &aconnector->base, con_state, old_stream,
5597                         requested_bpc);
5598
5599         stream->timing.flags.DSC = 0;
5600
5601         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5602 #if defined(CONFIG_DRM_AMD_DC_DCN)
5603                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5604                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5605                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5606                                       &dsc_caps);
5607                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5608                                                              dc_link_get_link_cap(aconnector->dc_link));
5609
5610                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5611                         /* Set DSC policy according to dsc_clock_en */
5612                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5613                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5614
5615                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5616                                                   &dsc_caps,
5617                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5618                                                   0,
5619                                                   link_bandwidth_kbps,
5620                                                   &stream->timing,
5621                                                   &stream->timing.dsc_cfg))
5622                                 stream->timing.flags.DSC = 1;
5623                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5624                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5625                                 stream->timing.flags.DSC = 1;
5626
5627                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5628                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5629
5630                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5631                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5632
5633                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5634                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5635                 }
5636 #endif
5637         }
5638
5639         update_stream_scaling_settings(&mode, dm_state, stream);
5640
5641         fill_audio_info(
5642                 &stream->audio_info,
5643                 drm_connector,
5644                 sink);
5645
5646         update_stream_signal(stream, sink);
5647
5648         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5649                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5650
5651         if (stream->link->psr_settings.psr_feature_enabled) {
5652                 /*
5653                  * Decide whether the stream supports VSC SDP colorimetry
5654                  * before building the VSC info packet.
5655                  */
5656                 stream->use_vsc_sdp_for_colorimetry = false;
5657                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5658                         stream->use_vsc_sdp_for_colorimetry =
5659                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5660                 } else {
5661                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5662                                 stream->use_vsc_sdp_for_colorimetry = true;
5663                 }
5664                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5665         }
5666 finish:
5667         dc_sink_release(sink);
5668
5669         return stream;
5670 }
5671
5672 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5673 {
5674         drm_crtc_cleanup(crtc);
5675         kfree(crtc);
5676 }
5677
5678 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5679                                   struct drm_crtc_state *state)
5680 {
5681         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5682
5683         /* TODO: destroy dc_stream objects once the stream object is flattened */
5684         if (cur->stream)
5685                 dc_stream_release(cur->stream);
5686
5687
5689
5690
5692 }
5693
5694 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5695 {
5696         struct dm_crtc_state *state;
5697
5698         if (crtc->state)
5699                 dm_crtc_destroy_state(crtc, crtc->state);
5700
5701         state = kzalloc(sizeof(*state), GFP_KERNEL);
5702         if (WARN_ON(!state))
5703                 return;
5704
5705         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5706 }
5707
5708 static struct drm_crtc_state *
5709 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5710 {
5711         struct dm_crtc_state *state, *cur;
5712
5713         if (WARN_ON(!crtc->state))
5714                 return NULL;
5715
5716         cur = to_dm_crtc_state(crtc->state);
5717
5718         state = kzalloc(sizeof(*state), GFP_KERNEL);
5719         if (!state)
5720                 return NULL;
5721
5722         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5723
5724         if (cur->stream) {
5725                 state->stream = cur->stream;
5726                 dc_stream_retain(state->stream);
5727         }
5728
5729         state->active_planes = cur->active_planes;
5730         state->vrr_infopacket = cur->vrr_infopacket;
5731         state->abm_level = cur->abm_level;
5732         state->vrr_supported = cur->vrr_supported;
5733         state->freesync_config = cur->freesync_config;
5734         state->cm_has_degamma = cur->cm_has_degamma;
5735         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5736         /* TODO: duplicate the dc_stream once the stream object is flattened */
5737
5738         return &state->base;
5739 }
5740
5741 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5742 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5743 {
5744         crtc_debugfs_init(crtc);
5745
5746         return 0;
5747 }
5748 #endif
5749
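/*
 * Enable or disable the VUPDATE interrupt for this CRTC. VUPDATE is only
 * requested while VRR is active; see dm_set_vblank() below.
 */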
5750 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5751 {
5752         enum dc_irq_source irq_source;
5753         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5754         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5755         int rc;
5756
5757         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5758
5759         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5760
5761         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5762                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5763         return rc;
5764 }
5765
5766 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5767 {
5768         enum dc_irq_source irq_source;
5769         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5770         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5771         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5772 #if defined(CONFIG_DRM_AMD_DC_DCN)
5773         struct amdgpu_display_manager *dm = &adev->dm;
5774         unsigned long flags;
5775 #endif
5776         int rc = 0;
5777
5778         if (enable) {
5779                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5780                 if (amdgpu_dm_vrr_active(acrtc_state))
5781                         rc = dm_set_vupdate_irq(crtc, true);
5782         } else {
5783                 /* vblank irq off -> vupdate irq off */
5784                 rc = dm_set_vupdate_irq(crtc, false);
5785         }
5786
5787         if (rc)
5788                 return rc;
5789
5790         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5791
5792         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5793                 return -EBUSY;
5794
5795         if (amdgpu_in_reset(adev))
5796                 return 0;
5797
5798 #if defined(CONFIG_DRM_AMD_DC_DCN)
5799         spin_lock_irqsave(&dm->vblank_lock, flags);
5800         dm->vblank_workqueue->dm = dm;
5801         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5802         dm->vblank_workqueue->enable = enable;
5803         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5804         schedule_work(&dm->vblank_workqueue->mall_work);
5805 #endif
5806
5807         return 0;
5808 }
5809
5810 static int dm_enable_vblank(struct drm_crtc *crtc)
5811 {
5812         return dm_set_vblank(crtc, true);
5813 }
5814
5815 static void dm_disable_vblank(struct drm_crtc *crtc)
5816 {
5817         dm_set_vblank(crtc, false);
5818 }
5819
5820 /* Only the options currently available for the driver are implemented */
5821 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5822         .reset = dm_crtc_reset_state,
5823         .destroy = amdgpu_dm_crtc_destroy,
5824         .set_config = drm_atomic_helper_set_config,
5825         .page_flip = drm_atomic_helper_page_flip,
5826         .atomic_duplicate_state = dm_crtc_duplicate_state,
5827         .atomic_destroy_state = dm_crtc_destroy_state,
5828         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5829         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5830         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5831         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5832         .enable_vblank = dm_enable_vblank,
5833         .disable_vblank = dm_disable_vblank,
5834         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5835 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5836         .late_register = amdgpu_dm_crtc_late_register,
5837 #endif
5838 };
5839
5840 static enum drm_connector_status
5841 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5842 {
5843         bool connected;
5844         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5845
5846         /*
5847          * Notes:
5848          * 1. This interface is NOT called in context of HPD irq.
5849          * 2. This interface *is* called in the context of a user-mode ioctl,
5850          * which makes it a bad place for *any* MST-related activity.
5851          */
5852
5853         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5854             !aconnector->fake_enable)
5855                 connected = (aconnector->dc_sink != NULL);
5856         else
5857                 connected = (aconnector->base.force == DRM_FORCE_ON);
5858
5859         update_subconnector_property(aconnector);
5860
5861         return (connected ? connector_status_connected :
5862                         connector_status_disconnected);
5863 }
5864
5865 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5866                                             struct drm_connector_state *connector_state,
5867                                             struct drm_property *property,
5868                                             uint64_t val)
5869 {
5870         struct drm_device *dev = connector->dev;
5871         struct amdgpu_device *adev = drm_to_adev(dev);
5872         struct dm_connector_state *dm_old_state =
5873                 to_dm_connector_state(connector->state);
5874         struct dm_connector_state *dm_new_state =
5875                 to_dm_connector_state(connector_state);
5876
5877         int ret = -EINVAL;
5878
5879         if (property == dev->mode_config.scaling_mode_property) {
5880                 enum amdgpu_rmx_type rmx_type;
5881
5882                 switch (val) {
5883                 case DRM_MODE_SCALE_CENTER:
5884                         rmx_type = RMX_CENTER;
5885                         break;
5886                 case DRM_MODE_SCALE_ASPECT:
5887                         rmx_type = RMX_ASPECT;
5888                         break;
5889                 case DRM_MODE_SCALE_FULLSCREEN:
5890                         rmx_type = RMX_FULL;
5891                         break;
5892                 case DRM_MODE_SCALE_NONE:
5893                 default:
5894                         rmx_type = RMX_OFF;
5895                         break;
5896                 }
5897
5898                 if (dm_old_state->scaling == rmx_type)
5899                         return 0;
5900
5901                 dm_new_state->scaling = rmx_type;
5902                 ret = 0;
5903         } else if (property == adev->mode_info.underscan_hborder_property) {
5904                 dm_new_state->underscan_hborder = val;
5905                 ret = 0;
5906         } else if (property == adev->mode_info.underscan_vborder_property) {
5907                 dm_new_state->underscan_vborder = val;
5908                 ret = 0;
5909         } else if (property == adev->mode_info.underscan_property) {
5910                 dm_new_state->underscan_enable = val;
5911                 ret = 0;
5912         } else if (property == adev->mode_info.abm_level_property) {
5913                 dm_new_state->abm_level = val;
5914                 ret = 0;
5915         }
5916
5917         return ret;
5918 }
5919
5920 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5921                                             const struct drm_connector_state *state,
5922                                             struct drm_property *property,
5923                                             uint64_t *val)
5924 {
5925         struct drm_device *dev = connector->dev;
5926         struct amdgpu_device *adev = drm_to_adev(dev);
5927         struct dm_connector_state *dm_state =
5928                 to_dm_connector_state(state);
5929         int ret = -EINVAL;
5930
5931         if (property == dev->mode_config.scaling_mode_property) {
5932                 switch (dm_state->scaling) {
5933                 case RMX_CENTER:
5934                         *val = DRM_MODE_SCALE_CENTER;
5935                         break;
5936                 case RMX_ASPECT:
5937                         *val = DRM_MODE_SCALE_ASPECT;
5938                         break;
5939                 case RMX_FULL:
5940                         *val = DRM_MODE_SCALE_FULLSCREEN;
5941                         break;
5942                 case RMX_OFF:
5943                 default:
5944                         *val = DRM_MODE_SCALE_NONE;
5945                         break;
5946                 }
5947                 ret = 0;
5948         } else if (property == adev->mode_info.underscan_hborder_property) {
5949                 *val = dm_state->underscan_hborder;
5950                 ret = 0;
5951         } else if (property == adev->mode_info.underscan_vborder_property) {
5952                 *val = dm_state->underscan_vborder;
5953                 ret = 0;
5954         } else if (property == adev->mode_info.underscan_property) {
5955                 *val = dm_state->underscan_enable;
5956                 ret = 0;
5957         } else if (property == adev->mode_info.abm_level_property) {
5958                 *val = dm_state->abm_level;
5959                 ret = 0;
5960         }
5961
5962         return ret;
5963 }
5964
5965 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5966 {
5967         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5968
5969         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5970 }
5971
5972 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5973 {
5974         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5975         const struct dc_link *link = aconnector->dc_link;
5976         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5977         struct amdgpu_display_manager *dm = &adev->dm;
5978
5979         /*
5980          * Call only if mst_mgr was initialized before, since it's not done
5981          * for all connector types.
5982          */
5983         if (aconnector->mst_mgr.dev)
5984                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5985
5986 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5987         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5988
5989         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5990             link->type != dc_connection_none &&
5991             dm->backlight_dev) {
5992                 backlight_device_unregister(dm->backlight_dev);
5993                 dm->backlight_dev = NULL;
5994         }
5995 #endif
5996
5997         if (aconnector->dc_em_sink)
5998                 dc_sink_release(aconnector->dc_em_sink);
5999         aconnector->dc_em_sink = NULL;
6000         if (aconnector->dc_sink)
6001                 dc_sink_release(aconnector->dc_sink);
6002         aconnector->dc_sink = NULL;
6003
6004         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6005         drm_connector_unregister(connector);
6006         drm_connector_cleanup(connector);
6007         if (aconnector->i2c) {
6008                 i2c_del_adapter(&aconnector->i2c->base);
6009                 kfree(aconnector->i2c);
6010         }
6011         kfree(aconnector->dm_dp_aux.aux.name);
6012
6013         kfree(connector);
6014 }
6015
6016 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6017 {
6018         struct dm_connector_state *state =
6019                 to_dm_connector_state(connector->state);
6020
6021         if (connector->state)
6022                 __drm_atomic_helper_connector_destroy_state(connector->state);
6023
6024         kfree(state);
6025
6026         state = kzalloc(sizeof(*state), GFP_KERNEL);
6027
6028         if (state) {
6029                 state->scaling = RMX_OFF;
6030                 state->underscan_enable = false;
6031                 state->underscan_hborder = 0;
6032                 state->underscan_vborder = 0;
6033                 state->base.max_requested_bpc = 8;
6034                 state->vcpi_slots = 0;
6035                 state->pbn = 0;
6036                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6037                         state->abm_level = amdgpu_dm_abm_level;
6038
6039                 __drm_atomic_helper_connector_reset(connector, &state->base);
6040         }
6041 }
6042
6043 struct drm_connector_state *
6044 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6045 {
6046         struct dm_connector_state *state =
6047                 to_dm_connector_state(connector->state);
6048
6049         struct dm_connector_state *new_state =
6050                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6051
6052         if (!new_state)
6053                 return NULL;
6054
6055         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6056
6057         new_state->freesync_capable = state->freesync_capable;
6058         new_state->abm_level = state->abm_level;
6059         new_state->scaling = state->scaling;
6060         new_state->underscan_enable = state->underscan_enable;
6061         new_state->underscan_hborder = state->underscan_hborder;
6062         new_state->underscan_vborder = state->underscan_vborder;
6063         new_state->vcpi_slots = state->vcpi_slots;
6064         new_state->pbn = state->pbn;
6065         return &new_state->base;
6066 }
6067
6068 static int
6069 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6070 {
6071         struct amdgpu_dm_connector *amdgpu_dm_connector =
6072                 to_amdgpu_dm_connector(connector);
6073         int r;
6074
6075         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6076             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6077                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6078                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6079                 if (r)
6080                         return r;
6081         }
6082
6083 #if defined(CONFIG_DEBUG_FS)
6084         connector_debugfs_init(amdgpu_dm_connector);
6085 #endif
6086
6087         return 0;
6088 }
6089
6090 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6091         .reset = amdgpu_dm_connector_funcs_reset,
6092         .detect = amdgpu_dm_connector_detect,
6093         .fill_modes = drm_helper_probe_single_connector_modes,
6094         .destroy = amdgpu_dm_connector_destroy,
6095         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6096         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6097         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6098         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6099         .late_register = amdgpu_dm_connector_late_register,
6100         .early_unregister = amdgpu_dm_connector_unregister
6101 };
6102
6103 static int get_modes(struct drm_connector *connector)
6104 {
6105         return amdgpu_dm_connector_get_modes(connector);
6106 }
6107
6108 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6109 {
6110         struct dc_sink_init_data init_params = {
6111                         .link = aconnector->dc_link,
6112                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6113         };
6114         struct edid *edid;
6115
6116         if (!aconnector->base.edid_blob_ptr) {
6117                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6118                                 aconnector->base.name);
6119
6120                 aconnector->base.force = DRM_FORCE_OFF;
6121                 aconnector->base.override_edid = false;
6122                 return;
6123         }
6124
6125         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6126
6127         aconnector->edid = edid;
6128
6129         aconnector->dc_em_sink = dc_link_add_remote_sink(
6130                 aconnector->dc_link,
6131                 (uint8_t *)edid,
6132                 (edid->extensions + 1) * EDID_LENGTH,
6133                 &init_params);
6134
6135         if (aconnector->base.force == DRM_FORCE_ON) {
6136                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6137                 aconnector->dc_link->local_sink :
6138                 aconnector->dc_em_sink;
6139                 dc_sink_retain(aconnector->dc_sink);
6140         }
6141 }
6142
6143 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6144 {
6145         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6146
6147         /*
6148          * In case of a headless boot with force-on for a DP managed connector,
6149          * these settings have to be != 0 to get an initial modeset.
6150          */
6151         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6152                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6153                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6154         }
6155
6157         aconnector->base.override_edid = true;
6158         create_eml_sink(aconnector);
6159 }
6160
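/*
 * Create a stream and validate it against DC, retrying at progressively
 * lower color depths (requested bpc, then down in steps of 2 to a minimum of
 * 6) until validation succeeds. If the encoder itself rejects the stream,
 * retry once more with YCbCr420 output forced.
 */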
6161 static struct dc_stream_state *
6162 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6163                                 const struct drm_display_mode *drm_mode,
6164                                 const struct dm_connector_state *dm_state,
6165                                 const struct dc_stream_state *old_stream)
6166 {
6167         struct drm_connector *connector = &aconnector->base;
6168         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6169         struct dc_stream_state *stream;
6170         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6171         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6172         enum dc_status dc_result = DC_OK;
6173
6174         do {
6175                 stream = create_stream_for_sink(aconnector, drm_mode,
6176                                                 dm_state, old_stream,
6177                                                 requested_bpc);
6178                 if (stream == NULL) {
6179                         DRM_ERROR("Failed to create stream for sink!\n");
6180                         break;
6181                 }
6182
6183                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6184
6185                 if (dc_result != DC_OK) {
6186                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6187                                       drm_mode->hdisplay,
6188                                       drm_mode->vdisplay,
6189                                       drm_mode->clock,
6190                                       dc_result,
6191                                       dc_status_to_str(dc_result));
6192
6193                         dc_stream_release(stream);
6194                         stream = NULL;
6195                         requested_bpc -= 2; /* lower bpc to retry validation */
6196                 }
6197
6198         } while (stream == NULL && requested_bpc >= 6);
6199
6200         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6201                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6202
6203                 aconnector->force_yuv420_output = true;
6204                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6205                                                 dm_state, old_stream);
6206                 aconnector->force_yuv420_output = false;
6207         }
6208
6209         return stream;
6210 }
6211
6212 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6213                                    struct drm_display_mode *mode)
6214 {
6215         int result = MODE_ERROR;
6216         struct dc_sink *dc_sink;
6217         /* TODO: Unhardcode stream count */
6218         struct dc_stream_state *stream;
6219         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6220
6221         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6222                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6223                 return result;
6224
6225         /*
6226          * Only run this the first time mode_valid is called, to initialize
6227          * EDID management.
6228          */
6229         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6230                 !aconnector->dc_em_sink)
6231                 handle_edid_mgmt(aconnector);
6232
6233         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6234
6235         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6236                                 aconnector->base.force != DRM_FORCE_ON) {
6237                 DRM_ERROR("dc_sink is NULL!\n");
6238                 goto fail;
6239         }
6240
6241         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6242         if (stream) {
6243                 dc_stream_release(stream);
6244                 result = MODE_OK;
6245         }
6246
6247 fail:
6248         /* TODO: error handling */
6249         return result;
6250 }
6251
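/*
 * Pack the connector's HDR output metadata into a DC info packet. The DRM
 * static metadata infoframe is a fixed 4-byte header plus 26-byte payload
 * (30 bytes total). HDMI carries it as infoframe type 0x87; DP/eDP wraps the
 * same 26-byte payload in an SDP with its own 4-byte header.
 */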
6252 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6253                                 struct dc_info_packet *out)
6254 {
6255         struct hdmi_drm_infoframe frame;
6256         unsigned char buf[30]; /* 26 + 4 */
6257         ssize_t len;
6258         int ret, i;
6259
6260         memset(out, 0, sizeof(*out));
6261
6262         if (!state->hdr_output_metadata)
6263                 return 0;
6264
6265         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6266         if (ret)
6267                 return ret;
6268
6269         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6270         if (len < 0)
6271                 return (int)len;
6272
6273         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6274         if (len != 30)
6275                 return -EINVAL;
6276
6277         /* Prepare the infopacket for DC. */
6278         switch (state->connector->connector_type) {
6279         case DRM_MODE_CONNECTOR_HDMIA:
6280                 out->hb0 = 0x87; /* type */
6281                 out->hb1 = 0x01; /* version */
6282                 out->hb2 = 0x1A; /* length */
6283                 out->sb[0] = buf[3]; /* checksum */
6284                 i = 1;
6285                 break;
6286
6287         case DRM_MODE_CONNECTOR_DisplayPort:
6288         case DRM_MODE_CONNECTOR_eDP:
6289                 out->hb0 = 0x00; /* sdp id, zero */
6290                 out->hb1 = 0x87; /* type */
6291                 out->hb2 = 0x1D; /* payload len - 1 */
6292                 out->hb3 = (0x13 << 2); /* sdp version */
6293                 out->sb[0] = 0x01; /* version */
6294                 out->sb[1] = 0x1A; /* length */
6295                 i = 2;
6296                 break;
6297
6298         default:
6299                 return -EINVAL;
6300         }
6301
6302         memcpy(&out->sb[i], &buf[4], 26);
6303         out->valid = true;
6304
6305         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6306                        sizeof(out->sb), false);
6307
6308         return 0;
6309 }
6310
6311 static bool
6312 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6313                           const struct drm_connector_state *new_state)
6314 {
6315         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6316         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6317
6318         if (old_blob != new_blob) {
6319                 if (old_blob && new_blob &&
6320                     old_blob->length == new_blob->length)
6321                         return memcmp(old_blob->data, new_blob->data,
6322                                       old_blob->length);
6323
6324                 return true;
6325         }
6326
6327         return false;
6328 }
6329
6330 static int
6331 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6332                                  struct drm_atomic_state *state)
6333 {
6334         struct drm_connector_state *new_con_state =
6335                 drm_atomic_get_new_connector_state(state, conn);
6336         struct drm_connector_state *old_con_state =
6337                 drm_atomic_get_old_connector_state(state, conn);
6338         struct drm_crtc *crtc = new_con_state->crtc;
6339         struct drm_crtc_state *new_crtc_state;
6340         int ret;
6341
6342         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6343
6344         if (!crtc)
6345                 return 0;
6346
6347         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6348                 struct dc_info_packet hdr_infopacket;
6349
6350                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6351                 if (ret)
6352                         return ret;
6353
6354                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6355                 if (IS_ERR(new_crtc_state))
6356                         return PTR_ERR(new_crtc_state);
6357
6358                 /*
6359                  * DC considers the stream backends changed if the
6360                  * static metadata changes. Forcing the modeset also
6361                  * gives a simple way for userspace to switch from
6362                  * 8bpc to 10bpc when setting the metadata to enter
6363                  * or exit HDR.
6364                  *
6365                  * Changing the static metadata after it's been
6366                  * set is permissible, however. So only force a
6367                  * modeset if we're entering or exiting HDR.
6368                  */
6369                 new_crtc_state->mode_changed =
6370                         !old_con_state->hdr_output_metadata ||
6371                         !new_con_state->hdr_output_metadata;
6372         }
6373
6374         return 0;
6375 }
6376
6377 static const struct drm_connector_helper_funcs
6378 amdgpu_dm_connector_helper_funcs = {
6379         /*
6380          * When hotplugging a second, bigger display in FB console mode, the
6381          * bigger-resolution modes are filtered out by drm_mode_validate_size()
6382          * and are missing after the user starts lightdm. So we need to renew
6383          * the mode list in the get_modes callback, not just return the mode count.
6384          */
6385         .get_modes = get_modes,
6386         .mode_valid = amdgpu_dm_connector_mode_valid,
6387         .atomic_check = amdgpu_dm_connector_atomic_check,
6388 };
6389
6390 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6391 {
6392 }
6393
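/*
 * Count the non-cursor planes that will be active on this CRTC. A plane that
 * is absent from the atomic state passed a previous validation and still
 * counts as enabled; a plane present in the state counts only if it has a
 * framebuffer attached.
 */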
6394 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6395 {
6396         struct drm_atomic_state *state = new_crtc_state->state;
6397         struct drm_plane *plane;
6398         int num_active = 0;
6399
6400         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6401                 struct drm_plane_state *new_plane_state;
6402
6403                 /* Cursor planes are "fake". */
6404                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6405                         continue;
6406
6407                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6408
6409                 if (!new_plane_state) {
6410                         /*
6411                          * The plane is enabled on the CRTC and hasn't changed
6412                          * state. This means that it previously passed
6413                          * validation and is therefore enabled.
6414                          */
6415                         num_active += 1;
6416                         continue;
6417                 }
6418
6419                 /* We need a framebuffer to be considered enabled. */
6420                 num_active += (new_plane_state->fb != NULL);
6421         }
6422
6423         return num_active;
6424 }
6425
6426 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6427                                          struct drm_crtc_state *new_crtc_state)
6428 {
6429         struct dm_crtc_state *dm_new_crtc_state =
6430                 to_dm_crtc_state(new_crtc_state);
6431
6432         dm_new_crtc_state->active_planes = 0;
6433
6434         if (!dm_new_crtc_state->stream)
6435                 return;
6436
6437         dm_new_crtc_state->active_planes =
6438                 count_crtc_active_planes(new_crtc_state);
6439 }
6440
6441 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6442                                        struct drm_atomic_state *state)
6443 {
6444         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6445                                                                           crtc);
6446         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6447         struct dc *dc = adev->dm.dc;
6448         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6449         int ret = -EINVAL;
6450
6451         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6452
6453         dm_update_crtc_active_planes(crtc, crtc_state);
6454
6455         if (unlikely(!dm_crtc_state->stream &&
6456                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6457                 WARN_ON(1);
6458                 return ret;
6459         }
6460
6461         /*
6462          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6463          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6464          * planes are disabled, which is not supported by the hardware. And there is legacy
6465          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6466          */
6467         if (crtc_state->enable &&
6468             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6469                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6470                 return -EINVAL;
6471         }
6472
6473         /* In some use cases, like reset, no stream is attached */
6474         if (!dm_crtc_state->stream)
6475                 return 0;
6476
6477         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6478                 return 0;
6479
6480         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6481         return ret;
6482 }
6483
6484 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6485                                       const struct drm_display_mode *mode,
6486                                       struct drm_display_mode *adjusted_mode)
6487 {
6488         return true;
6489 }
6490
6491 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6492         .disable = dm_crtc_helper_disable,
6493         .atomic_check = dm_crtc_helper_atomic_check,
6494         .mode_fixup = dm_crtc_helper_mode_fixup,
6495         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6496 };
6497
6498 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6499 {
6500
6501 }
6502
6503 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6504 {
6505         switch (display_color_depth) {
6506         case COLOR_DEPTH_666:
6507                 return 6;
6508         case COLOR_DEPTH_888:
6509                 return 8;
6510         case COLOR_DEPTH_101010:
6511                 return 10;
6512         case COLOR_DEPTH_121212:
6513                 return 12;
6514         case COLOR_DEPTH_141414:
6515                 return 14;
6516         case COLOR_DEPTH_161616:
6517                 return 16;
6518         default:
6519                 break;
6520         }
6521         return 0;
6522 }
6523
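/*
 * For MST connectors, convert the adjusted mode into a bandwidth figure in
 * PBN (payload bandwidth number) units and reserve VCPI time slots for it.
 * As a rough, hand-computed illustration: 1080p60 at 24 bpp is about
 * 3.56 Gbit/s, which drm_dp_calc_pbn_mode() turns into roughly 530 PBN once
 * its scheduling margin is applied.
 */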
6524 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6525                                           struct drm_crtc_state *crtc_state,
6526                                           struct drm_connector_state *conn_state)
6527 {
6528         struct drm_atomic_state *state = crtc_state->state;
6529         struct drm_connector *connector = conn_state->connector;
6530         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6531         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6532         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6533         struct drm_dp_mst_topology_mgr *mst_mgr;
6534         struct drm_dp_mst_port *mst_port;
6535         enum dc_color_depth color_depth;
6536         int clock, bpp = 0;
6537         bool is_y420 = false;
6538
6539         if (!aconnector->port || !aconnector->dc_sink)
6540                 return 0;
6541
6542         mst_port = aconnector->port;
6543         mst_mgr = &aconnector->mst_port->mst_mgr;
6544
6545         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6546                 return 0;
6547
6548         if (!state->duplicated) {
6549                 int max_bpc = conn_state->max_requested_bpc;
6550                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6551                                 aconnector->force_yuv420_output;
6552                 color_depth = convert_color_depth_from_display_info(connector,
6553                                                                     is_y420,
6554                                                                     max_bpc);
6555                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6556                 clock = adjusted_mode->clock;
6557                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6558         }
6559         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6560                                                                            mst_mgr,
6561                                                                            mst_port,
6562                                                                            dm_new_connector_state->pbn,
6563                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6564         if (dm_new_connector_state->vcpi_slots < 0) {
6565                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6566                 return dm_new_connector_state->vcpi_slots;
6567         }
6568         return 0;
6569 }
6570
6571 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6572         .disable = dm_encoder_helper_disable,
6573         .atomic_check = dm_encoder_helper_atomic_check
6574 };
6575
6576 #if defined(CONFIG_DRM_AMD_DC_DCN)
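/*
 * Recompute PBN and VCPI slot allocations once DSC decisions are known:
 * streams without DSC keep their uncompressed PBN, while DSC streams derive
 * PBN from the compressed rate. dsc_cfg.bits_per_pixel is in units of 1/16
 * of a bit per pixel, which is why drm_dp_calc_pbn_mode() is called with
 * dsc = true here.
 */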
6577 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6578                                             struct dc_state *dc_state)
6579 {
6580         struct dc_stream_state *stream = NULL;
6581         struct drm_connector *connector;
6582         struct drm_connector_state *new_con_state, *old_con_state;
6583         struct amdgpu_dm_connector *aconnector;
6584         struct dm_connector_state *dm_conn_state;
6585         int i, j, clock, bpp;
6586         int vcpi, pbn_div, pbn = 0;
6587
6588         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6589
6590                 aconnector = to_amdgpu_dm_connector(connector);
6591
6592                 if (!aconnector->port)
6593                         continue;
6594
6595                 if (!new_con_state || !new_con_state->crtc)
6596                         continue;
6597
6598                 dm_conn_state = to_dm_connector_state(new_con_state);
6599
6600                 for (j = 0; j < dc_state->stream_count; j++) {
6601                         stream = dc_state->streams[j];
6602                         if (!stream)
6603                                 continue;
6604
6605                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6606                                 break;
6607
6608                         stream = NULL;
6609                 }
6610
6611                 if (!stream)
6612                         continue;
6613
6614                 if (stream->timing.flags.DSC != 1) {
6615                         drm_dp_mst_atomic_enable_dsc(state,
6616                                                      aconnector->port,
6617                                                      dm_conn_state->pbn,
6618                                                      0,
6619                                                      false);
6620                         continue;
6621                 }
6622
6623                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6624                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6625                 clock = stream->timing.pix_clk_100hz / 10;
6626                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6627                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6628                                                     aconnector->port,
6629                                                     pbn, pbn_div,
6630                                                     true);
6631                 if (vcpi < 0)
6632                         return vcpi;
6633
6634                 dm_conn_state->pbn = pbn;
6635                 dm_conn_state->vcpi_slots = vcpi;
6636         }
6637         return 0;
6638 }
6639 #endif
6640
6641 static void dm_drm_plane_reset(struct drm_plane *plane)
6642 {
6643         struct dm_plane_state *amdgpu_state = NULL;
6644
6645         if (plane->state)
6646                 plane->funcs->atomic_destroy_state(plane, plane->state);
6647
6648         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6649         WARN_ON(amdgpu_state == NULL);
6650
6651         if (amdgpu_state)
6652                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6653 }
6654
6655 static struct drm_plane_state *
6656 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6657 {
6658         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6659
6660         old_dm_plane_state = to_dm_plane_state(plane->state);
6661         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6662         if (!dm_plane_state)
6663                 return NULL;
6664
6665         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6666
6667         if (old_dm_plane_state->dc_state) {
6668                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6669                 dc_plane_state_retain(dm_plane_state->dc_state);
6670         }
6671
6672         return &dm_plane_state->base;
6673 }
6674
6675 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6676                                 struct drm_plane_state *state)
6677 {
6678         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6679
6680         if (dm_plane_state->dc_state)
6681                 dc_plane_state_release(dm_plane_state->dc_state);
6682
6683         drm_atomic_helper_plane_destroy_state(plane, state);
6684 }
6685
6686 static const struct drm_plane_funcs dm_plane_funcs = {
6687         .update_plane   = drm_atomic_helper_update_plane,
6688         .disable_plane  = drm_atomic_helper_disable_plane,
6689         .destroy        = drm_primary_helper_destroy,
6690         .reset = dm_drm_plane_reset,
6691         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6692         .atomic_destroy_state = dm_drm_plane_destroy_state,
6693         .format_mod_supported = dm_plane_format_mod_supported,
6694 };
6695
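/*
 * Pin the framebuffer's BO ahead of the commit (VRAM for cursor planes, any
 * display-supported domain otherwise), bind it into GART and record the
 * resulting GPU address in the amdgpu_framebuffer. Buffer attributes are
 * filled in here for newly created planes only, since the address is not yet
 * known at atomic-check time.
 */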
6696 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6697                                       struct drm_plane_state *new_state)
6698 {
6699         struct amdgpu_framebuffer *afb;
6700         struct drm_gem_object *obj;
6701         struct amdgpu_device *adev;
6702         struct amdgpu_bo *rbo;
6703         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6704         struct list_head list;
6705         struct ttm_validate_buffer tv;
6706         struct ww_acquire_ctx ticket;
6707         uint32_t domain;
6708         int r;
6709
6710         if (!new_state->fb) {
6711                 DRM_DEBUG_KMS("No FB bound\n");
6712                 return 0;
6713         }
6714
6715         afb = to_amdgpu_framebuffer(new_state->fb);
6716         obj = new_state->fb->obj[0];
6717         rbo = gem_to_amdgpu_bo(obj);
6718         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6719         INIT_LIST_HEAD(&list);
6720
6721         tv.bo = &rbo->tbo;
6722         tv.num_shared = 1;
6723         list_add(&tv.head, &list);
6724
6725         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6726         if (r) {
6727                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6728                 return r;
6729         }
6730
6731         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6732                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6733         else
6734                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6735
6736         r = amdgpu_bo_pin(rbo, domain);
6737         if (unlikely(r != 0)) {
6738                 if (r != -ERESTARTSYS)
6739                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6740                 ttm_eu_backoff_reservation(&ticket, &list);
6741                 return r;
6742         }
6743
6744         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6745         if (unlikely(r != 0)) {
6746                 amdgpu_bo_unpin(rbo);
6747                 ttm_eu_backoff_reservation(&ticket, &list);
6748                 DRM_ERROR("%p bind failed\n", rbo);
6749                 return r;
6750         }
6751
6752         ttm_eu_backoff_reservation(&ticket, &list);
6753
6754         afb->address = amdgpu_bo_gpu_offset(rbo);
6755
6756         amdgpu_bo_ref(rbo);
6757
6758         /*
6759          * We don't do surface updates on planes that have been newly created,
6760          * but we also don't have the afb->address during atomic check.
6761          *
6762          * Fill in buffer attributes that depend on the address here, but only
6763          * for newly created planes, since they're not being used by DC yet and
6764          * this won't modify global state.
6765          */
6766         dm_plane_state_old = to_dm_plane_state(plane->state);
6767         dm_plane_state_new = to_dm_plane_state(new_state);
6768
6769         if (dm_plane_state_new->dc_state &&
6770             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6771                 struct dc_plane_state *plane_state =
6772                         dm_plane_state_new->dc_state;
6773                 bool force_disable_dcc = !plane_state->dcc.enable;
6774
6775                 fill_plane_buffer_attributes(
6776                         adev, afb, plane_state->format, plane_state->rotation,
6777                         afb->tiling_flags,
6778                         &plane_state->tiling_info, &plane_state->plane_size,
6779                         &plane_state->dcc, &plane_state->address,
6780                         afb->tmz_surface, force_disable_dcc);
6781         }
6782
6783         return 0;
6784 }
6785
6786 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6787                                        struct drm_plane_state *old_state)
6788 {
6789         struct amdgpu_bo *rbo;
6790         int r;
6791
6792         if (!old_state->fb)
6793                 return;
6794
6795         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6796         r = amdgpu_bo_reserve(rbo, false);
6797         if (unlikely(r)) {
6798                 DRM_ERROR("failed to reserve rbo before unpin\n");
6799                 return;
6800         }
6801
6802         amdgpu_bo_unpin(rbo);
6803         amdgpu_bo_unreserve(rbo);
6804         amdgpu_bo_unref(&rbo);
6805 }
6806
6807 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6808                                        struct drm_crtc_state *new_crtc_state)
6809 {
6810         struct drm_framebuffer *fb = state->fb;
6811         int min_downscale, max_upscale;
6812         int min_scale = 0;
6813         int max_scale = INT_MAX;
6814
6815         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6816         if (fb && state->crtc) {
6817                 /* Validate viewport to cover the case when only the position changes */
6818                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6819                         int viewport_width = state->crtc_w;
6820                         int viewport_height = state->crtc_h;
6821
6822                         if (state->crtc_x < 0)
6823                                 viewport_width += state->crtc_x;
6824                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6825                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6826
6827                         if (state->crtc_y < 0)
6828                                 viewport_height += state->crtc_y;
6829                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6830                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6831
6832                         if (viewport_width < 0 || viewport_height < 0) {
6833                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6834                                 return -EINVAL;
6835                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6836                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6837                                 return -EINVAL;
6838                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6839                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6840                                 return -EINVAL;
6841                         }
6842
6843                 }
6844
6845                 /* Get min/max allowed scaling factors from plane caps. */
6846                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6847                                              &min_downscale, &max_upscale);
6848                 /*
6849                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6850                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6851                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6852                  */
6853                 min_scale = (1000 << 16) / max_upscale;
6854                 max_scale = (1000 << 16) / min_downscale;
6855         }
6856
6857         return drm_atomic_helper_check_plane_state(
6858                 state, new_crtc_state, min_scale, max_scale, true, true);
6859 }
6860
6861 static int dm_plane_atomic_check(struct drm_plane *plane,
6862                                  struct drm_atomic_state *state)
6863 {
6864         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6865                                                                                  plane);
6866         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6867         struct dc *dc = adev->dm.dc;
6868         struct dm_plane_state *dm_plane_state;
6869         struct dc_scaling_info scaling_info;
6870         struct drm_crtc_state *new_crtc_state;
6871         int ret;
6872
6873         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6874
6875         dm_plane_state = to_dm_plane_state(new_plane_state);
6876
6877         if (!dm_plane_state->dc_state)
6878                 return 0;
6879
6880         new_crtc_state =
6881                 drm_atomic_get_new_crtc_state(state,
6882                                               new_plane_state->crtc);
6883         if (!new_crtc_state)
6884                 return -EINVAL;
6885
6886         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6887         if (ret)
6888                 return ret;
6889
6890         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6891         if (ret)
6892                 return ret;
6893
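             /*
              * The computed scaling_info is not used further here; filling it
              * in is a validation-only step before handing the state to DC.
              */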
6894         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6895                 return 0;
6896
6897         return -EINVAL;
6898 }
6899
6900 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6901                                        struct drm_atomic_state *state)
6902 {
6903         /* Only support async updates on cursor planes. */
6904         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6905                 return -EINVAL;
6906
6907         return 0;
6908 }
6909
6910 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6911                                          struct drm_atomic_state *state)
6912 {
6913         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6914                                                                            plane);
6915         struct drm_plane_state *old_state =
6916                 drm_atomic_get_old_plane_state(state, plane);
6917
6918         trace_amdgpu_dm_atomic_update_cursor(new_state);
6919
6920         swap(plane->state->fb, new_state->fb);
6921
6922         plane->state->src_x = new_state->src_x;
6923         plane->state->src_y = new_state->src_y;
6924         plane->state->src_w = new_state->src_w;
6925         plane->state->src_h = new_state->src_h;
6926         plane->state->crtc_x = new_state->crtc_x;
6927         plane->state->crtc_y = new_state->crtc_y;
6928         plane->state->crtc_w = new_state->crtc_w;
6929         plane->state->crtc_h = new_state->crtc_h;
6930
6931         handle_cursor_update(plane, old_state);
6932 }
6933
6934 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6935         .prepare_fb = dm_plane_helper_prepare_fb,
6936         .cleanup_fb = dm_plane_helper_cleanup_fb,
6937         .atomic_check = dm_plane_atomic_check,
6938         .atomic_async_check = dm_plane_atomic_async_check,
6939         .atomic_async_update = dm_plane_atomic_async_update
6940 };
6941
6942 /*
6943  * TODO: these are currently initialized to RGB formats only.
6944  * For future use cases we should either initialize them dynamically based on
6945  * plane capabilities, or initialize this array with all formats so the
6946  * internal DRM check succeeds, and let DC implement the proper check.
6947  */
6948 static const uint32_t rgb_formats[] = {
6949         DRM_FORMAT_XRGB8888,
6950         DRM_FORMAT_ARGB8888,
6951         DRM_FORMAT_RGBA8888,
6952         DRM_FORMAT_XRGB2101010,
6953         DRM_FORMAT_XBGR2101010,
6954         DRM_FORMAT_ARGB2101010,
6955         DRM_FORMAT_ABGR2101010,
6956         DRM_FORMAT_XBGR8888,
6957         DRM_FORMAT_ABGR8888,
6958         DRM_FORMAT_RGB565,
6959 };
6960
6961 static const uint32_t overlay_formats[] = {
6962         DRM_FORMAT_XRGB8888,
6963         DRM_FORMAT_ARGB8888,
6964         DRM_FORMAT_RGBA8888,
6965         DRM_FORMAT_XBGR8888,
6966         DRM_FORMAT_ABGR8888,
6967         DRM_FORMAT_RGB565
6968 };
6969
6970 static const u32 cursor_formats[] = {
6971         DRM_FORMAT_ARGB8888
6972 };
6973
6974 static int get_plane_formats(const struct drm_plane *plane,
6975                              const struct dc_plane_cap *plane_cap,
6976                              uint32_t *formats, int max_formats)
6977 {
6978         int i, num_formats = 0;
6979
6980         /*
6981          * TODO: Query support for each group of formats directly from
6982          * DC plane caps. This will require adding more formats to the
6983          * caps list.
6984          */
6985
6986         switch (plane->type) {
6987         case DRM_PLANE_TYPE_PRIMARY:
6988                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6989                         if (num_formats >= max_formats)
6990                                 break;
6991
6992                         formats[num_formats++] = rgb_formats[i];
6993                 }
6994
6995                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6996                         formats[num_formats++] = DRM_FORMAT_NV12;
6997                 if (plane_cap && plane_cap->pixel_format_support.p010)
6998                         formats[num_formats++] = DRM_FORMAT_P010;
6999                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7000                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7001                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7002                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7003                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7004                 }
7005                 break;
7006
7007         case DRM_PLANE_TYPE_OVERLAY:
7008                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7009                         if (num_formats >= max_formats)
7010                                 break;
7011
7012                         formats[num_formats++] = overlay_formats[i];
7013                 }
7014                 break;
7015
7016         case DRM_PLANE_TYPE_CURSOR:
7017                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7018                         if (num_formats >= max_formats)
7019                                 break;
7020
7021                         formats[num_formats++] = cursor_formats[i];
7022                 }
7023                 break;
7024         }
7025
7026         return num_formats;
7027 }
7028
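     /*
      * Register a DRM plane for a DC plane: build the format and modifier
      * lists from the DC plane caps, then attach blending, color-space and
      * rotation properties where the hardware supports them.
      */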
7029 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7030                                 struct drm_plane *plane,
7031                                 unsigned long possible_crtcs,
7032                                 const struct dc_plane_cap *plane_cap)
7033 {
7034         uint32_t formats[32];
7035         int num_formats;
7036         int res = -EPERM;
7037         unsigned int supported_rotations;
7038         uint64_t *modifiers = NULL;
7039
7040         num_formats = get_plane_formats(plane, plane_cap, formats,
7041                                         ARRAY_SIZE(formats));
7042
7043         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7044         if (res)
7045                 return res;
7046
7047         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7048                                        &dm_plane_funcs, formats, num_formats,
7049                                        modifiers, plane->type, NULL);
7050         kfree(modifiers);
7051         if (res)
7052                 return res;
7053
7054         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7055             plane_cap && plane_cap->per_pixel_alpha) {
7056                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7057                                           BIT(DRM_MODE_BLEND_PREMULTI);
7058
7059                 drm_plane_create_alpha_property(plane);
7060                 drm_plane_create_blend_mode_property(plane, blend_caps);
7061         }
7062
7063         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7064             plane_cap &&
7065             (plane_cap->pixel_format_support.nv12 ||
7066              plane_cap->pixel_format_support.p010)) {
7067                 /* This only affects YUV formats. */
7068                 drm_plane_create_color_properties(
7069                         plane,
7070                         BIT(DRM_COLOR_YCBCR_BT601) |
7071                         BIT(DRM_COLOR_YCBCR_BT709) |
7072                         BIT(DRM_COLOR_YCBCR_BT2020),
7073                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7074                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7075                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7076         }
7077
7078         supported_rotations =
7079                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7080                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7081
7082         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7083             plane->type != DRM_PLANE_TYPE_CURSOR)
7084                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7085                                                    supported_rotations);
7086
7087         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7088
7089         /* Create (reset) the plane state */
7090         if (plane->funcs->reset)
7091                 plane->funcs->reset(plane);
7092
7093         return 0;
7094 }
7095
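     /*
      * Each CRTC gets a dedicated cursor plane, created here and handed to
      * drm_crtc_init_with_planes() together with the primary plane.
      */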
7096 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7097                                struct drm_plane *plane,
7098                                uint32_t crtc_index)
7099 {
7100         struct amdgpu_crtc *acrtc = NULL;
7101         struct drm_plane *cursor_plane;
7102
7103         int res = -ENOMEM;
7104
7105         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7106         if (!cursor_plane)
7107                 goto fail;
7108
7109         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7110         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7111
7112         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7113         if (!acrtc)
7114                 goto fail;
7115
7116         res = drm_crtc_init_with_planes(
7117                         dm->ddev,
7118                         &acrtc->base,
7119                         plane,
7120                         cursor_plane,
7121                         &amdgpu_dm_crtc_funcs, NULL);
7122
7123         if (res)
7124                 goto fail;
7125
7126         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7127
7128         /* Create (reset) the crtc state */
7129         if (acrtc->base.funcs->reset)
7130                 acrtc->base.funcs->reset(&acrtc->base);
7131
7132         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7133         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7134
7135         acrtc->crtc_id = crtc_index;
7136         acrtc->base.enabled = false;
7137         acrtc->otg_inst = -1;
7138
7139         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7140         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7141                                    true, MAX_COLOR_LUT_ENTRIES);
7142         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7143
7144         return 0;
7145
7146 fail:
7147         kfree(acrtc);
7148         kfree(cursor_plane);
7149         return res;
7150 }
7151
7152
7153 static int to_drm_connector_type(enum signal_type st)
7154 {
7155         switch (st) {
7156         case SIGNAL_TYPE_HDMI_TYPE_A:
7157                 return DRM_MODE_CONNECTOR_HDMIA;
7158         case SIGNAL_TYPE_EDP:
7159                 return DRM_MODE_CONNECTOR_eDP;
7160         case SIGNAL_TYPE_LVDS:
7161                 return DRM_MODE_CONNECTOR_LVDS;
7162         case SIGNAL_TYPE_RGB:
7163                 return DRM_MODE_CONNECTOR_VGA;
7164         case SIGNAL_TYPE_DISPLAY_PORT:
7165         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7166                 return DRM_MODE_CONNECTOR_DisplayPort;
7167         case SIGNAL_TYPE_DVI_DUAL_LINK:
7168         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7169                 return DRM_MODE_CONNECTOR_DVID;
7170         case SIGNAL_TYPE_VIRTUAL:
7171                 return DRM_MODE_CONNECTOR_VIRTUAL;
7172
7173         default:
7174                 return DRM_MODE_CONNECTOR_Unknown;
7175         }
7176 }
7177
7178 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7179 {
7180         struct drm_encoder *encoder;
7181
7182         /* There is only one encoder per connector */
7183         drm_connector_for_each_possible_encoder(connector, encoder)
7184                 return encoder;
7185
7186         return NULL;
7187 }
7188
7189 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7190 {
7191         struct drm_encoder *encoder;
7192         struct amdgpu_encoder *amdgpu_encoder;
7193
7194         encoder = amdgpu_dm_connector_to_encoder(connector);
7195
7196         if (encoder == NULL)
7197                 return;
7198
7199         amdgpu_encoder = to_amdgpu_encoder(encoder);
7200
7201         amdgpu_encoder->native_mode.clock = 0;
7202
7203         if (!list_empty(&connector->probed_modes)) {
7204                 struct drm_display_mode *preferred_mode = NULL;
7205
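                     /*
                      * probed_modes is expected to have been sorted by the
                      * caller (see amdgpu_dm_connector_ddc_get_modes()), so
                      * only the first entry is checked for the PREFERRED
                      * flag; note the unconditional break below.
                      */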
7206                 list_for_each_entry(preferred_mode,
7207                                     &connector->probed_modes,
7208                                     head) {
7209                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7210                                 amdgpu_encoder->native_mode = *preferred_mode;
7211
7212                         break;
7213                 }
7214
7215         }
7216 }
7217
7218 static struct drm_display_mode *
7219 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7220                              char *name,
7221                              int hdisplay, int vdisplay)
7222 {
7223         struct drm_device *dev = encoder->dev;
7224         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7225         struct drm_display_mode *mode = NULL;
7226         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7227
7228         mode = drm_mode_duplicate(dev, native_mode);
7229
7230         if (mode == NULL)
7231                 return NULL;
7232
7233         mode->hdisplay = hdisplay;
7234         mode->vdisplay = vdisplay;
7235         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7236         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7237
7238         return mode;
7239
7240 }
7242 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7243                                                  struct drm_connector *connector)
7244 {
7245         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7246         struct drm_display_mode *mode = NULL;
7247         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7248         struct amdgpu_dm_connector *amdgpu_dm_connector =
7249                                 to_amdgpu_dm_connector(connector);
7250         int i;
7251         int n;
7252         struct mode_size {
7253                 char name[DRM_DISPLAY_MODE_LEN];
7254                 int w;
7255                 int h;
7256         } common_modes[] = {
7257                 {  "640x480",  640,  480},
7258                 {  "800x600",  800,  600},
7259                 { "1024x768", 1024,  768},
7260                 { "1280x720", 1280,  720},
7261                 { "1280x800", 1280,  800},
7262                 {"1280x1024", 1280, 1024},
7263                 { "1440x900", 1440,  900},
7264                 {"1680x1050", 1680, 1050},
7265                 {"1600x1200", 1600, 1200},
7266                 {"1920x1080", 1920, 1080},
7267                 {"1920x1200", 1920, 1200}
7268         };
7269
7270         n = ARRAY_SIZE(common_modes);
7271
7272         for (i = 0; i < n; i++) {
7273                 struct drm_display_mode *curmode = NULL;
7274                 bool mode_existed = false;
7275
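                     /*
                      * Skip modes larger than the native mode, and the native
                      * resolution itself, which is already in probed_modes.
                      */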
7276                 if (common_modes[i].w > native_mode->hdisplay ||
7277                     common_modes[i].h > native_mode->vdisplay ||
7278                    (common_modes[i].w == native_mode->hdisplay &&
7279                     common_modes[i].h == native_mode->vdisplay))
7280                         continue;
7281
7282                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7283                         if (common_modes[i].w == curmode->hdisplay &&
7284                             common_modes[i].h == curmode->vdisplay) {
7285                                 mode_existed = true;
7286                                 break;
7287                         }
7288                 }
7289
7290                 if (mode_existed)
7291                         continue;
7292
7293                 mode = amdgpu_dm_create_common_mode(encoder,
7294                                 common_modes[i].name, common_modes[i].w,
7295                                 common_modes[i].h);
                     if (!mode)
                             continue;
7296                 drm_mode_probed_add(connector, mode);
7297                 amdgpu_dm_connector->num_modes++;
7298         }
7299 }
7300
7301 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7302                                               struct edid *edid)
7303 {
7304         struct amdgpu_dm_connector *amdgpu_dm_connector =
7305                         to_amdgpu_dm_connector(connector);
7306
7307         if (edid) {
7308                 /* empty probed_modes */
7309                 INIT_LIST_HEAD(&connector->probed_modes);
7310                 amdgpu_dm_connector->num_modes =
7311                                 drm_add_edid_modes(connector, edid);
7312
7313                 /* Sort the probed modes before calling
7314                  * amdgpu_dm_get_native_mode(), since an EDID can contain
7315                  * more than one preferred mode, and a mode later in the
7316                  * probed list may have a higher preferred resolution.
7317                  * For example, the base EDID preferred timing may be
7318                  * 3840x2160 while a DisplayID extension block later
7319                  * prefers 4096x2160.
7320                  */
7321                 drm_mode_sort(&connector->probed_modes);
7322                 amdgpu_dm_get_native_mode(connector);
7323
7324                 /* Freesync capabilities are reset by calling
7325                  * drm_add_edid_modes() and need to be
7326                  * restored here.
7327                  */
7328                 amdgpu_dm_update_freesync_caps(connector, edid);
7329         } else {
7330                 amdgpu_dm_connector->num_modes = 0;
7331         }
7332 }
7333
7334 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7335                               struct drm_display_mode *mode)
7336 {
7337         struct drm_display_mode *m;
7338
7339         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7340                 if (drm_mode_equal(m, mode))
7341                         return true;
7342         }
7343
7344         return false;
7345 }
7346
7347 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7348 {
7349         const struct drm_display_mode *m;
7350         struct drm_display_mode *new_mode;
7351         uint i;
7352         uint32_t new_modes_count = 0;
7353
7354         /* Standard FPS values
7355          *
7356          * 23.976   - TV/NTSC
7357          * 24       - Cinema
7358          * 25       - TV/PAL
7359          * 29.97    - TV/NTSC
7360          * 30       - TV/NTSC
7361          * 48       - Cinema HFR
7362          * 50       - TV/PAL
7363          * 60       - Commonly used
7364          * 48,72,96 - Multiples of 24
7365          */
7366         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7367                                          48000, 50000, 60000, 72000, 96000 };
7368
7369         /*
7370          * Find mode with highest refresh rate with the same resolution
7371          * as the preferred mode. Some monitors report a preferred mode
7372          * with lower resolution than the highest refresh rate supported.
7373          */
7374
7375         m = get_highest_refresh_rate_mode(aconnector, true);
7376         if (!m)
7377                 return 0;
7378
7379         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7380                 uint64_t target_vtotal, target_vtotal_diff;
7381                 uint64_t num, den;
7382
7383                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7384                         continue;
7385
7386                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7387                     common_rates[i] > aconnector->max_vfreq * 1000)
7388                         continue;
7389
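                     /*
                      * vrefresh[mHz] = clock[kHz] * 10^6 / (htotal * vtotal),
                      * so the vtotal needed for the target rate is
                      * clock * 10^6 / (htotal * rate_mHz). Only vtotal (and
                      * the vsync positions below) are stretched; the pixel
                      * clock stays unchanged.
                      */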
7390                 num = (unsigned long long)m->clock * 1000 * 1000;
7391                 den = common_rates[i] * (unsigned long long)m->htotal;
7392                 target_vtotal = div_u64(num, den);
7393                 target_vtotal_diff = target_vtotal - m->vtotal;
7394
7395                 /* Check for illegal modes */
7396                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7397                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7398                     m->vtotal + target_vtotal_diff < m->vsync_end)
7399                         continue;
7400
7401                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7402                 if (!new_mode)
7403                         goto out;
7404
7405                 new_mode->vtotal += (u16)target_vtotal_diff;
7406                 new_mode->vsync_start += (u16)target_vtotal_diff;
7407                 new_mode->vsync_end += (u16)target_vtotal_diff;
7408                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7409                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7410
7411                 if (!is_duplicate_mode(aconnector, new_mode)) {
7412                         drm_mode_probed_add(&aconnector->base, new_mode);
7413                         new_modes_count += 1;
7414                 } else {
7415                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
7416         }
7417  out:
7418         return new_modes_count;
7419 }
7420
7421 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7422                                                    struct edid *edid)
7423 {
7424         struct amdgpu_dm_connector *amdgpu_dm_connector =
7425                 to_amdgpu_dm_connector(connector);
7426
7427         if (!(amdgpu_freesync_vid_mode && edid))
7428                 return;
7429
7430         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7431                 amdgpu_dm_connector->num_modes +=
7432                         add_fs_modes(amdgpu_dm_connector);
7433 }
7434
7435 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7436 {
7437         struct amdgpu_dm_connector *amdgpu_dm_connector =
7438                         to_amdgpu_dm_connector(connector);
7439         struct drm_encoder *encoder;
7440         struct edid *edid = amdgpu_dm_connector->edid;
7441
7442         encoder = amdgpu_dm_connector_to_encoder(connector);
7443
7444         if (!drm_edid_is_valid(edid)) {
7445                 amdgpu_dm_connector->num_modes =
7446                                 drm_add_modes_noedid(connector, 640, 480);
7447         } else {
7448                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7449                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7450                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7451         }
7452         amdgpu_dm_fbc_init(connector);
7453
7454         return amdgpu_dm_connector->num_modes;
7455 }
7456
7457 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7458                                      struct amdgpu_dm_connector *aconnector,
7459                                      int connector_type,
7460                                      struct dc_link *link,
7461                                      int link_index)
7462 {
7463         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7464
7465         /*
7466          * Some of the properties below require access to state, like bpc.
7467          * Allocate some default initial connector state with our reset helper.
7468          */
7469         if (aconnector->base.funcs->reset)
7470                 aconnector->base.funcs->reset(&aconnector->base);
7471
7472         aconnector->connector_id = link_index;
7473         aconnector->dc_link = link;
7474         aconnector->base.interlace_allowed = false;
7475         aconnector->base.doublescan_allowed = false;
7476         aconnector->base.stereo_allowed = false;
7477         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7478         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7479         aconnector->audio_inst = -1;
7480         mutex_init(&aconnector->hpd_lock);
7481
7482         /*
7483          * Configure HPD hot-plug support. connector->polled defaults to 0,
7484          * which means HPD hot plug is not supported.
7485          */
7486         switch (connector_type) {
7487         case DRM_MODE_CONNECTOR_HDMIA:
7488                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7489                 aconnector->base.ycbcr_420_allowed =
7490                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7491                 break;
7492         case DRM_MODE_CONNECTOR_DisplayPort:
7493                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7494                 aconnector->base.ycbcr_420_allowed =
7495                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7496                 break;
7497         case DRM_MODE_CONNECTOR_DVID:
7498                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7499                 break;
7500         default:
7501                 break;
7502         }
7503
7504         drm_object_attach_property(&aconnector->base.base,
7505                                 dm->ddev->mode_config.scaling_mode_property,
7506                                 DRM_MODE_SCALE_NONE);
7507
7508         drm_object_attach_property(&aconnector->base.base,
7509                                 adev->mode_info.underscan_property,
7510                                 UNDERSCAN_OFF);
7511         drm_object_attach_property(&aconnector->base.base,
7512                                 adev->mode_info.underscan_hborder_property,
7513                                 0);
7514         drm_object_attach_property(&aconnector->base.base,
7515                                 adev->mode_info.underscan_vborder_property,
7516                                 0);
7517
7518         if (!aconnector->mst_port)
7519                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7520
7521         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7522         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7523         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7524
7525         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7526             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7527                 drm_object_attach_property(&aconnector->base.base,
7528                                 adev->mode_info.abm_level_property, 0);
7529         }
7530
7531         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7532             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7533             connector_type == DRM_MODE_CONNECTOR_eDP) {
7534                 drm_object_attach_property(
7535                         &aconnector->base.base,
7536                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7537
7538                 if (!aconnector->mst_port)
7539                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7540
7541 #ifdef CONFIG_DRM_AMD_DC_HDCP
7542                 if (adev->dm.hdcp_workqueue)
7543                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7544 #endif
7545         }
7546 }
7547
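     /*
      * i2c_algorithm master_xfer hook: translate the i2c_msg array into a DC
      * i2c_command and submit it over the link's DDC channel. Returns the
      * number of messages on success, -EIO otherwise.
      */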
7548 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7549                               struct i2c_msg *msgs, int num)
7550 {
7551         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7552         struct ddc_service *ddc_service = i2c->ddc_service;
7553         struct i2c_command cmd;
7554         int i;
7555         int result = -EIO;
7556
7557         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7558
7559         if (!cmd.payloads)
7560                 return result;
7561
7562         cmd.number_of_payloads = num;
7563         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7564         cmd.speed = 100;
7565
7566         for (i = 0; i < num; i++) {
7567                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7568                 cmd.payloads[i].address = msgs[i].addr;
7569                 cmd.payloads[i].length = msgs[i].len;
7570                 cmd.payloads[i].data = msgs[i].buf;
7571         }
7572
7573         if (dc_submit_i2c(
7574                         ddc_service->ctx->dc,
7575                         ddc_service->ddc_pin->hw_info.ddc_channel,
7576                         &cmd))
7577                 result = num;
7578
7579         kfree(cmd.payloads);
7580         return result;
7581 }
7582
7583 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7584 {
7585         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7586 }
7587
7588 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7589         .master_xfer = amdgpu_dm_i2c_xfer,
7590         .functionality = amdgpu_dm_i2c_func,
7591 };
7592
7593 static struct amdgpu_i2c_adapter *
7594 create_i2c(struct ddc_service *ddc_service,
7595            int link_index,
7596            int *res)
7597 {
7598         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7599         struct amdgpu_i2c_adapter *i2c;
7600
7601         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7602         if (!i2c)
7603                 return NULL;
7604         i2c->base.owner = THIS_MODULE;
7605         i2c->base.class = I2C_CLASS_DDC;
7606         i2c->base.dev.parent = &adev->pdev->dev;
7607         i2c->base.algo = &amdgpu_dm_i2c_algo;
7608         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7609         i2c_set_adapdata(&i2c->base, i2c);
7610         i2c->ddc_service = ddc_service;
7611         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7612
7613         return i2c;
7614 }
7615
7616
7617 /*
7618  * Note: this function assumes that dc_link_detect() was called for the
7619  * dc_link which will be represented by this aconnector.
7620  */
7621 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7622                                     struct amdgpu_dm_connector *aconnector,
7623                                     uint32_t link_index,
7624                                     struct amdgpu_encoder *aencoder)
7625 {
7626         int res = 0;
7627         int connector_type;
7628         struct dc *dc = dm->dc;
7629         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7630         struct amdgpu_i2c_adapter *i2c;
7631
7632         link->priv = aconnector;
7633
7634         DRM_DEBUG_DRIVER("%s()\n", __func__);
7635
7636         i2c = create_i2c(link->ddc, link->link_index, &res);
7637         if (!i2c) {
7638                 DRM_ERROR("Failed to create i2c adapter data\n");
7639                 return -ENOMEM;
7640         }
7641
7642         aconnector->i2c = i2c;
7643         res = i2c_add_adapter(&i2c->base);
7644
7645         if (res) {
7646                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7647                 goto out_free;
7648         }
7649
7650         connector_type = to_drm_connector_type(link->connector_signal);
7651
7652         res = drm_connector_init_with_ddc(
7653                         dm->ddev,
7654                         &aconnector->base,
7655                         &amdgpu_dm_connector_funcs,
7656                         connector_type,
7657                         &i2c->base);
7658
7659         if (res) {
7660                 DRM_ERROR("connector_init failed\n");
7661                 aconnector->connector_id = -1;
7662                 goto out_free;
7663         }
7664
7665         drm_connector_helper_add(
7666                         &aconnector->base,
7667                         &amdgpu_dm_connector_helper_funcs);
7668
7669         amdgpu_dm_connector_init_helper(
7670                 dm,
7671                 aconnector,
7672                 connector_type,
7673                 link,
7674                 link_index);
7675
7676         drm_connector_attach_encoder(
7677                 &aconnector->base, &aencoder->base);
7678
7679         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7680                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7681                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7682
7683 out_free:
7684         if (res) {
7685                 kfree(i2c);
7686                 aconnector->i2c = NULL;
7687         }
7688         return res;
7689 }
7690
7691 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7692 {
7693         switch (adev->mode_info.num_crtc) {
7694         case 1:
7695                 return 0x1;
7696         case 2:
7697                 return 0x3;
7698         case 3:
7699                 return 0x7;
7700         case 4:
7701                 return 0xf;
7702         case 5:
7703                 return 0x1f;
7704         case 6:
7705         default:
7706                 return 0x3f;
7707         }
7708 }
7709
7710 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7711                                   struct amdgpu_encoder *aencoder,
7712                                   uint32_t link_index)
7713 {
7714         struct amdgpu_device *adev = drm_to_adev(dev);
7715
7716         int res = drm_encoder_init(dev,
7717                                    &aencoder->base,
7718                                    &amdgpu_dm_encoder_funcs,
7719                                    DRM_MODE_ENCODER_TMDS,
7720                                    NULL);
7721
7722         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7723
7724         if (!res)
7725                 aencoder->encoder_id = link_index;
7726         else
7727                 aencoder->encoder_id = -1;
7728
7729         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7730
7731         return res;
7732 }
7733
7734 static void manage_dm_interrupts(struct amdgpu_device *adev,
7735                                  struct amdgpu_crtc *acrtc,
7736                                  bool enable)
7737 {
7738         /*
7739          * We have no guarantee that the frontend index maps to the same
7740          * backend index - some even map to more than one.
7741          *
7742          * TODO: Use a different interrupt or check DC itself for the mapping.
7743          */
7744         int irq_type =
7745                 amdgpu_display_crtc_idx_to_irq_type(
7746                         adev,
7747                         acrtc->crtc_id);
7748
7749         if (enable) {
7750                 drm_crtc_vblank_on(&acrtc->base);
7751                 amdgpu_irq_get(
7752                         adev,
7753                         &adev->pageflip_irq,
7754                         irq_type);
7755 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7756                 amdgpu_irq_get(
7757                         adev,
7758                         &adev->vline0_irq,
7759                         irq_type);
7760 #endif
7761         } else {
7762 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7763                 amdgpu_irq_put(
7764                         adev,
7765                         &adev->vline0_irq,
7766                         irq_type);
7767 #endif
7768                 amdgpu_irq_put(
7769                         adev,
7770                         &adev->pageflip_irq,
7771                         irq_type);
7772                 drm_crtc_vblank_off(&acrtc->base);
7773         }
7774 }
7775
7776 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7777                                       struct amdgpu_crtc *acrtc)
7778 {
7779         int irq_type =
7780                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7781
7782         /*
7783          * Read the current state for the IRQ and force a reapply of the
7784          * setting to the hardware.
7785          */
7786         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7787 }
7788
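/*
 * True if the scaling mode or the underscan borders changed in a way that
 * requires the stream's scaling parameters to be reprogrammed.
 */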
7789 static bool
7790 is_scaling_state_different(const struct dm_connector_state *dm_state,
7791                            const struct dm_connector_state *old_dm_state)
7792 {
7793         if (dm_state->scaling != old_dm_state->scaling)
7794                 return true;
7795         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7796                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7797                         return true;
7798         } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7799                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7800                         return true;
7801         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7802                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7803                 return true;
7804         return false;
7805 }
7806
7807 #ifdef CONFIG_DRM_AMD_DC_HDCP
7808 static bool is_content_protection_different(struct drm_connector_state *state,
7809                                             const struct drm_connector_state *old_state,
7810                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7811 {
7812         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7813         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7814
7815         /* Handle: Type0/1 change */
7816         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7817             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7818                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7819                 return true;
7820         }
7821
7822         /* Content protection is being re-enabled; ignore this transition.
7823          *
7824          * Handles:     ENABLED -> DESIRED
7825          */
7826         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7827             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7828                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7829                 return false;
7830         }
7831
7832         /* S3 resume case: the old state will always be UNDESIRED (0) and the restored state will be ENABLED.
7833          *
7834          * Handles:     UNDESIRED -> ENABLED
7835          */
7836         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7837             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7838                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7839
7840         /* Check that a sink is connected and the connector is enabled; otherwise
7841          * we would start HDCP with nothing attached (hot-plug, headless S3, DPMS).
7842          *
7843          * Handles:     DESIRED -> DESIRED (Special case)
7844          */
7845         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7846             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7847                 dm_con_state->update_hdcp = false;
7848                 return true;
7849         }
7850
7851         /*
7852          * Handles:     UNDESIRED -> UNDESIRED
7853          *              DESIRED -> DESIRED
7854          *              ENABLED -> ENABLED
7855          */
7856         if (old_state->content_protection == state->content_protection)
7857                 return false;
7858
7859         /*
7860          * Handles:     UNDESIRED -> DESIRED
7861          *              DESIRED -> UNDESIRED
7862          *              ENABLED -> UNDESIRED
7863          */
7864         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7865                 return true;
7866
7867         /*
7868          * Handles:     DESIRED -> ENABLED
7869          */
7870         return false;
7871 }
7872
7873 #endif
7874 static void remove_stream(struct amdgpu_device *adev,
7875                           struct amdgpu_crtc *acrtc,
7876                           struct dc_stream_state *stream)
7877 {
7878         /* Stream removal on the mode-update path: mark the CRTC as idle/disabled. */
7879
7880         acrtc->otg_inst = -1;
7881         acrtc->enabled = false;
7882 }
7883
7884 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7885                                struct dc_cursor_position *position)
7886 {
7887         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7888         int x, y;
7889         int xorigin = 0, yorigin = 0;
7890
7891         if (!crtc || !plane->state->fb)
7892                 return 0;
7893
7894         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7895             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7896                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7897                           __func__,
7898                           plane->state->crtc_w,
7899                           plane->state->crtc_h);
7900                 return -EINVAL;
7901         }
7902
7903         x = plane->state->crtc_x;
7904         y = plane->state->crtc_y;
7905
7906         if (x <= -amdgpu_crtc->max_cursor_width ||
7907             y <= -amdgpu_crtc->max_cursor_height)
7908                 return 0;
7909
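             /*
              * If the cursor hangs off the top or left edge, DC cannot take a
              * negative position; clamp it to zero and shift the hotspot by
              * the clipped amount instead.
              */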
7910         if (x < 0) {
7911                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7912                 x = 0;
7913         }
7914         if (y < 0) {
7915                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7916                 y = 0;
7917         }
7918         position->enable = true;
7919         position->translate_by_source = true;
7920         position->x = x;
7921         position->y = y;
7922         position->x_hotspot = xorigin;
7923         position->y_hotspot = yorigin;
7924
7925         return 0;
7926 }
7927
7928 static void handle_cursor_update(struct drm_plane *plane,
7929                                  struct drm_plane_state *old_plane_state)
7930 {
7931         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7932         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7933         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7934         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7935         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7936         uint64_t address = afb ? afb->address : 0;
7937         struct dc_cursor_position position = {0};
7938         struct dc_cursor_attributes attributes;
7939         int ret;
7940
7941         if (!plane->state->fb && !old_plane_state->fb)
7942                 return;
7943
7944         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7945                       __func__,
7946                       amdgpu_crtc->crtc_id,
7947                       plane->state->crtc_w,
7948                       plane->state->crtc_h);
7949
7950         ret = get_cursor_position(plane, crtc, &position);
7951         if (ret)
7952                 return;
7953
7954         if (!position.enable) {
7955                 /* turn off cursor */
7956                 if (crtc_state && crtc_state->stream) {
7957                         mutex_lock(&adev->dm.dc_lock);
7958                         dc_stream_set_cursor_position(crtc_state->stream,
7959                                                       &position);
7960                         mutex_unlock(&adev->dm.dc_lock);
7961                 }
7962                 return;
7963         }
7964
7965         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7966         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7967
7968         memset(&attributes, 0, sizeof(attributes));
7969         attributes.address.high_part = upper_32_bits(address);
7970         attributes.address.low_part  = lower_32_bits(address);
7971         attributes.width             = plane->state->crtc_w;
7972         attributes.height            = plane->state->crtc_h;
7973         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7974         attributes.rotation_angle    = 0;
7975         attributes.attribute_flags.value = 0;
7976
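             /* DC expects the cursor pitch in pixels, not in bytes. */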
7977         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7978
7979         if (crtc_state->stream) {
7980                 mutex_lock(&adev->dm.dc_lock);
7981                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7982                                                          &attributes))
7983                         DRM_ERROR("DC failed to set cursor attributes\n");
7984
7985                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7986                                                    &position))
7987                         DRM_ERROR("DC failed to set cursor position\n");
7988                 mutex_unlock(&adev->dm.dc_lock);
7989         }
7990 }
7991
7992 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7993 {
7995         assert_spin_locked(&acrtc->base.dev->event_lock);
7996         WARN_ON(acrtc->event);
7997
7998         acrtc->event = acrtc->base.state->event;
7999
8000         /* Set the flip status */
8001         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8002
8003         /* Mark this event as consumed */
8004         acrtc->base.state->event = NULL;
8005
8006         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8007                      acrtc->crtc_id);
8008 }
8009
8010 static void update_freesync_state_on_stream(
8011         struct amdgpu_display_manager *dm,
8012         struct dm_crtc_state *new_crtc_state,
8013         struct dc_stream_state *new_stream,
8014         struct dc_plane_state *surface,
8015         u32 flip_timestamp_in_us)
8016 {
8017         struct mod_vrr_params vrr_params;
8018         struct dc_info_packet vrr_infopacket = {0};
8019         struct amdgpu_device *adev = dm->adev;
8020         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8021         unsigned long flags;
8022         bool pack_sdp_v1_3 = false;
8023
8024         if (!new_stream)
8025                 return;
8026
8027         /*
8028          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8029          * For now it's sufficient to just guard against these conditions.
8030          */
8031
8032         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8033                 return;
8034
8035         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8036         vrr_params = acrtc->dm_irq_params.vrr_params;
8037
8038         if (surface) {
8039                 mod_freesync_handle_preflip(
8040                         dm->freesync_module,
8041                         surface,
8042                         new_stream,
8043                         flip_timestamp_in_us,
8044                         &vrr_params);
8045
8046                 if (adev->family < AMDGPU_FAMILY_AI &&
8047                     amdgpu_dm_vrr_active(new_crtc_state)) {
8048                         mod_freesync_handle_v_update(dm->freesync_module,
8049                                                      new_stream, &vrr_params);
8050
8051                         /* Need to call this before the frame ends. */
8052                         dc_stream_adjust_vmin_vmax(dm->dc,
8053                                                    new_crtc_state->stream,
8054                                                    &vrr_params.adjust);
8055                 }
8056         }
8057
8058         mod_freesync_build_vrr_infopacket(
8059                 dm->freesync_module,
8060                 new_stream,
8061                 &vrr_params,
8062                 PACKET_TYPE_VRR,
8063                 TRANSFER_FUNC_UNKNOWN,
8064                 &vrr_infopacket,
8065                 pack_sdp_v1_3);
8066
8067         new_crtc_state->freesync_timing_changed |=
8068                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8069                         &vrr_params.adjust,
8070                         sizeof(vrr_params.adjust)) != 0);
8071
8072         new_crtc_state->freesync_vrr_info_changed |=
8073                 (memcmp(&new_crtc_state->vrr_infopacket,
8074                         &vrr_infopacket,
8075                         sizeof(vrr_infopacket)) != 0);
8076
8077         acrtc->dm_irq_params.vrr_params = vrr_params;
8078         new_crtc_state->vrr_infopacket = vrr_infopacket;
8079
8080         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8081         new_stream->vrr_infopacket = vrr_infopacket;
8082
8083         if (new_crtc_state->freesync_vrr_info_changed)
8084                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8085                               new_crtc_state->base.crtc->base.id,
8086                               (int)new_crtc_state->base.vrr_enabled,
8087                               (int)vrr_params.state);
8088
8089         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8090 }
8091
8092 static void update_stream_irq_parameters(
8093         struct amdgpu_display_manager *dm,
8094         struct dm_crtc_state *new_crtc_state)
8095 {
8096         struct dc_stream_state *new_stream = new_crtc_state->stream;
8097         struct mod_vrr_params vrr_params;
8098         struct mod_freesync_config config = new_crtc_state->freesync_config;
8099         struct amdgpu_device *adev = dm->adev;
8100         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8101         unsigned long flags;
8102
8103         if (!new_stream)
8104                 return;
8105
8106         /*
8107          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8108          * For now it's sufficient to just guard against these conditions.
8109          */
8110         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8111                 return;
8112
8113         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8114         vrr_params = acrtc->dm_irq_params.vrr_params;
8115
8116         if (new_crtc_state->vrr_supported &&
8117             config.min_refresh_in_uhz &&
8118             config.max_refresh_in_uhz) {
8119                 /*
8120                  * if freesync compatible mode was set, config.state will be set
8121                  * in atomic check
8122                  */
8123                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8124                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8125                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8126                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8127                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8128                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8129                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8130                 } else {
8131                         config.state = new_crtc_state->base.vrr_enabled ?
8132                                                      VRR_STATE_ACTIVE_VARIABLE :
8133                                                      VRR_STATE_INACTIVE;
8134                 }
8135         } else {
8136                 config.state = VRR_STATE_UNSUPPORTED;
8137         }
8138
8139         mod_freesync_build_vrr_params(dm->freesync_module,
8140                                       new_stream,
8141                                       &config, &vrr_params);
8142
8143         new_crtc_state->freesync_timing_changed |=
8144                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8145                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8146
8147         new_crtc_state->freesync_config = config;
8148         /* Copy state for access from DM IRQ handler */
8149         acrtc->dm_irq_params.freesync_config = config;
8150         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8151         acrtc->dm_irq_params.vrr_params = vrr_params;
8152         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8153 }
8154
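/*
 * Handle VRR on<->off transitions: while VRR is active we hold a vblank
 * reference and keep the vupdate irq enabled so vblank/pflip timestamps
 * stay valid; both are dropped again when VRR turns off.
 */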
8155 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8156                                             struct dm_crtc_state *new_state)
8157 {
8158         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8159         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8160
8161         if (!old_vrr_active && new_vrr_active) {
8162                 /* Transition VRR inactive -> active:
8163                  * While VRR is active, we must not disable vblank irq, as a
8164                  * re-enable after a disable would compute bogus vblank/pflip
8165                  * timestamps if the re-enable happens inside the display front porch.
8166                  *
8167                  * We also need vupdate irq for the actual core vblank handling
8168                  * at end of vblank.
8169                  */
8170                 dm_set_vupdate_irq(new_state->base.crtc, true);
8171                 drm_crtc_vblank_get(new_state->base.crtc);
8172                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8173                                  __func__, new_state->base.crtc->base.id);
8174         } else if (old_vrr_active && !new_vrr_active) {
8175                 /* Transition VRR active -> inactive:
8176                  * Allow vblank irq disable again for fixed refresh rate.
8177                  */
8178                 dm_set_vupdate_irq(new_state->base.crtc, false);
8179                 drm_crtc_vblank_put(new_state->base.crtc);
8180                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8181                                  __func__, new_state->base.crtc->base.id);
8182         }
8183 }
8184
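/* Issue cursor updates for all cursor planes in the atomic state. */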
8185 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8186 {
8187         struct drm_plane *plane;
8188         struct drm_plane_state *old_plane_state, *new_plane_state;
8189         int i;
8190
8191         /*
8192          * TODO: Make this per-stream so we don't issue redundant updates for
8193          * commits with multiple streams.
8194          */
8195         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8196                                        new_plane_state, i)
8197                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8198                         handle_cursor_update(plane, old_plane_state);
8199 }
8200
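/*
 * Build one dc_surface_update bundle covering every changed plane on
 * @pcrtc, throttle page flips against the target vblank, and hand the
 * whole bundle to DC in a single dc_commit_updates_for_stream() call.
 */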
8201 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8202                                     struct dc_state *dc_state,
8203                                     struct drm_device *dev,
8204                                     struct amdgpu_display_manager *dm,
8205                                     struct drm_crtc *pcrtc,
8206                                     bool wait_for_vblank)
8207 {
8208         uint32_t i;
8209         uint64_t timestamp_ns;
8210         struct drm_plane *plane;
8211         struct drm_plane_state *old_plane_state, *new_plane_state;
8212         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8213         struct drm_crtc_state *new_pcrtc_state =
8214                         drm_atomic_get_new_crtc_state(state, pcrtc);
8215         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8216         struct dm_crtc_state *dm_old_crtc_state =
8217                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8218         int planes_count = 0, vpos, hpos;
8219         long r;
8220         unsigned long flags;
8221         struct amdgpu_bo *abo;
8222         uint32_t target_vblank, last_flip_vblank;
8223         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8224         bool pflip_present = false;
8225         struct {
8226                 struct dc_surface_update surface_updates[MAX_SURFACES];
8227                 struct dc_plane_info plane_infos[MAX_SURFACES];
8228                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8229                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8230                 struct dc_stream_update stream_update;
8231         } *bundle;
8232
8233         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8234
8235         if (!bundle) {
8236                 dm_error("Failed to allocate update bundle\n");
8237                 goto cleanup;
8238         }
8239
8240         /*
8241          * Disable the cursor first if we're disabling all the planes.
8242          * It'll remain on the screen after the planes are re-enabled
8243          * if we don't.
8244          */
8245         if (acrtc_state->active_planes == 0)
8246                 amdgpu_dm_commit_cursors(state);
8247
8248         /* update planes when needed */
8249         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8250                 struct drm_crtc *crtc = new_plane_state->crtc;
8251                 struct drm_crtc_state *new_crtc_state;
8252                 struct drm_framebuffer *fb = new_plane_state->fb;
8253                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8254                 bool plane_needs_flip;
8255                 struct dc_plane_state *dc_plane;
8256                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8257
8258                 /* Cursor plane is handled after stream updates */
8259                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8260                         continue;
8261
8262                 if (!fb || !crtc || pcrtc != crtc)
8263                         continue;
8264
8265                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8266                 if (!new_crtc_state->active)
8267                         continue;
8268
8269                 dc_plane = dm_new_plane_state->dc_state;
8270
8271                 bundle->surface_updates[planes_count].surface = dc_plane;
8272                 if (new_pcrtc_state->color_mgmt_changed) {
8273                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8274                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8275                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8276                 }
8277
8278                 fill_dc_scaling_info(new_plane_state,
8279                                      &bundle->scaling_infos[planes_count]);
8280
8281                 bundle->surface_updates[planes_count].scaling_info =
8282                         &bundle->scaling_infos[planes_count];
8283
8284                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8285
8286                 pflip_present = pflip_present || plane_needs_flip;
8287
8288                 if (!plane_needs_flip) {
8289                         planes_count += 1;
8290                         continue;
8291                 }
8292
8293                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8294
8295                 /*
8296                  * Wait for all fences on this FB. Do limited wait to avoid
8297                  * deadlock during GPU reset when this fence will not signal
8298                  * but we hold reservation lock for the BO.
8299                  */
8300                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8301                                                         false,
8302                                                         msecs_to_jiffies(5000));
8303                 if (unlikely(r <= 0))
8304                         DRM_ERROR("Waiting for fences timed out!\n");
8305
8306                 fill_dc_plane_info_and_addr(
8307                         dm->adev, new_plane_state,
8308                         afb->tiling_flags,
8309                         &bundle->plane_infos[planes_count],
8310                         &bundle->flip_addrs[planes_count].address,
8311                         afb->tmz_surface, false);
8312
8313                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8314                                  new_plane_state->plane->index,
8315                                  bundle->plane_infos[planes_count].dcc.enable);
8316
8317                 bundle->surface_updates[planes_count].plane_info =
8318                         &bundle->plane_infos[planes_count];
8319
8320                 /*
8321                  * Only allow immediate flips for fast updates that don't
8322                  * change FB pitch, DCC state, rotation or mirroring.
8323                  */
8324                 bundle->flip_addrs[planes_count].flip_immediate =
8325                         crtc->state->async_flip &&
8326                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8327
8328                 timestamp_ns = ktime_get_ns();
8329                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8330                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8331                 bundle->surface_updates[planes_count].surface = dc_plane;
8332
8333                 if (!bundle->surface_updates[planes_count].surface) {
8334                         DRM_ERROR("No surface for CRTC: id=%d\n",
8335                                         acrtc_attach->crtc_id);
8336                         continue;
8337                 }
8338
8339                 if (plane == pcrtc->primary)
8340                         update_freesync_state_on_stream(
8341                                 dm,
8342                                 acrtc_state,
8343                                 acrtc_state->stream,
8344                                 dc_plane,
8345                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8346
8347                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8348                                  __func__,
8349                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8350                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8351
8352                 planes_count += 1;
8353
8354         }
8355
8356         if (pflip_present) {
8357                 if (!vrr_active) {
8358                         /* Use old throttling in non-vrr fixed refresh rate mode
8359                          * to keep flip scheduling based on target vblank counts
8360                          * working in a backwards compatible way, e.g., for
8361                          * clients using the GLX_OML_sync_control extension or
8362                          * DRI3/Present extension with defined target_msc.
8363                          */
8364                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8365                 } else {
8367                         /* For variable refresh rate mode only:
8368                          * Get vblank of last completed flip to avoid > 1 vrr
8369                          * flips per video frame by use of throttling, but allow
8370                          * flip programming anywhere in the possibly large
8371                          * variable vrr vblank interval for fine-grained flip
8372                          * timing control and more opportunity to avoid stutter
8373                          * on late submission of flips.
8374                          */
8375                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8376                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8377                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8378                 }
8379
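                /*
                 * wait_for_vblank is 0 or 1 here, so the flip targets either
                 * the vblank of the last completed flip or the one after it.
                 */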
8380                 target_vblank = last_flip_vblank + wait_for_vblank;
8381
8382                 /*
8383                  * Wait until we're out of the vertical blank period before the one
8384                  * targeted by the flip
8385                  */
8386                 while ((acrtc_attach->enabled &&
8387                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8388                                                             0, &vpos, &hpos, NULL,
8389                                                             NULL, &pcrtc->hwmode)
8390                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8391                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8392                         (int)(target_vblank -
8393                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8394                         usleep_range(1000, 1100);
8395                 }
8396
8397                 /*
8398                  * Prepare the flip event for the pageflip interrupt to handle.
8399                  *
8400                  * This only works in the case where we've already turned on the
8401                  * appropriate hardware blocks (eg. HUBP) so in the transition case
8402                  * from 0 -> n planes we have to skip a hardware generated event
8403                  * and rely on sending it from software.
8404                  */
8405                 if (acrtc_attach->base.state->event &&
8406                     acrtc_state->active_planes > 0) {
8407                         drm_crtc_vblank_get(pcrtc);
8408
8409                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8410
8411                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8412                         prepare_flip_isr(acrtc_attach);
8413
8414                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8415                 }
8416
8417                 if (acrtc_state->stream) {
8418                         if (acrtc_state->freesync_vrr_info_changed)
8419                                 bundle->stream_update.vrr_infopacket =
8420                                         &acrtc_state->stream->vrr_infopacket;
8421                 }
8422         }
8423
8424         /* Update the planes if changed or disable if we don't have any. */
8425         if ((planes_count || acrtc_state->active_planes == 0) &&
8426                 acrtc_state->stream) {
8427                 bundle->stream_update.stream = acrtc_state->stream;
8428                 if (new_pcrtc_state->mode_changed) {
8429                         bundle->stream_update.src = acrtc_state->stream->src;
8430                         bundle->stream_update.dst = acrtc_state->stream->dst;
8431                 }
8432
8433                 if (new_pcrtc_state->color_mgmt_changed) {
8434                         /*
8435                          * TODO: This isn't fully correct since we've actually
8436                          * already modified the stream in place.
8437                          */
8438                         bundle->stream_update.gamut_remap =
8439                                 &acrtc_state->stream->gamut_remap_matrix;
8440                         bundle->stream_update.output_csc_transform =
8441                                 &acrtc_state->stream->csc_color_matrix;
8442                         bundle->stream_update.out_transfer_func =
8443                                 acrtc_state->stream->out_transfer_func;
8444                 }
8445
8446                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8447                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8448                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8449
8450                 /*
8451                  * If FreeSync state on the stream has changed then we need to
8452                  * re-adjust the min/max bounds now that DC doesn't handle this
8453                  * as part of commit.
8454                  */
8455                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8456                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8457                         dc_stream_adjust_vmin_vmax(
8458                                 dm->dc, acrtc_state->stream,
8459                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8460                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8461                 }
8462                 mutex_lock(&dm->dc_lock);
8463                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8464                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8465                         amdgpu_dm_psr_disable(acrtc_state->stream);
8466
8467                 dc_commit_updates_for_stream(dm->dc,
8468                                                      bundle->surface_updates,
8469                                                      planes_count,
8470                                                      acrtc_state->stream,
8471                                                      &bundle->stream_update,
8472                                                      dc_state);
8473
8474                 /*
8475                  * Enable or disable the interrupts on the backend.
8476                  *
8477                  * Most pipes are put into power gating when unused.
8478                  *
8479                  * When power gating is enabled on a pipe we lose the
8480                  * interrupt enablement state when power gating is disabled.
8481                  *
8482                  * So we need to update the IRQ control state in hardware
8483                  * whenever the pipe turns on (since it could be previously
8484                  * power gated) or off (since some pipes can't be power gated
8485                  * on some ASICs).
8486                  */
8487                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8488                         dm_update_pflip_irq_state(drm_to_adev(dev),
8489                                                   acrtc_attach);
8490
8491                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8492                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8493                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8494                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8495                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8496                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8497                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8498                         amdgpu_dm_psr_enable(acrtc_state->stream);
8499                 }
8500
8501                 mutex_unlock(&dm->dc_lock);
8502         }
8503
8504         /*
8505          * Update cursor state *after* programming all the planes.
8506          * This avoids redundant programming in the case where we're going
8507          * to be disabling a single plane - those pipes are being disabled.
8508          */
8509         if (acrtc_state->active_planes)
8510                 amdgpu_dm_commit_cursors(state);
8511
8512 cleanup:
8513         kfree(bundle);
8514 }
8515
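/*
 * Notify the audio component about connector changes: tear down the
 * audio instance for connectors that lost their CRTC, then (re)announce
 * the ELD for streams that went through a modeset.
 */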
8516 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8517                                    struct drm_atomic_state *state)
8518 {
8519         struct amdgpu_device *adev = drm_to_adev(dev);
8520         struct amdgpu_dm_connector *aconnector;
8521         struct drm_connector *connector;
8522         struct drm_connector_state *old_con_state, *new_con_state;
8523         struct drm_crtc_state *new_crtc_state;
8524         struct dm_crtc_state *new_dm_crtc_state;
8525         const struct dc_stream_status *status;
8526         int i, inst;
8527
8528         /* Notify device removals. */
8529         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8530                 if (old_con_state->crtc != new_con_state->crtc) {
8531                         /* CRTC changes require notification. */
8532                         goto notify;
8533                 }
8534
8535                 if (!new_con_state->crtc)
8536                         continue;
8537
8538                 new_crtc_state = drm_atomic_get_new_crtc_state(
8539                         state, new_con_state->crtc);
8540
8541                 if (!new_crtc_state)
8542                         continue;
8543
8544                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8545                         continue;
8546
8547         notify:
8548                 aconnector = to_amdgpu_dm_connector(connector);
8549
8550                 mutex_lock(&adev->dm.audio_lock);
8551                 inst = aconnector->audio_inst;
8552                 aconnector->audio_inst = -1;
8553                 mutex_unlock(&adev->dm.audio_lock);
8554
8555                 amdgpu_dm_audio_eld_notify(adev, inst);
8556         }
8557
8558         /* Notify audio device additions. */
8559         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8560                 if (!new_con_state->crtc)
8561                         continue;
8562
8563                 new_crtc_state = drm_atomic_get_new_crtc_state(
8564                         state, new_con_state->crtc);
8565
8566                 if (!new_crtc_state)
8567                         continue;
8568
8569                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8570                         continue;
8571
8572                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8573                 if (!new_dm_crtc_state->stream)
8574                         continue;
8575
8576                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8577                 if (!status)
8578                         continue;
8579
8580                 aconnector = to_amdgpu_dm_connector(connector);
8581
8582                 mutex_lock(&adev->dm.audio_lock);
8583                 inst = status->audio_inst;
8584                 aconnector->audio_inst = inst;
8585                 mutex_unlock(&adev->dm.audio_lock);
8586
8587                 amdgpu_dm_audio_eld_notify(adev, inst);
8588         }
8589 }
8590
8591 /*
8592  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8593  * @crtc_state: the DRM CRTC state
8594  * @stream_state: the DC stream state.
8595  *
8596  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8597  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8598  */
8599 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8600                                                 struct dc_stream_state *stream_state)
8601 {
8602         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8603 }
8604
8605 /**
8606  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8607  * @state: The atomic state to commit
8608  *
8609  * This will tell DC to commit the constructed DC state from atomic_check,
8610  * programming the hardware. Any failure here implies a hardware failure, since
8611  * atomic check should have filtered anything non-kosher.
8612  */
8613 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8614 {
8615         struct drm_device *dev = state->dev;
8616         struct amdgpu_device *adev = drm_to_adev(dev);
8617         struct amdgpu_display_manager *dm = &adev->dm;
8618         struct dm_atomic_state *dm_state;
8619         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8620         uint32_t i, j;
8621         struct drm_crtc *crtc;
8622         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8623         unsigned long flags;
8624         bool wait_for_vblank = true;
8625         struct drm_connector *connector;
8626         struct drm_connector_state *old_con_state, *new_con_state;
8627         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8628         int crtc_disable_count = 0;
8629         bool mode_set_reset_required = false;
8630
8631         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8632
8633         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8634
8635         dm_state = dm_atomic_get_new_state(state);
8636         if (dm_state && dm_state->context) {
8637                 dc_state = dm_state->context;
8638         } else {
8639                 /* No state changes, retain current state. */
8640                 dc_state_temp = dc_create_state(dm->dc);
8641                 ASSERT(dc_state_temp);
8642                 dc_state = dc_state_temp;
8643                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8644         }
8645
8646         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8647                                        new_crtc_state, i) {
8648                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8649
8650                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8651
8652                 if (old_crtc_state->active &&
8653                     (!new_crtc_state->active ||
8654                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8655                         manage_dm_interrupts(adev, acrtc, false);
8656                         dc_stream_release(dm_old_crtc_state->stream);
8657                 }
8658         }
8659
8660         drm_atomic_helper_calc_timestamping_constants(state);
8661
8662         /* update changed items */
8663         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8664                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8665
8666                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8667                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8668
8669                 DRM_DEBUG_ATOMIC(
8670                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8671                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8672                         "connectors_changed:%d\n",
8673                         acrtc->crtc_id,
8674                         new_crtc_state->enable,
8675                         new_crtc_state->active,
8676                         new_crtc_state->planes_changed,
8677                         new_crtc_state->mode_changed,
8678                         new_crtc_state->active_changed,
8679                         new_crtc_state->connectors_changed);
8680
8681                 /* Disable cursor if disabling crtc */
8682                 if (old_crtc_state->active && !new_crtc_state->active) {
8683                         struct dc_cursor_position position;
8684
8685                         memset(&position, 0, sizeof(position));
8686                         mutex_lock(&dm->dc_lock);
8687                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8688                         mutex_unlock(&dm->dc_lock);
8689                 }
8690
8691                 /* Copy all transient state flags into dc state */
8692                 if (dm_new_crtc_state->stream) {
8693                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8694                                                             dm_new_crtc_state->stream);
8695                 }
8696
8697                 /* handles headless hotplug case, updating new_state and
8698                  * aconnector as needed
8699                  */
8700
8701                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8702
8703                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8704
8705                         if (!dm_new_crtc_state->stream) {
8706                                 /*
8707                                  * This could happen because of issues with
8708                                  * userspace notification delivery.
8709                                  * In this case userspace tries to set a mode on
8710                                  * a display which is in fact disconnected;
8711                                  * dc_sink is NULL on the aconnector here.
8712                                  * We expect a mode reset to come soon.
8713                                  *
8714                                  * This can also happen when an unplug occurs
8715                                  * while the resume sequence is still running.
8716                                  *
8717                                  * In either case, we want to pretend we still
8718                                  * have a sink to keep the pipe running so that
8719                                  * hw state is consistent with the sw state.
8720                                  */
8721                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8722                                                 __func__, acrtc->base.base.id);
8723                                 continue;
8724                         }
8725
8726                         if (dm_old_crtc_state->stream)
8727                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8728
8729                         pm_runtime_get_noresume(dev->dev);
8730
8731                         acrtc->enabled = true;
8732                         acrtc->hw_mode = new_crtc_state->mode;
8733                         crtc->hwmode = new_crtc_state->mode;
8734                         mode_set_reset_required = true;
8735                 } else if (modereset_required(new_crtc_state)) {
8736                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8737                         /* i.e. reset mode */
8738                         if (dm_old_crtc_state->stream)
8739                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8740
8741                         mode_set_reset_required = true;
8742                 }
8743         } /* for_each_crtc_in_state() */
8744
8745         if (dc_state) {
8746                 /* if there was a mode set or reset, disable eDP PSR */
8747                 if (mode_set_reset_required)
8748                         amdgpu_dm_psr_disable_all(dm);
8749
8750                 dm_enable_per_frame_crtc_master_sync(dc_state);
8751                 mutex_lock(&dm->dc_lock);
8752                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8753 #if defined(CONFIG_DRM_AMD_DC_DCN)
8754                 /* Allow idle optimization when vblank count is 0 for display off */
8755                 if (dm->active_vblank_irq_count == 0)
8756                         dc_allow_idle_optimizations(dm->dc, true);
8757 #endif
8758                 mutex_unlock(&dm->dc_lock);
8759         }
8760
8761         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8762                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8763
8764                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8765
8766                 if (dm_new_crtc_state->stream != NULL) {
8767                         const struct dc_stream_status *status =
8768                                         dc_stream_get_status(dm_new_crtc_state->stream);
8769
8770                         if (!status)
8771                                 status = dc_stream_get_status_from_state(dc_state,
8772                                                                          dm_new_crtc_state->stream);
8773                         if (!status)
8774                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
8775                         else
8776                                 acrtc->otg_inst = status->primary_otg_inst;
8777                 }
8778         }
8779 #ifdef CONFIG_DRM_AMD_DC_HDCP
8780         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8781                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8782                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8783                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8784
8785                 new_crtc_state = NULL;
8786
8787                 if (acrtc)
8788                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8789
8790                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8791
8792                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8793                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8794                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8795                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8796                         dm_new_con_state->update_hdcp = true;
8797                         continue;
8798                 }
8799
8800                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8801                         hdcp_update_display(
8802                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8803                                 new_con_state->hdcp_content_type,
8804                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8805         }
8806 #endif
8807
8808         /* Handle connector state changes */
8809         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8810                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8811                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8812                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8813                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8814                 struct dc_stream_update stream_update;
8815                 struct dc_info_packet hdr_packet;
8816                 struct dc_stream_status *status = NULL;
8817                 bool abm_changed, hdr_changed, scaling_changed;
8818
8819                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8820                 memset(&stream_update, 0, sizeof(stream_update));
8821
8822                 if (acrtc) {
8823                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8824                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8825                 }
8826
8827                 /* Skip any modesets/resets */
8828                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8829                         continue;
8830
8831                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8832                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8833
8834                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8835                                                              dm_old_con_state);
8836
8837                 abm_changed = dm_new_crtc_state->abm_level !=
8838                               dm_old_crtc_state->abm_level;
8839
8840                 hdr_changed =
8841                         is_hdr_metadata_different(old_con_state, new_con_state);
8842
8843                 if (!scaling_changed && !abm_changed && !hdr_changed)
8844                         continue;
8845
8846                 stream_update.stream = dm_new_crtc_state->stream;
8847                 if (scaling_changed) {
8848                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8849                                         dm_new_con_state, dm_new_crtc_state->stream);
8850
8851                         stream_update.src = dm_new_crtc_state->stream->src;
8852                         stream_update.dst = dm_new_crtc_state->stream->dst;
8853                 }
8854
8855                 if (abm_changed) {
8856                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8857
8858                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8859                 }
8860
8861                 if (hdr_changed) {
8862                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8863                         stream_update.hdr_static_metadata = &hdr_packet;
8864                 }
8865
8866                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8867                 WARN_ON(!status);
8868                 WARN_ON(!status->plane_count);
8869
8870                 /*
8871                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8872                  * Here we create an empty update on each plane.
8873                  * To fix this, DC should permit updating only stream properties.
8874                  */
8875                 for (j = 0; j < status->plane_count; j++)
8876                         dummy_updates[j].surface = status->plane_states[0];
8877
8878
8879                 mutex_lock(&dm->dc_lock);
8880                 dc_commit_updates_for_stream(dm->dc,
8881                                                      dummy_updates,
8882                                                      status->plane_count,
8883                                                      dm_new_crtc_state->stream,
8884                                                      &stream_update,
8885                                                      dc_state);
8886                 mutex_unlock(&dm->dc_lock);
8887         }
8888
8889         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8890         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8891                                       new_crtc_state, i) {
8892                 if (old_crtc_state->active && !new_crtc_state->active)
8893                         crtc_disable_count++;
8894
8895                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8896                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8897
8898                 /* For freesync config update on crtc state and params for irq */
8899                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8900
8901                 /* Handle vrr on->off / off->on transitions */
8902                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8903                                                 dm_new_crtc_state);
8904         }
8905
8906         /*
8907          * Enable interrupts for CRTCs that are newly enabled or went through
8908          * a modeset. It was intentionally deferred until after the front end
8909          * state was modified to wait until the OTG was on and so the IRQ
8910          * handlers didn't access stale or invalid state.
8911          */
8912         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8913                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8914 #ifdef CONFIG_DEBUG_FS
8915                 bool configure_crc = false;
8916                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8917 #endif
8918                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8919
8920                 if (new_crtc_state->active &&
8921                     (!old_crtc_state->active ||
8922                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8923                         dc_stream_retain(dm_new_crtc_state->stream);
8924                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8925                         manage_dm_interrupts(adev, acrtc, true);
8926
8927 #ifdef CONFIG_DEBUG_FS
8928                         /*
8929                          * Frontend may have changed so reapply the CRC capture
8930                          * settings for the stream.
8931                          */
8932                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8933                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8934                         cur_crc_src = acrtc->dm_irq_params.crc_src;
8935                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8936
8937                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8938                                 configure_crc = true;
8939 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8940                                 if (amdgpu_dm_crc_window_is_activated(crtc))
8941                                         configure_crc = false;
8942 #endif
8943                         }
8944
8945                         if (configure_crc)
8946                                 amdgpu_dm_crtc_configure_crc_source(
8947                                         crtc, dm_new_crtc_state, cur_crc_src);
8948 #endif
8949                 }
8950         }
8951
8952         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8953                 if (new_crtc_state->async_flip)
8954                         wait_for_vblank = false;
8955
8956         /* update planes when needed per crtc*/
8957         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8958                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8959
8960                 if (dm_new_crtc_state->stream)
8961                         amdgpu_dm_commit_planes(state, dc_state, dev,
8962                                                 dm, crtc, wait_for_vblank);
8963         }
8964
8965         /* Update audio instances for each connector. */
8966         amdgpu_dm_commit_audio(dev, state);
8967
8968         /*
8969          * send vblank event on all events not handled in flip and
8970          * mark consumed event for drm_atomic_helper_commit_hw_done
8971          */
8972         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8973         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8974
8975                 if (new_crtc_state->event)
8976                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8977
8978                 new_crtc_state->event = NULL;
8979         }
8980         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8981
8982         /* Signal HW programming completion */
8983         drm_atomic_helper_commit_hw_done(state);
8984
8985         if (wait_for_vblank)
8986                 drm_atomic_helper_wait_for_flip_done(dev, state);
8987
8988         drm_atomic_helper_cleanup_planes(dev, state);
8989
8990         /* return the stolen vga memory back to VRAM */
8991         if (!adev->mman.keep_stolen_vga_memory)
8992                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8993         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8994
8995         /*
8996          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8997          * so we can put the GPU into runtime suspend if we're not driving any
8998          * displays anymore
8999          */
9000         for (i = 0; i < crtc_disable_count; i++)
9001                 pm_runtime_put_autosuspend(dev->dev);
9002         pm_runtime_mark_last_busy(dev->dev);
9003
9004         if (dc_state_temp)
9005                 dc_release_state(dc_state_temp);
9006 }
9007
9008
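/*
 * Construct a minimal atomic state (connector, CRTC and primary plane)
 * with mode_changed forced, then commit it to restore the previous
 * display configuration without involving userspace.
 */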
9009 static int dm_force_atomic_commit(struct drm_connector *connector)
9010 {
9011         int ret = 0;
9012         struct drm_device *ddev = connector->dev;
9013         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9014         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9015         struct drm_plane *plane = disconnected_acrtc->base.primary;
9016         struct drm_connector_state *conn_state;
9017         struct drm_crtc_state *crtc_state;
9018         struct drm_plane_state *plane_state;
9019
9020         if (!state)
9021                 return -ENOMEM;
9022
9023         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9024
9025         /* Construct an atomic state to restore previous display setting */
9026
9027         /*
9028          * Attach connectors to drm_atomic_state
9029          */
9030         conn_state = drm_atomic_get_connector_state(state, connector);
9031
9032         ret = PTR_ERR_OR_ZERO(conn_state);
9033         if (ret)
9034                 goto out;
9035
9036         /* Attach crtc to drm_atomic_state*/
9037         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9038
9039         ret = PTR_ERR_OR_ZERO(crtc_state);
9040         if (ret)
9041                 goto out;
9042
9043         /* force a restore */
9044         crtc_state->mode_changed = true;
9045
9046         /* Attach plane to drm_atomic_state */
9047         plane_state = drm_atomic_get_plane_state(state, plane);
9048
9049         ret = PTR_ERR_OR_ZERO(plane_state);
9050         if (ret)
9051                 goto out;
9052
9053         /* Call commit internally with the state we just constructed */
9054         ret = drm_atomic_commit(state);
9055
9056 out:
9057         drm_atomic_state_put(state);
9058         if (ret)
9059                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9060
9061         return ret;
9062 }
9063
9064 /*
9065  * This function handles all cases when set mode does not come upon hotplug.
9066  * This includes when a display is unplugged then plugged back into the
9067  * same port, and when running without usermode desktop manager support.
9068  */
9069 void dm_restore_drm_connector_state(struct drm_device *dev,
9070                                     struct drm_connector *connector)
9071 {
9072         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9073         struct amdgpu_crtc *disconnected_acrtc;
9074         struct dm_crtc_state *acrtc_state;
9075
9076         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9077                 return;
9078
9079         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9080         if (!disconnected_acrtc)
9081                 return;
9082
9083         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9084         if (!acrtc_state->stream)
9085                 return;
9086
9087         /*
9088          * If the previous sink is not released and differs from the current
9089          * one, we deduce we are in a state where we cannot rely on a usermode
9090          * call to turn on the display, so we do it here.
9091          */
9092         if (acrtc_state->stream->sink != aconnector->dc_sink)
9093                 dm_force_atomic_commit(&aconnector->base);
9094 }
9095
9096 /*
9097  * Grabs all modesetting locks to serialize against any blocking commits,
9098  * and waits for completion of all non-blocking commits.
9099  */
9100 static int do_aquire_global_lock(struct drm_device *dev,
9101                                  struct drm_atomic_state *state)
9102 {
9103         struct drm_crtc *crtc;
9104         struct drm_crtc_commit *commit;
9105         long ret;
9106
9107         /*
9108          * Adding all modeset locks to acquire_ctx will
9109          * ensure that when the framework releases it, the
9110          * extra locks we are taking here will also get released.
9111          */
9112         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9113         if (ret)
9114                 return ret;
9115
9116         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9117                 spin_lock(&crtc->commit_lock);
9118                 commit = list_first_entry_or_null(&crtc->commit_list,
9119                                 struct drm_crtc_commit, commit_entry);
9120                 if (commit)
9121                         drm_crtc_commit_get(commit);
9122                 spin_unlock(&crtc->commit_lock);
9123
9124                 if (!commit)
9125                         continue;
9126
9127                 /*
9128                  * Make sure all pending HW programming completed and
9129                  * page flips done
9130                  */
9131                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9132
9133                 if (ret > 0)
9134                         ret = wait_for_completion_interruptible_timeout(
9135                                         &commit->flip_done, 10*HZ);
9136
9137                 if (ret == 0)
9138                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9139                                   crtc->base.id, crtc->name);
9140
9141                 drm_crtc_commit_put(commit);
9142         }
9143
9144         return ret < 0 ? ret : 0;
9145 }
9146
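/*
 * Derive the freesync config for a CRTC from its connector: VRR is
 * supported when the sink is freesync capable and the mode's refresh
 * rate lies within the sink's [min_vfreq, max_vfreq] range.
 */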
9147 static void get_freesync_config_for_crtc(
9148         struct dm_crtc_state *new_crtc_state,
9149         struct dm_connector_state *new_con_state)
9150 {
9151         struct mod_freesync_config config = {0};
9152         struct amdgpu_dm_connector *aconnector =
9153                         to_amdgpu_dm_connector(new_con_state->base.connector);
9154         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9155         int vrefresh = drm_mode_vrefresh(mode);
9156         bool fs_vid_mode = false;
9157
9158         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9159                                         vrefresh >= aconnector->min_vfreq &&
9160                                         vrefresh <= aconnector->max_vfreq;
9161
9162         if (new_crtc_state->vrr_supported) {
9163                 new_crtc_state->stream->ignore_msa_timing_param = true;
9164                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9165
9166                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9167                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9168                 config.vsif_supported = true;
9169                 config.btr = true;
9170
9171                 if (fs_vid_mode) {
9172                         config.state = VRR_STATE_ACTIVE_FIXED;
9173                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9174                         goto out;
9175                 } else if (new_crtc_state->base.vrr_enabled) {
9176                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9177                 } else {
9178                         config.state = VRR_STATE_INACTIVE;
9179                 }
9180         }
9181 out:
9182         new_crtc_state->freesync_config = config;
9183 }
9184
9185 static void reset_freesync_config_for_crtc(
9186         struct dm_crtc_state *new_crtc_state)
9187 {
9188         new_crtc_state->vrr_supported = false;
9189
9190         memset(&new_crtc_state->vrr_infopacket, 0,
9191                sizeof(new_crtc_state->vrr_infopacket));
9192 }
9193
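/*
 * Timings are "unchanged" for freesync video when all horizontal
 * parameters match and only the vertical blanking (vtotal and vsync
 * position) differs while the vsync pulse width stays the same, i.e.
 * only the vertical front porch was stretched.
 */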
9194 static bool
9195 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9196                                  struct drm_crtc_state *new_crtc_state)
9197 {
9198         struct drm_display_mode old_mode, new_mode;
9199
9200         if (!old_crtc_state || !new_crtc_state)
9201                 return false;
9202
9203         old_mode = old_crtc_state->mode;
9204         new_mode = new_crtc_state->mode;
9205
9206         if (old_mode.clock       == new_mode.clock &&
9207             old_mode.hdisplay    == new_mode.hdisplay &&
9208             old_mode.vdisplay    == new_mode.vdisplay &&
9209             old_mode.htotal      == new_mode.htotal &&
9210             old_mode.vtotal      != new_mode.vtotal &&
9211             old_mode.hsync_start == new_mode.hsync_start &&
9212             old_mode.vsync_start != new_mode.vsync_start &&
9213             old_mode.hsync_end   == new_mode.hsync_end &&
9214             old_mode.vsync_end   != new_mode.vsync_end &&
9215             old_mode.hskew       == new_mode.hskew &&
9216             old_mode.vscan       == new_mode.vscan &&
9217             (old_mode.vsync_end - old_mode.vsync_start) ==
9218             (new_mode.vsync_end - new_mode.vsync_start))
9219                 return true;
9220
9221         return false;
9222 }
9223
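/*
 * Pin the refresh rate to the current mode:
 *   fixed_refresh_in_uhz = clock_in_khz * 1000 * 1000000 / (htotal * vtotal)
 * e.g. a 594000 kHz clock with htotal 4400 and vtotal 2250 gives
 * 60000000 uHz (60 Hz).
 */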
9224 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9225         uint64_t num, den, res;
9226         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9227
9228         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9229
9230         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9231         den = (unsigned long long)new_crtc_state->mode.htotal *
9232               (unsigned long long)new_crtc_state->mode.vtotal;
9233
9234         res = div_u64(num, den);
9235         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9236 }
9237
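/*
 * Create or tear down the dc_stream_state backing one CRTC in the
 * atomic state; when enabling, a new stream is created and validated
 * for the sink. Flags, via @lock_and_validation_needed, when the
 * commit requires full DC state validation.
 */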
9238 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9239                                 struct drm_atomic_state *state,
9240                                 struct drm_crtc *crtc,
9241                                 struct drm_crtc_state *old_crtc_state,
9242                                 struct drm_crtc_state *new_crtc_state,
9243                                 bool enable,
9244                                 bool *lock_and_validation_needed)
9245 {
9246         struct dm_atomic_state *dm_state = NULL;
9247         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9248         struct dc_stream_state *new_stream;
9249         int ret = 0;
9250
9251         /*
9252          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9253          * update changed items
9254          */
9255         struct amdgpu_crtc *acrtc = NULL;
9256         struct amdgpu_dm_connector *aconnector = NULL;
9257         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9258         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9259
9260         new_stream = NULL;
9261
9262         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9263         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9264         acrtc = to_amdgpu_crtc(crtc);
9265         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9266
9267         /* TODO This hack should go away */
9268         if (aconnector && enable) {
9269                 /* Make sure fake sink is created in plug-in scenario */
9270                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9271                                                             &aconnector->base);
9272                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9273                                                             &aconnector->base);
9274
9275                 if (IS_ERR(drm_new_conn_state)) {
9276                         ret = PTR_ERR(drm_new_conn_state);
9277                         goto fail;
9278                 }
9279
9280                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9281                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9282
9283                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9284                         goto skip_modeset;
9285
9286                 new_stream = create_validate_stream_for_sink(aconnector,
9287                                                              &new_crtc_state->mode,
9288                                                              dm_new_conn_state,
9289                                                              dm_old_crtc_state->stream);
9290
9291                 /*
9292                  * We can have no stream on ACTION_SET if a display
9293                  * was disconnected during S3; in this case it is not an
9294                  * error, the OS will be updated after detection and
9295                  * will do the right thing on the next atomic commit.
9296                  */
9297
9298                 if (!new_stream) {
9299                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9300                                         __func__, acrtc->base.base.id);
9301                         ret = -ENOMEM;
9302                         goto fail;
9303                 }
9304
9305                 /*
9306                  * TODO: Check VSDB bits to decide whether this should
9307                  * be enabled or not.
9308                  */
9309                 new_stream->triggered_crtc_reset.enabled =
9310                         dm->force_timing_sync;
9311
9312                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9313
9314                 ret = fill_hdr_info_packet(drm_new_conn_state,
9315                                            &new_stream->hdr_static_metadata);
9316                 if (ret)
9317                         goto fail;
9318
9319                 /*
9320                  * If we already removed the old stream from the context
9321                  * (and set the new stream to NULL) then we can't reuse
9322                  * the old stream even if the stream and scaling are unchanged.
9323                  * We'll hit the BUG_ON and black screen.
9324                  *
9325                  * TODO: Refactor this function to allow this check to work
9326                  * in all conditions.
9327                  */
9328                 if (amdgpu_freesync_vid_mode &&
9329                     dm_new_crtc_state->stream &&
9330                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9331                         goto skip_modeset;
9332
9333                 if (dm_new_crtc_state->stream &&
9334                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9335                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9336                         new_crtc_state->mode_changed = false;
9337                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9338                                          new_crtc_state->mode_changed);
9339                 }
9340         }
9341
9342         /* mode_changed flag may get updated above, need to check again */
9343         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9344                 goto skip_modeset;
9345
9346         DRM_DEBUG_ATOMIC(
9347                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9348                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9349                 "connectors_changed:%d\n",
9350                 acrtc->crtc_id,
9351                 new_crtc_state->enable,
9352                 new_crtc_state->active,
9353                 new_crtc_state->planes_changed,
9354                 new_crtc_state->mode_changed,
9355                 new_crtc_state->active_changed,
9356                 new_crtc_state->connectors_changed);
9357
9358         /* Remove stream for any changed/disabled CRTC */
9359         if (!enable) {
9360
9361                 if (!dm_old_crtc_state->stream)
9362                         goto skip_modeset;
9363
9364                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9365                     is_timing_unchanged_for_freesync(new_crtc_state,
9366                                                      old_crtc_state)) {
9367                         new_crtc_state->mode_changed = false;
9368                         DRM_DEBUG_DRIVER(
9369                                 "Mode change not required for front porch change, "
9370                                 "setting mode_changed to %d\n",
9371                                 new_crtc_state->mode_changed);
9372
9373                         set_freesync_fixed_config(dm_new_crtc_state);
9374
9375                         goto skip_modeset;
9376                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9377                            is_freesync_video_mode(&new_crtc_state->mode,
9378                                                   aconnector)) {
9379                         set_freesync_fixed_config(dm_new_crtc_state);
9380                 }
9381
9382                 ret = dm_atomic_get_state(state, &dm_state);
9383                 if (ret)
9384                         goto fail;
9385
9386                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9387                                 crtc->base.id);
9388
9389                 /* i.e. reset mode */
9390                 if (dc_remove_stream_from_ctx(
9391                                 dm->dc,
9392                                 dm_state->context,
9393                                 dm_old_crtc_state->stream) != DC_OK) {
9394                         ret = -EINVAL;
9395                         goto fail;
9396                 }
9397
9398                 dc_stream_release(dm_old_crtc_state->stream);
9399                 dm_new_crtc_state->stream = NULL;
9400
9401                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9402
9403                 *lock_and_validation_needed = true;
9404
9405         } else { /* Add stream for any updated/enabled CRTC */
9406                 /*
9407                  * Quick fix to prevent a NULL pointer dereference on new_stream
9408                  * when newly added MST connectors are not found in the existing
9409                  * crtc_state in chained mode. TODO: dig out the root cause.
9410                  */
9411                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9412                         goto skip_modeset;
9413
9414                 if (modereset_required(new_crtc_state))
9415                         goto skip_modeset;
9416
9417                 if (modeset_required(new_crtc_state, new_stream,
9418                                      dm_old_crtc_state->stream)) {
9419
9420                         WARN_ON(dm_new_crtc_state->stream);
9421
9422                         ret = dm_atomic_get_state(state, &dm_state);
9423                         if (ret)
9424                                 goto fail;
9425
9426                         dm_new_crtc_state->stream = new_stream;
9427
9428                         dc_stream_retain(new_stream);
9429
9430                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9431                                          crtc->base.id);
9432
9433                         if (dc_add_stream_to_ctx(
9434                                         dm->dc,
9435                                         dm_state->context,
9436                                         dm_new_crtc_state->stream) != DC_OK) {
9437                                 ret = -EINVAL;
9438                                 goto fail;
9439                         }
9440
9441                         *lock_and_validation_needed = true;
9442                 }
9443         }
9444
9445 skip_modeset:
9446         /* Release extra reference */
9447         if (new_stream)
9448                 dc_stream_release(new_stream);
9449
9450         /*
9451          * We want to do dc stream updates that do not require a
9452          * full modeset below.
9453          */
9454         if (!(enable && aconnector && new_crtc_state->active))
9455                 return 0;
9456         /*
9457          * Given the above conditions, the dc state cannot be NULL because:
9458          * 1. we're in the process of enabling the CRTC (its stream has just
9459          *    been added to the dc context, or is already in the context),
9460          * 2. it has a valid connector attached, and
9461          * 3. it is currently active and enabled.
9462          * => The dc stream state currently exists.
9463          */
9464         BUG_ON(dm_new_crtc_state->stream == NULL);
9465
9466         /* Scaling or underscan settings */
9467         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9468                 update_stream_scaling_settings(
9469                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9470
9471         /* ABM settings */
9472         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9473
9474         /*
9475          * Color management settings. We also update color properties
9476          * when a modeset is needed, to ensure it gets reprogrammed.
9477          */
9478         if (dm_new_crtc_state->base.color_mgmt_changed ||
9479             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9480                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9481                 if (ret)
9482                         goto fail;
9483         }
9484
9485         /* Update Freesync settings. */
9486         get_freesync_config_for_crtc(dm_new_crtc_state,
9487                                      dm_new_conn_state);
9488
9489         return ret;
9490
9491 fail:
9492         if (new_stream)
9493                 dc_stream_release(new_stream);
9494         return ret;
9495 }
9496
9497 static bool should_reset_plane(struct drm_atomic_state *state,
9498                                struct drm_plane *plane,
9499                                struct drm_plane_state *old_plane_state,
9500                                struct drm_plane_state *new_plane_state)
9501 {
9502         struct drm_plane *other;
9503         struct drm_plane_state *old_other_state, *new_other_state;
9504         struct drm_crtc_state *new_crtc_state;
9505         int i;
9506
9507         /*
9508          * TODO: Remove this hack once the checks below are sufficient
9509          * to determine when we need to reset all the planes on
9510          * the stream.
9511          */
9512         if (state->allow_modeset)
9513                 return true;
9514
9515         /* Exit early if we know that we're adding or removing the plane. */
9516         if (old_plane_state->crtc != new_plane_state->crtc)
9517                 return true;
9518
9519         /* old crtc == new_crtc == NULL, plane not in context. */
9520         if (!new_plane_state->crtc)
9521                 return false;
9522
9523         new_crtc_state =
9524                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9525
9526         if (!new_crtc_state)
9527                 return true;
9528
9529         /* CRTC Degamma changes currently require us to recreate planes. */
9530         if (new_crtc_state->color_mgmt_changed)
9531                 return true;
9532
9533         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9534                 return true;
9535
9536         /*
9537          * If there are any new primary or overlay planes being added or
9538          * removed then the z-order can potentially change. To ensure
9539          * correct z-order and pipe acquisition the current DC architecture
9540          * requires us to remove and recreate all existing planes.
9541          *
9542          * TODO: Come up with a more elegant solution for this.
9543          */
9544         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9545                 struct amdgpu_framebuffer *old_afb, *new_afb;
9546                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9547                         continue;
9548
9549                 if (old_other_state->crtc != new_plane_state->crtc &&
9550                     new_other_state->crtc != new_plane_state->crtc)
9551                         continue;
9552
9553                 if (old_other_state->crtc != new_other_state->crtc)
9554                         return true;
9555
9556                 /* Src/dst size and scaling updates. */
9557                 if (old_other_state->src_w != new_other_state->src_w ||
9558                     old_other_state->src_h != new_other_state->src_h ||
9559                     old_other_state->crtc_w != new_other_state->crtc_w ||
9560                     old_other_state->crtc_h != new_other_state->crtc_h)
9561                         return true;
9562
9563                 /* Rotation / mirroring updates. */
9564                 if (old_other_state->rotation != new_other_state->rotation)
9565                         return true;
9566
9567                 /* Blending updates. */
9568                 if (old_other_state->pixel_blend_mode !=
9569                     new_other_state->pixel_blend_mode)
9570                         return true;
9571
9572                 /* Alpha updates. */
9573                 if (old_other_state->alpha != new_other_state->alpha)
9574                         return true;
9575
9576                 /* Colorspace changes. */
9577                 if (old_other_state->color_range != new_other_state->color_range ||
9578                     old_other_state->color_encoding != new_other_state->color_encoding)
9579                         return true;
9580
9581                 /* Framebuffer checks fall at the end. */
9582                 if (!old_other_state->fb || !new_other_state->fb)
9583                         continue;
9584
9585                 /* Pixel format changes can require bandwidth updates. */
9586                 if (old_other_state->fb->format != new_other_state->fb->format)
9587                         return true;
9588
9589                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9590                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9591
9592                 /* Tiling and DCC changes also require bandwidth updates. */
9593                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9594                     old_afb->base.modifier != new_afb->base.modifier)
9595                         return true;
9596         }
9597
9598         return false;
9599 }
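
/*
 * Illustrative consequence of the checks above (not additional driver
 * logic): if an overlay plane is moved from CRTC A to CRTC B, every
 * other non-cursor plane on either CRTC reports "reset needed", since
 * the current DC architecture must remove and re-create all planes on
 * a stream to redo z-ordering and pipe acquisition.
 */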
9600
9601 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9602                               struct drm_plane_state *new_plane_state,
9603                               struct drm_framebuffer *fb)
9604 {
9605         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9606         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9607         unsigned int pitch;
9608         bool linear;
9609
9610         if (fb->width > new_acrtc->max_cursor_width ||
9611             fb->height > new_acrtc->max_cursor_height) {
9612                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9613                                  fb->width,
9614                                  fb->height);
9615                 return -EINVAL;
9616         }
9617         if (new_plane_state->src_w != fb->width << 16 ||
9618             new_plane_state->src_h != fb->height << 16) {
9619                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9620                 return -EINVAL;
9621         }
9622
9623         /* Pitch in pixels */
9624         pitch = fb->pitches[0] / fb->format->cpp[0];
9625
9626         if (fb->width != pitch) {
9627                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9628                                  fb->width, pitch);
9629                 return -EINVAL;
9630         }
9631
9632         switch (pitch) {
9633         case 64:
9634         case 128:
9635         case 256:
9636                 /* FB pitch is supported by cursor plane */
9637                 break;
9638         default:
9639                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9640                 return -EINVAL;
9641         }
9642
9643         /* Core DRM takes care of checking FB modifiers, so we only need to
9644          * check tiling flags when the FB doesn't have a modifier. */
9645         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9646                 if (adev->family < AMDGPU_FAMILY_AI) {
9647                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9648                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9649                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9650                 } else {
9651                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9652                 }
9653                 if (!linear) {
9654                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9655                         return -EINVAL;
9656                 }
9657         }
9658
9659         return 0;
9660 }
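
/*
 * Worked example (illustrative only): a 64x64 cursor in ARGB8888
 * (fb->format->cpp[0] = 4 bytes/px) with fb->pitches[0] = 256 bytes:
 *
 *   pitch = 256 / 4 = 64 px
 *
 * This matches fb->width and is one of the supported cursor pitches
 * (64/128/256 px), so the FB is accepted.
 */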
9661
9662 static int dm_update_plane_state(struct dc *dc,
9663                                  struct drm_atomic_state *state,
9664                                  struct drm_plane *plane,
9665                                  struct drm_plane_state *old_plane_state,
9666                                  struct drm_plane_state *new_plane_state,
9667                                  bool enable,
9668                                  bool *lock_and_validation_needed)
9669 {
9670
9671         struct dm_atomic_state *dm_state = NULL;
9672         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9673         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9674         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9675         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9676         struct amdgpu_crtc *new_acrtc;
9677         bool needs_reset;
9678         int ret = 0;
9679
9680
9681         new_plane_crtc = new_plane_state->crtc;
9682         old_plane_crtc = old_plane_state->crtc;
9683         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9684         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9685
9686         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9687                 if (!enable || !new_plane_crtc ||
9688                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9689                         return 0;
9690
9691                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9692
9693                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9694                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9695                         return -EINVAL;
9696                 }
9697
9698                 if (new_plane_state->fb) {
9699                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9700                                                  new_plane_state->fb);
9701                         if (ret)
9702                                 return ret;
9703                 }
9704
9705                 return 0;
9706         }
9707
9708         needs_reset = should_reset_plane(state, plane, old_plane_state,
9709                                          new_plane_state);
9710
9711         /* Remove any changed/removed planes */
9712         if (!enable) {
9713                 if (!needs_reset)
9714                         return 0;
9715
9716                 if (!old_plane_crtc)
9717                         return 0;
9718
9719                 old_crtc_state = drm_atomic_get_old_crtc_state(
9720                                 state, old_plane_crtc);
9721                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9722
9723                 if (!dm_old_crtc_state->stream)
9724                         return 0;
9725
9726                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9727                                 plane->base.id, old_plane_crtc->base.id);
9728
9729                 ret = dm_atomic_get_state(state, &dm_state);
9730                 if (ret)
9731                         return ret;
9732
9733                 if (!dc_remove_plane_from_context(
9734                                 dc,
9735                                 dm_old_crtc_state->stream,
9736                                 dm_old_plane_state->dc_state,
9737                                 dm_state->context)) {
9738
9739                         return -EINVAL;
9740                 }
9741
9742
9743                 dc_plane_state_release(dm_old_plane_state->dc_state);
9744                 dm_new_plane_state->dc_state = NULL;
9745
9746                 *lock_and_validation_needed = true;
9747
9748         } else { /* Add new planes */
9749                 struct dc_plane_state *dc_new_plane_state;
9750
9751                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9752                         return 0;
9753
9754                 if (!new_plane_crtc)
9755                         return 0;
9756
9757                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9758                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9759
9760                 if (!dm_new_crtc_state->stream)
9761                         return 0;
9762
9763                 if (!needs_reset)
9764                         return 0;
9765
9766                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9767                 if (ret)
9768                         return ret;
9769
9770                 WARN_ON(dm_new_plane_state->dc_state);
9771
9772                 dc_new_plane_state = dc_create_plane_state(dc);
9773                 if (!dc_new_plane_state)
9774                         return -ENOMEM;
9775
9776                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9777                                  plane->base.id, new_plane_crtc->base.id);
9778
9779                 ret = fill_dc_plane_attributes(
9780                         drm_to_adev(new_plane_crtc->dev),
9781                         dc_new_plane_state,
9782                         new_plane_state,
9783                         new_crtc_state);
9784                 if (ret) {
9785                         dc_plane_state_release(dc_new_plane_state);
9786                         return ret;
9787                 }
9788
9789                 ret = dm_atomic_get_state(state, &dm_state);
9790                 if (ret) {
9791                         dc_plane_state_release(dc_new_plane_state);
9792                         return ret;
9793                 }
9794
9795                 /*
9796                  * Any atomic check errors that occur after this will
9797                  * not need a release. The plane state will be attached
9798                  * to the stream, and therefore part of the atomic
9799                  * state. It'll be released when the atomic state is
9800                  * cleaned.
9801                  */
9802                 if (!dc_add_plane_to_context(
9803                                 dc,
9804                                 dm_new_crtc_state->stream,
9805                                 dc_new_plane_state,
9806                                 dm_state->context)) {
9807
9808                         dc_plane_state_release(dc_new_plane_state);
9809                         return -EINVAL;
9810                 }
9811
9812                 dm_new_plane_state->dc_state = dc_new_plane_state;
9813
9814                 /* Tell DC to do a full surface update every time there
9815                  * is a plane change. Inefficient, but works for now.
9816                  */
9817                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9818
9819                 *lock_and_validation_needed = true;
9820         }
9821
9822
9823         return ret;
9824 }
9825
9826 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9827                                 struct drm_crtc *crtc,
9828                                 struct drm_crtc_state *new_crtc_state)
9829 {
9830         struct drm_plane_state *new_cursor_state, *new_primary_state;
9831         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9832
9833         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9834          * cursor per pipe, but it inherits the scaling and positioning from the
9835          * underlying pipe. Check that the cursor plane's scaling matches the
9836          * primary plane's. */
9837
9838         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9839         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9840         if (!new_cursor_state || !new_primary_state ||
9841             !new_cursor_state->fb || !new_primary_state->fb) {
9842                 return 0;
9843         }
9844
9845         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9846                          (new_cursor_state->src_w >> 16);
9847         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9848                          (new_cursor_state->src_h >> 16);
9849
9850         primary_scale_w = new_primary_state->crtc_w * 1000 /
9851                          (new_primary_state->src_w >> 16);
9852         primary_scale_h = new_primary_state->crtc_h * 1000 /
9853                          (new_primary_state->src_h >> 16);
9854
9855         if (cursor_scale_w != primary_scale_w ||
9856             cursor_scale_h != primary_scale_h) {
9857                 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
9858                 return -EINVAL;
9859         }
9860
9861         return 0;
9862 }
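
/*
 * Worked example (illustrative only): scale factors are computed in
 * thousandths as crtc_w * 1000 / (src_w >> 16). An unscaled 64x64
 * cursor gives 64 * 1000 / 64 = 1000; an unscaled 1920-wide primary
 * likewise gives 1000, so the check passes. A primary upscaled 2x
 * (crtc_w = 3840, src_w = 1920 << 16) gives 2000, the factors no
 * longer match, and the commit is rejected with -EINVAL.
 */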
9863
9864 #if defined(CONFIG_DRM_AMD_DC_DCN)
9865 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9866 {
9867         struct drm_connector *connector;
9868         struct drm_connector_state *conn_state;
9869         struct amdgpu_dm_connector *aconnector = NULL;
9870         int i;
9871         for_each_new_connector_in_state(state, connector, conn_state, i) {
9872                 if (conn_state->crtc != crtc)
9873                         continue;
9874
9875                 aconnector = to_amdgpu_dm_connector(connector);
9876                 if (!aconnector->port || !aconnector->mst_port)
9877                         aconnector = NULL;
9878                 else
9879                         break;
9880         }
9881
9882         if (!aconnector)
9883                 return 0;
9884
9885         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9886 }
9887 #endif
9888
9889 static int validate_overlay(struct drm_atomic_state *state)
9890 {
9891         int i;
9892         struct drm_plane *plane;
9893         struct drm_plane_state *old_plane_state, *new_plane_state;
9894         struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
9895
9896         /* Check if primary plane is contained inside overlay */
9897         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9898                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9899                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9900                                 return 0;
9901
9902                         overlay_state = new_plane_state;
9903                         continue;
9904                 }
9905         }
9906
9907         /* check if we're making changes to the overlay plane */
9908         if (!overlay_state)
9909                 return 0;
9910
9911         /* check if overlay plane is enabled */
9912         if (!overlay_state->crtc)
9913                 return 0;
9914
9915         /* find the primary plane for the CRTC that the overlay is enabled on */
9916         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
9917         if (IS_ERR(primary_state))
9918                 return PTR_ERR(primary_state);
9919
9920         /* check if primary plane is enabled */
9921         if (!primary_state->crtc)
9922                 return 0;
9923
9924         /* check if cursor plane is enabled */
9925         cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
9926         if (IS_ERR(cursor_state))
9927                 return PTR_ERR(cursor_state);
9928
9929         if (drm_atomic_plane_disabling(overlay_state->crtc->cursor->state, cursor_state))
9930                 return 0;
9931
9932         /* Perform the bounds check to ensure the overlay plane covers the primary */
9933         if (primary_state->crtc_x < overlay_state->crtc_x ||
9934             primary_state->crtc_y < overlay_state->crtc_y ||
9935             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
9936             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
9937                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
9938                 return -EINVAL;
9939         }
9940
9941         return 0;
9942 }
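
/*
 * Illustrative example (not driver code): with the primary at
 * (0, 0) 1920x1080, an overlay at (0, 0) 1920x1080 fully covers it and
 * the check passes; an overlay at (0, 0) 1280x720 leaves part of the
 * primary exposed, so the commit is rejected with -EINVAL.
 */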
9943
9944 /**
9945  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9946  * @dev: The DRM device
9947  * @state: The atomic state to validate
9948  *
9949  * Validate that the given atomic state is programmable by DC into hardware.
9950  * This involves constructing a &struct dc_state reflecting the new hardware
9951  * state we wish to commit, then querying DC to see if it is programmable. It's
9952  * important not to modify the existing DC state. Otherwise, atomic_check
9953  * may unexpectedly commit hardware changes.
9954  *
9955  * When validating the DC state, it's important that the right locks are
9956  * acquired. For the full-update case, which removes/adds/updates streams on
9957  * one CRTC while flipping on another, acquiring the global lock guarantees
9958  * that any such full-update commit will wait for completion of any
9959  * outstanding flip using DRM's synchronization events.
9960  *
9961  * Note that DM adds the affected connectors for all CRTCs in state, when that
9962  * might not seem necessary. This is because DC stream creation requires the
9963  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9964  * be possible but non-trivial - a possible TODO item.
9965  *
9966  * Return: 0 on success, or a negative error code if validation failed.
9967  */
9968 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9969                                   struct drm_atomic_state *state)
9970 {
9971         struct amdgpu_device *adev = drm_to_adev(dev);
9972         struct dm_atomic_state *dm_state = NULL;
9973         struct dc *dc = adev->dm.dc;
9974         struct drm_connector *connector;
9975         struct drm_connector_state *old_con_state, *new_con_state;
9976         struct drm_crtc *crtc;
9977         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9978         struct drm_plane *plane;
9979         struct drm_plane_state *old_plane_state, *new_plane_state;
9980         enum dc_status status;
9981         int ret, i;
9982         bool lock_and_validation_needed = false;
9983         struct dm_crtc_state *dm_old_crtc_state;
9984
9985         trace_amdgpu_dm_atomic_check_begin(state);
9986
9987         ret = drm_atomic_helper_check_modeset(dev, state);
9988         if (ret)
9989                 goto fail;
9990
9991         /* Check connector changes */
9992         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9993                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9994                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9995
9996                 /* Skip connectors that are disabled or part of modeset already. */
9997                 if (!old_con_state->crtc && !new_con_state->crtc)
9998                         continue;
9999
10000                 if (!new_con_state->crtc)
10001                         continue;
10002
10003                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10004                 if (IS_ERR(new_crtc_state)) {
10005                         ret = PTR_ERR(new_crtc_state);
10006                         goto fail;
10007                 }
10008
10009                 if (dm_old_con_state->abm_level !=
10010                     dm_new_con_state->abm_level)
10011                         new_crtc_state->connectors_changed = true;
10012         }
10013
10014 #if defined(CONFIG_DRM_AMD_DC_DCN)
10015         if (dc_resource_is_dsc_encoding_supported(dc)) {
10016                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10017                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10018                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10019                                 if (ret)
10020                                         goto fail;
10021                         }
10022                 }
10023         }
10024 #endif
10025         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10026                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10027
10028                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10029                     !new_crtc_state->color_mgmt_changed &&
10030                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10031                     !dm_old_crtc_state->dsc_force_changed)
10032                         continue;
10033
10034                 if (!new_crtc_state->enable)
10035                         continue;
10036
10037                 ret = drm_atomic_add_affected_connectors(state, crtc);
10038                 if (ret)
10039                         goto fail;
10040
10041                 ret = drm_atomic_add_affected_planes(state, crtc);
10042                 if (ret)
10043                         goto fail;
10044
10045                 if (dm_old_crtc_state->dsc_force_changed)
10046                         new_crtc_state->mode_changed = true;
10047         }
10048
10049         /*
10050          * Add all primary and overlay planes on the CRTC to the state
10051          * whenever a plane is enabled to maintain correct z-ordering
10052          * and to enable fast surface updates.
10053          */
10054         drm_for_each_crtc(crtc, dev) {
10055                 bool modified = false;
10056
10057                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10058                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10059                                 continue;
10060
10061                         if (new_plane_state->crtc == crtc ||
10062                             old_plane_state->crtc == crtc) {
10063                                 modified = true;
10064                                 break;
10065                         }
10066                 }
10067
10068                 if (!modified)
10069                         continue;
10070
10071                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10072                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10073                                 continue;
10074
10075                         new_plane_state =
10076                                 drm_atomic_get_plane_state(state, plane);
10077
10078                         if (IS_ERR(new_plane_state)) {
10079                                 ret = PTR_ERR(new_plane_state);
10080                                 goto fail;
10081                         }
10082                 }
10083         }
10084
10085         /* Remove existing planes if they are modified */
10086         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10087                 ret = dm_update_plane_state(dc, state, plane,
10088                                             old_plane_state,
10089                                             new_plane_state,
10090                                             false,
10091                                             &lock_and_validation_needed);
10092                 if (ret)
10093                         goto fail;
10094         }
10095
10096         /* Disable all crtcs which require disable */
10097         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10098                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10099                                            old_crtc_state,
10100                                            new_crtc_state,
10101                                            false,
10102                                            &lock_and_validation_needed);
10103                 if (ret)
10104                         goto fail;
10105         }
10106
10107         /* Enable all crtcs which require enable */
10108         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10109                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10110                                            old_crtc_state,
10111                                            new_crtc_state,
10112                                            true,
10113                                            &lock_and_validation_needed);
10114                 if (ret)
10115                         goto fail;
10116         }
10117
10118         ret = validate_overlay(state);
10119         if (ret)
10120                 goto fail;
10121
10122         /* Add new/modified planes */
10123         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10124                 ret = dm_update_plane_state(dc, state, plane,
10125                                             old_plane_state,
10126                                             new_plane_state,
10127                                             true,
10128                                             &lock_and_validation_needed);
10129                 if (ret)
10130                         goto fail;
10131         }
10132
10133         /* Run this here since we want to validate the streams we created */
10134         ret = drm_atomic_helper_check_planes(dev, state);
10135         if (ret)
10136                 goto fail;
10137
10138         /* Check cursor planes scaling */
10139         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10140                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10141                 if (ret)
10142                         goto fail;
10143         }
10144
10145         if (state->legacy_cursor_update) {
10146                 /*
10147                  * This is a fast cursor update coming from the plane update
10148                  * helper, check if it can be done asynchronously for better
10149                  * performance.
10150                  */
10151                 state->async_update =
10152                         !drm_atomic_helper_async_check(dev, state);
10153
10154                 /*
10155                  * Skip the remaining global validation if this is an async
10156                  * update. Cursor updates can be done without affecting
10157                  * state or bandwidth calcs and this avoids the performance
10158                  * penalty of locking the private state object and
10159                  * allocating a new dc_state.
10160                  */
10161                 if (state->async_update)
10162                         return 0;
10163         }
10164
10165         /* Check scaling and underscan changes */
10166         /* TODO: Removed scaling changes validation due to inability to commit
10167          * new stream into context w/o causing full reset. Need to
10168          * decide how to handle.
10169          */
10170         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10171                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10172                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10173                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10174
10175                 /* Skip any modesets/resets */
10176                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10177                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10178                         continue;
10179
10180                 /* Skip anything that is not a scaling or underscan change */
10181                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10182                         continue;
10183
10184                 lock_and_validation_needed = true;
10185         }
10186
10187         /*
10188          * Streams and planes are reset when there are changes that affect
10189          * bandwidth. Anything that affects bandwidth needs to go through
10190          * DC global validation to ensure that the configuration can be applied
10191          * to hardware.
10192          *
10193          * We have to currently stall out here in atomic_check for outstanding
10194          * commits to finish in this case because our IRQ handlers reference
10195          * DRM state directly - we can end up disabling interrupts too early
10196          * if we don't.
10197          *
10198          * TODO: Remove this stall and drop DM state private objects.
10199          */
10200         if (lock_and_validation_needed) {
10201                 ret = dm_atomic_get_state(state, &dm_state);
10202                 if (ret)
10203                         goto fail;
10204
10205                 ret = do_aquire_global_lock(dev, state);
10206                 if (ret)
10207                         goto fail;
10208
10209 #if defined(CONFIG_DRM_AMD_DC_DCN)
10210                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
10211                         ret = -EINVAL;
                        goto fail;
                }
10212
10213                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10214                 if (ret)
10215                         goto fail;
10216 #endif
10217
10218                 /*
10219                  * Perform validation of MST topology in the state:
10220                  * We need to perform MST atomic check before calling
10221                  * dc_validate_global_state(), or there is a chance
10222                  * to get stuck in an infinite loop and hang eventually.
10223                  */
10224                 ret = drm_dp_mst_atomic_check(state);
10225                 if (ret)
10226                         goto fail;
10227                 status = dc_validate_global_state(dc, dm_state->context, false);
10228                 if (status != DC_OK) {
10229                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10230                                        dc_status_to_str(status), status);
10231                         ret = -EINVAL;
10232                         goto fail;
10233                 }
10234         } else {
10235                 /*
10236                  * The commit is a fast update. Fast updates shouldn't change
10237                  * the DC context, affect global validation, and can have their
10238                  * commit work done in parallel with other commits not touching
10239                  * the same resource. If we have a new DC context as part of
10240                  * the DM atomic state from validation we need to free it and
10241                  * retain the existing one instead.
10242                  *
10243                  * Furthermore, since the DM atomic state only contains the DC
10244                  * context and can safely be annulled, we can free the state
10245                  * and clear the associated private object now to free
10246                  * some memory and avoid a possible use-after-free later.
10247                  */
10248
10249                 for (i = 0; i < state->num_private_objs; i++) {
10250                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10251
10252                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10253                                 int j = state->num_private_objs-1;
10254
10255                                 dm_atomic_destroy_state(obj,
10256                                                 state->private_objs[i].state);
10257
10258                                 /* If i is not at the end of the array then the
10259                                  * last element needs to be moved to where i was
10260                                  * before the array can safely be truncated.
10261                                  */
10262                                 if (i != j)
10263                                         state->private_objs[i] =
10264                                                 state->private_objs[j];
10265
10266                                 state->private_objs[j].ptr = NULL;
10267                                 state->private_objs[j].state = NULL;
10268                                 state->private_objs[j].old_state = NULL;
10269                                 state->private_objs[j].new_state = NULL;
10270
10271                                 state->num_private_objs = j;
10272                                 break;
10273                         }
10274                 }
10275         }
10276
10277         /* Store the overall update type for use later in atomic check. */
10278         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10279                 struct dm_crtc_state *dm_new_crtc_state =
10280                         to_dm_crtc_state(new_crtc_state);
10281
10282                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10283                                                          UPDATE_TYPE_FULL :
10284                                                          UPDATE_TYPE_FAST;
10285         }
10286
10287         /* Must be success */
10288         WARN_ON(ret);
10289
10290         trace_amdgpu_dm_atomic_check_finish(state, ret);
10291
10292         return ret;
10293
10294 fail:
10295         if (ret == -EDEADLK)
10296                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10297         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10298                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10299         else
10300                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10301
10302         trace_amdgpu_dm_atomic_check_finish(state, ret);
10303
10304         return ret;
10305 }
10306
10307 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10308                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10309 {
10310         uint8_t dpcd_data;
10311         bool capable = false;
10312
10313         if (amdgpu_dm_connector->dc_link &&
10314                 dm_helpers_dp_read_dpcd(
10315                                 NULL,
10316                                 amdgpu_dm_connector->dc_link,
10317                                 DP_DOWN_STREAM_PORT_COUNT,
10318                                 &dpcd_data,
10319                                 sizeof(dpcd_data))) {
10320                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10321         }
10322
10323         return capable;
10324 }
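
/*
 * Note (informational): DP_DOWN_STREAM_PORT_COUNT is DPCD register 0x007
 * and DP_MSA_TIMING_PAR_IGNORED is bit 6 of it. A sink that sets this bit
 * may ignore the MSA video timing parameters and follow the timing actually
 * driven on the link, which is the capability FreeSync relies on when it
 * varies the vertical front porch.
 */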
10325
10326 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10327                 uint8_t *edid_ext, int len,
10328                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10329 {
10330         int i;
10331         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10332         struct dc *dc = adev->dm.dc;
10333
10334         /* send extension block to DMCU for parsing */
10335         for (i = 0; i < len; i += 8) {
10336                 bool res;
10337                 int offset;
10338
10339                 /* send 8 bytes at a time */
10340                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10341                         return false;
10342
10343                 if (i + 8 == len) {
10344                         /* EDID block send completed; expect the parse result */
10345                         int version, min_rate, max_rate;
10346
10347                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10348                         if (res) {
10349                                 /* amd vsdb found */
10350                                 vsdb_info->freesync_supported = 1;
10351                                 vsdb_info->amd_vsdb_version = version;
10352                                 vsdb_info->min_refresh_rate_hz = min_rate;
10353                                 vsdb_info->max_refresh_rate_hz = max_rate;
10354                                 return true;
10355                         }
10356                         /* not amd vsdb */
10357                         return false;
10358                 }
10359
10360                 /* check for ack */
10361                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10362                 if (!res)
10363                         return false;
10364         }
10365
10366         return false;
10367 }
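
/*
 * Illustrative handshake for a 128-byte CEA extension block (this only
 * restates the loop above): chunks 0..14 each send 8 bytes and wait for
 * an ack from the DC EDID parser; for the final chunk (bytes 120-127,
 * where i + 8 == len) the AMD VSDB parse result - version and min/max
 * refresh rate - is read back instead.
 */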
10368
10369 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10370                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10371 {
10372         uint8_t *edid_ext = NULL;
10373         int i;
10374         bool valid_vsdb_found = false;
10375
10376         /*----- drm_find_cea_extension() -----*/
10377         /* No EDID or EDID extensions */
10378         if (edid == NULL || edid->extensions == 0)
10379                 return -ENODEV;
10380
10381         /* Find CEA extension */
10382         for (i = 0; i < edid->extensions; i++) {
10383                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10384                 if (edid_ext[0] == CEA_EXT)
10385                         break;
10386         }
10387
10388         if (i == edid->extensions)
10389                 return -ENODEV;
10390
10391         /*----- cea_db_offsets() -----*/
10392         if (edid_ext[0] != CEA_EXT)
10393                 return -ENODEV;
10394
10395         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10396
10397         return valid_vsdb_found ? i : -ENODEV;
10398 }
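
/*
 * Illustrative EDID layout (informational): EDID_LENGTH is 128 bytes and
 * CEA_EXT is extension tag 0x02. For a typical 256-byte EDID with one CEA
 * extension, the base block occupies bytes 0-127 and the extension block
 * bytes 128-255, so the loop above finds it at i = 0 via
 * edid + EDID_LENGTH * (0 + 1).
 */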
10399
10400 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10401                                         struct edid *edid)
10402 {
10403         int i = 0;
10404         struct detailed_timing *timing;
10405         struct detailed_non_pixel *data;
10406         struct detailed_data_monitor_range *range;
10407         struct amdgpu_dm_connector *amdgpu_dm_connector =
10408                         to_amdgpu_dm_connector(connector);
10409         struct dm_connector_state *dm_con_state = NULL;
10410
10411         struct drm_device *dev = connector->dev;
10412         struct amdgpu_device *adev = drm_to_adev(dev);
10413         bool freesync_capable = false;
10414         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10415
10416         if (!connector->state) {
10417                 DRM_ERROR("%s - Connector has no state\n", __func__);
10418                 goto update;
10419         }
10420
10421         if (!edid) {
10422                 dm_con_state = to_dm_connector_state(connector->state);
10423
10424                 amdgpu_dm_connector->min_vfreq = 0;
10425                 amdgpu_dm_connector->max_vfreq = 0;
10426                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10427
10428                 goto update;
10429         }
10430
10431         dm_con_state = to_dm_connector_state(connector->state);
10432
10433         if (!amdgpu_dm_connector->dc_sink) {
10434                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10435                 goto update;
10436         }
10437         if (!adev->dm.freesync_module)
10438                 goto update;
10439
10441         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
10442             amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10443                 bool edid_check_required = false;
10444
10445                 if (edid) {
10446                         edid_check_required = is_dp_capable_without_timing_msa(
10447                                                 adev->dm.dc,
10448                                                 amdgpu_dm_connector);
10449                 }
10450
10451                 if (edid_check_required && (edid->version > 1 ||
10452                     (edid->version == 1 && edid->revision > 1))) {
10453                         for (i = 0; i < 4; i++) {
10454
10455                                 timing  = &edid->detailed_timings[i];
10456                                 data    = &timing->data.other_data;
10457                                 range   = &data->data.range;
10458                                 /*
10459                                  * Check if monitor has continuous frequency mode
10460                                  */
10461                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10462                                         continue;
10463                                 /*
10464                                  * Check for flag range limits only. If flag == 1 then
10465                                  * no additional timing information provided.
10466                                  * Default GTF, GTF Secondary curve and CVT are not
10467                                  * supported
10468                                  */
10469                                 if (range->flags != 1)
10470                                         continue;
10471
10472                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10473                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10474                                 amdgpu_dm_connector->pixel_clock_mhz =
10475                                         range->pixel_clock_mhz * 10;
10476
10477                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10478                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10479
10480                                 break;
10481                         }
10482
10483                         if (amdgpu_dm_connector->max_vfreq -
10484                             amdgpu_dm_connector->min_vfreq > 10) {
10485
10486                                 freesync_capable = true;
10487                         }
10488                 }
10489         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10490                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10491                 if (i >= 0 && vsdb_info.freesync_supported) {
10492                         timing  = &edid->detailed_timings[i];
10493                         data    = &timing->data.other_data;
10494
10495                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10496                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10497                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10498                                 freesync_capable = true;
10499
10500                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10501                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10502                 }
10503         }
10504
10505 update:
10506         if (dm_con_state)
10507                 dm_con_state->freesync_capable = freesync_capable;
10508
10509         if (connector->vrr_capable_property)
10510                 drm_connector_set_vrr_capable_property(connector,
10511                                                        freesync_capable);
10512 }
10513
10514 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10515 {
10516         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10517
10518         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10519                 return;
10520         if (link->type == dc_connection_none)
10521                 return;
10522         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10523                                         dpcd_data, sizeof(dpcd_data))) {
10524                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10525
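                /*
                 * dpcd_data[0] is the sink PSR version from DPCD address
                 * 0x070 (DP_PSR_SUPPORT): 0 means no sink support; any
                 * non-zero version is mapped to PSR1 here.
                 */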
10526                 if (dpcd_data[0] == 0) {
10527                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10528                         link->psr_settings.psr_feature_enabled = false;
10529                 } else {
10530                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10531                         link->psr_settings.psr_feature_enabled = true;
10532                 }
10533
10534                 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10535         }
10536 }
10537
10538 /*
10539  * amdgpu_dm_link_setup_psr() - configure the PSR link
10540  * @stream: stream state
10541  *
10542  * Return: true on success
10543  */
10544 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10545 {
10546         struct dc_link *link = NULL;
10547         struct psr_config psr_config = {0};
10548         struct psr_context psr_context = {0};
10549         bool ret = false;
10550
10551         if (!stream)
10552                 return false;
10553
10554         link = stream->link;
10555
10556         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10557
10558         if (psr_config.psr_version > 0) {
10559                 psr_config.psr_exit_link_training_required = 0x1;
10560                 psr_config.psr_frame_capture_indication_req = 0;
10561                 psr_config.psr_rfb_setup_time = 0x37;
10562                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10563                 psr_config.allow_smu_optimizations = 0x0;
10564
10565                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10567         }
10568         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10569
10570         return ret;
10571 }
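
/*
 * Illustrative call order, inferred from how these helpers are used in
 * this file (a sketch, not a normative contract): sink PSR caps are read
 * once at detection time via amdgpu_dm_set_psr_caps(), the link is
 * configured per stream with amdgpu_dm_link_setup_psr(), and the feature
 * is then toggled with amdgpu_dm_psr_enable()/amdgpu_dm_psr_disable().
 */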
10572
10573 /*
10574  * amdgpu_dm_psr_enable() - enable PSR f/w
10575  * @stream: stream state
10576  *
10577  * Return: true on success
10578  */
10579 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10580 {
10581         struct dc_link *link = stream->link;
10582         unsigned int vsync_rate_hz = 0;
10583         struct dc_static_screen_params params = {0};
10584         /*
10585          * Calculate the number of static frames before generating an
10586          * interrupt to enter PSR; fail-safe default is 2 static frames.
10587          */
10588         unsigned int num_frames_static = 2;
10589
10590         DRM_DEBUG_DRIVER("Enabling psr...\n");
10591
10592         vsync_rate_hz = div64_u64(div64_u64(
10593                         stream->timing.pix_clk_100hz * 100,
10594                         stream->timing.v_total),
10595                         stream->timing.h_total);
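        /*
         * Worked example (hypothetical 1080p timing): pix_clk_100hz =
         * 1485000 (148.5 MHz), v_total = 1125, h_total = 2200 gives
         * 148500000 / 1125 / 2200 = 60 Hz.
         */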
10596
10597         /*
10598          * Round up: pick the number of frames such that at least 30 ms
10599          * of static screen time has passed.
10600          */
10601         if (vsync_rate_hz != 0) {
10602                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10603                 num_frames_static = (30000 / frame_time_microsec) + 1;
10604         }
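        /*
         * Example: at 60 Hz, frame_time_microsec = 16666, so
         * num_frames_static = 30000 / 16666 + 1 = 2 (about 33 ms of
         * static screen time before PSR entry).
         */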
10605
10606         params.triggers.cursor_update = true;
10607         params.triggers.overlay_update = true;
10608         params.triggers.surface_update = true;
10609         params.num_frames = num_frames_static;
10610
10611         dc_stream_set_static_screen_params(link->ctx->dc,
10612                                            &stream, 1,
10613                                            &params);
10614
10615         return dc_link_set_psr_allow_active(link, true, false, false);
10616 }
10617
10618 /*
10619  * amdgpu_dm_psr_disable() - disable PSR f/w
10620  * @stream: stream state
10621  *
10622  * Return: true on success
10623  */
10624 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10625 {
10627         DRM_DEBUG_DRIVER("Disabling psr...\n");
10628
10629         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10630 }
10631
10632 /*
10633  * amdgpu_dm_psr_disable_all() - disable PSR f/w if PSR is
10634  * enabled on any stream
10635  *
10636  * Return: true on success
10637  */
10638 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10639 {
10640         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10641         return dc_set_psr_allow_active(dm->dc, false);
10642 }
10643
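/*
 * Propagate adev->dm.force_timing_sync to every current stream and
 * retrigger CRTC timing sync. (Assumption: the flag itself is toggled
 * elsewhere, e.g. from debugfs.)
 */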
10644 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10645 {
10646         struct amdgpu_device *adev = drm_to_adev(dev);
10647         struct dc *dc = adev->dm.dc;
10648         int i;
10649
10650         mutex_lock(&adev->dm.dc_lock);
10651         if (dc->current_state) {
10652                 for (i = 0; i < dc->current_state->stream_count; ++i)
10653                         dc->current_state->streams[i]
10654                                 ->triggered_crtc_reset.enabled =
10655                                 adev->dm.force_timing_sync;
10656
10657                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10658                 dc_trigger_sync(dc, dc->current_state);
10659         }
10660         mutex_unlock(&adev->dm.dc_lock);
10661 }
10662
10663 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10664                        uint32_t value, const char *func_name)
10665 {
10666 #ifdef DM_CHECK_ADDR_0
10667         if (address == 0) {
10668                 DC_ERR("invalid register write; address = 0\n");
10669                 return;
10670         }
10671 #endif
10672         cgs_write_register(ctx->cgs_device, address, value);
10673         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10674 }
10675
10676 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10677                           const char *func_name)
10678 {
10679         uint32_t value;
10680 #ifdef DM_CHECK_ADDR_0
10681         if (address == 0) {
10682                 DC_ERR("invalid register read; address = 0\n");
10683                 return 0;
10684         }
10685 #endif
10686
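        /*
         * Editorial note: reading registers while a DMUB register-offload
         * gather is in progress would interleave with the deferred access
         * sequence, which is assumed to be the rationale for the ASSERT
         * below.
         */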
10687         if (ctx->dmub_srv &&
10688             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10689             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10690                 ASSERT(false);
10691                 return 0;
10692         }
10693
10694         value = cgs_read_register(ctx->cgs_device, address);
10695
10696         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10697
10698         return value;
10699 }