/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

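/*
 * Push the subconnector type derived from the link's DP dongle (see
 * get_subconnector_type() above) into the connector's DRM
 * dp_subconnector_property. Only meaningful for DisplayPort connectors;
 * reported as Unknown while no sink is attached.
 */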
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get a counter of vertical blanks
 * @adev: amdgpu device to read the counter from
 * @crtc: index of the CRTC to get the counter for
 *
 * Return: the vblank counter of the CRTC's stream, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

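/*
 * Map an OTG (output timing generator) instance back to its amdgpu_crtc.
 * The IRQ handlers below only know their interrupt source, so they use
 * this to find the CRTC to operate on. Falls back to CRTC 0, with a
 * warning, when called with an invalid (-1) instance.
 */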
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

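/*
 * VRR state helpers: a CRTC counts as "VRR active" in both the variable
 * and the fixed refresh state. This variant reads the IRQ-side copy of
 * the freesync config and is intended for use from interrupt context.
 */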
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

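/*
 * DC needs its vmin/vmax timings readjusted when a CRTC enters fixed-rate
 * VRR, or whenever the VRR active state toggles between the old and new
 * CRTC state.
 */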
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC whose
 *                    flip completed
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

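/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: interrupt parameters
 *
 * Fires after the end of the front porch. Feeds the measured frame
 * duration into the refresh-rate tracepoint and, when VRR is active,
 * performs the core vblank handling (plus BTR processing on pre-DCE12
 * ASICs) that dm_crtc_high_irq() defers in VRR mode.
 */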
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the DMUB Outbox interrupt: drains pending notifications and
 * the DMUB trace buffer.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;

        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
                        do {
                                dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        } while (notify.pending_notification);

                        if (adev->dm.dmub_notify)
                                memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
                        if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
                                complete(&adev->dm.dmub_aux_transfer_done);
                        /* TODO: HPD implementation */

                } else {
                        DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
                }
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                        entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

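/*
 * DRM audio component callback: report whether the connector backing the
 * given audio pin is enabled and copy its ELD (speaker/format capability
 * data derived from the EDID) out for the HDA driver.
 */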
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

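/*
 * Register DM with the DRM audio component framework and reset the
 * per-pin audio state for every audio endpoint the DC resource pool
 * exposes.
 */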
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

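/*
 * dm_dmub_hw_init() - Copy the DMUB firmware into its framebuffer windows
 * and bring the DMUB hardware up.
 *
 * A sketch of the firmware image layout, inferred from the offsets used
 * below (each region size comes from the dmcub firmware header):
 *
 *   ucode_array_offset_bytes
 *   v
 *   [ PSP header | inst_const | PSP footer ][ bss/data ]
 *   '----------- inst_const_bytes ---------''- bss_data_bytes -'
 *
 * inst_const lands in window 0 (unless the PSP front door already loaded
 * it), bss/data in window 2 and the VBIOS image in window 3, while the
 * mailbox, trace buffer and firmware state windows are zeroed.
 */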
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy the VBIOS image into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
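/*
 * Translate the GMC's view of memory (frame buffer, AGP aperture and GART
 * page table location) into the dc_phy_addr_space_config DC consumes.
 * Note the differing granularities: system aperture addresses are kept in
 * 256KB units (>> 18), the AGP aperture in 16MB units (>> 24) and page
 * table addresses in 4KB pages (>> 12).
 */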
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increase system aperture high address (add 1)
                 * to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
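/*
 * Deferred vblank work: track how many CRTCs currently have vblank IRQs
 * enabled and allow DC idle optimizations (e.g. MALL stutter) only while
 * that count is zero, i.e. while no display needs vblank interrupts.
 */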
static void event_mall_stutter(struct work_struct *work)
{
        struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        mutex_unlock(&dm->dc_lock);
}

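/*
 * Allocate one vblank work item per link (dc->caps.max_links) and wire
 * each one up to event_mall_stutter(). The caller owns the returned array
 * and frees it in amdgpu_dm_fini().
 */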
static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
        int i = 0;

        vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }

        for (i = 0; i < max_caps; i++)
                INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

        return vblank_work;
}
#endif
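/*
 * amdgpu_dm_init() - Bring up the display manager: initialize IRQ
 * handling, create the DC instance, program the DMUB hardware, set up the
 * freesync, HDCP and secure-display helpers where configured, and finally
 * create the DRM-side planes/CRTCs/encoders/connectors via
 * amdgpu_dm_initialize_drm_device(). On any failure it unwinds through
 * amdgpu_dm_fini().
 */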
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
#endif
        int r;

        adev->dm.ddev = adev_to_drm(adev);
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
#endif

        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
        spin_lock_init(&adev->dm.vblank_lock);
#endif

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->pdev->revision;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                init_data.flags.gpu_vm_support = true;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
                init_data.flags.gpu_vm_support = true;
                break;
#endif
        default:
                break;
        }

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
                init_data.flags.multi_mon_pp_mclk_switch = true;

        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;

        init_data.flags.power_down_display_on_boot = true;

        INIT_LIST_HEAD(&adev->dm.da_list);
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
                adev->dm.dc->debug.force_single_disp_pipe_split = false;
                adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
        }

        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
                adev->dm.dc->debug.disable_stutter = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
                adev->dm.dc->debug.disable_dsc = true;

        if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
                adev->dm.dc->debug.disable_clock_gate = true;

        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
                goto error;
        }

        dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->apu_flags) {
                struct dc_phy_addr_space_config pa_config;

                mmhub_read_system_context(adev, &pa_config);

                /* Call the DC init_memory func */
                dc_setup_system_context(adev->dm.dc, &pa_config);
        }
#endif

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
        }

        amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.dc->caps.max_links > 0) {
                adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

                if (!adev->dm.vblank_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
        }
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                else
                        DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

                dc_init_callbacks(adev->dm.dc, &init_params);
        }
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
                        DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
                        goto error;
                }
                amdgpu_dm_outbox_init(adev);
        }

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_dm_audio_fini(adev);

        return 0;
}

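/*
 * Tear down everything amdgpu_dm_init() set up, in roughly reverse order.
 * Also used as the unwind path for a failed init, which is why most steps
 * are guarded by NULL checks.
 */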
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->dm.display_indexes_num; i++) {
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }

        amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        if (adev->dm.crc_rd_wrk) {
                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                kfree(adev->dm.crc_rd_wrk);
                adev->dm.crc_rd_wrk = NULL;
        }
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
        }

        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (adev->dm.vblank_workqueue) {
                adev->dm.vblank_workqueue->dm = NULL;
                kfree(adev->dm.vblank_workqueue);
                adev->dm.vblank_workqueue = NULL;
        }
#endif

        dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                kfree(adev->dm.dmub_notify);
                adev->dm.dmub_notify = NULL;
        }

        if (adev->dm.dmub_bo)
                amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
                                      &adev->dm.dmub_bo_gpu_addr,
                                      &adev->dm.dmub_bo_cpu_addr);

        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }

        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}

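/*
 * Request and validate the external DMCU microcode where required. Most
 * ASICs return early because they have no separately loaded DMCU image;
 * only Raven (Picasso/Raven2) and Navi12 reference one here, and then
 * only when the PSP front door performs the firmware load.
 */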
static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        case CHIP_OLAND:
#endif
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_RENOIR:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_BEIGE_GOBY:
        case CHIP_VANGOGH:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
                break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

1472 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1473 {
1474         struct amdgpu_device *adev = ctx;
1475
1476         return dm_read_reg(adev->dm.dc->ctx, address);
1477 }
1478
1479 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1480                                      uint32_t value)
1481 {
1482         struct amdgpu_device *adev = ctx;
1483
        dm_write_reg(adev->dm.dc->ctx, address, value);
1485 }
1486
1487 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1488 {
1489         struct dmub_srv_create_params create_params;
1490         struct dmub_srv_region_params region_params;
1491         struct dmub_srv_region_info region_info;
1492         struct dmub_srv_fb_params fb_params;
1493         struct dmub_srv_fb_info *fb_info;
1494         struct dmub_srv *dmub_srv;
1495         const struct dmcub_firmware_header_v1_0 *hdr;
1496         const char *fw_name_dmub;
1497         enum dmub_asic dmub_asic;
1498         enum dmub_status status;
1499         int r;
1500
1501         switch (adev->asic_type) {
1502         case CHIP_RENOIR:
1503                 dmub_asic = DMUB_ASIC_DCN21;
1504                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1505                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1506                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1507                 break;
1508         case CHIP_SIENNA_CICHLID:
1509                 dmub_asic = DMUB_ASIC_DCN30;
1510                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1511                 break;
1512         case CHIP_NAVY_FLOUNDER:
1513                 dmub_asic = DMUB_ASIC_DCN30;
1514                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1515                 break;
1516         case CHIP_VANGOGH:
1517                 dmub_asic = DMUB_ASIC_DCN301;
1518                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1519                 break;
1520         case CHIP_DIMGREY_CAVEFISH:
1521                 dmub_asic = DMUB_ASIC_DCN302;
1522                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1523                 break;
1524         case CHIP_BEIGE_GOBY:
1525                 dmub_asic = DMUB_ASIC_DCN303;
1526                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1527                 break;
1529         default:
1530                 /* ASIC doesn't support DMUB. */
1531                 return 0;
1532         }
1533
1534         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1535         if (r) {
1536                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
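                /* Proceed without DMUB rather than failing device init;
                 * dm_dmub_hw_init() treats a missing dmub_srv as "DMUB
                 * unsupported" and returns early.
                 */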
1537                 return 0;
1538         }
1539
1540         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1541         if (r) {
1542                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1543                 return 0;
1544         }
1545
1546         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1547
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
                        AMDGPU_UCODE_ID_DMCUB;
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
                        adev->dm.dmub_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

                DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
                         adev->dm.dmcub_fw_version);
        }
1561
1562         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1563         dmub_srv = adev->dm.dmub_srv;
1564
1565         if (!dmub_srv) {
1566                 DRM_ERROR("Failed to allocate DMUB service!\n");
1567                 return -ENOMEM;
1568         }
1569
1570         memset(&create_params, 0, sizeof(create_params));
1571         create_params.user_ctx = adev;
1572         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1573         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1574         create_params.asic = dmub_asic;
1575
1576         /* Create the DMUB service. */
1577         status = dmub_srv_create(dmub_srv, &create_params);
1578         if (status != DMUB_STATUS_OK) {
1579                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1580                 return -EINVAL;
1581         }
1582
1583         /* Calculate the size of all the regions for the DMUB service. */
1584         memset(&region_params, 0, sizeof(region_params));
1585
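        /* The firmware blob wraps the inst_const region with a PSP header
         * and footer; strip both so only the raw code/constants are mapped.
         */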
1586         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1587                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1588         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1589         region_params.vbios_size = adev->bios_size;
1590         region_params.fw_bss_data = region_params.bss_data_size ?
1591                 adev->dm.dmub_fw->data +
1592                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1593                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1594         region_params.fw_inst_const =
1595                 adev->dm.dmub_fw->data +
1596                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1597                 PSP_HEADER_BYTES;
1598
1599         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1600                                            &region_info);
1601
1602         if (status != DMUB_STATUS_OK) {
1603                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1604                 return -EINVAL;
1605         }
1606
1607         /*
1608          * Allocate a framebuffer based on the total size of all the regions.
1609          * TODO: Move this into GART.
1610          */
1611         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1612                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1613                                     &adev->dm.dmub_bo_gpu_addr,
1614                                     &adev->dm.dmub_bo_cpu_addr);
1615         if (r)
1616                 return r;
1617
1618         /* Rebase the regions on the framebuffer address. */
1619         memset(&fb_params, 0, sizeof(fb_params));
1620         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1621         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1622         fb_params.region_info = &region_info;
1623
1624         adev->dm.dmub_fb_info =
1625                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1626         fb_info = adev->dm.dmub_fb_info;
1627
1628         if (!fb_info) {
1629                 DRM_ERROR(
1630                         "Failed to allocate framebuffer info for DMUB service!\n");
1631                 return -ENOMEM;
1632         }
1633
1634         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1635         if (status != DMUB_STATUS_OK) {
1636                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1637                 return -EINVAL;
1638         }
1639
1640         return 0;
1641 }
1642
1643 static int dm_sw_init(void *handle)
1644 {
1645         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1646         int r;
1647
1648         r = dm_dmub_sw_init(adev);
1649         if (r)
1650                 return r;
1651
1652         return load_dmcu_fw(adev);
1653 }
1654
1655 static int dm_sw_fini(void *handle)
1656 {
1657         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1658
1659         kfree(adev->dm.dmub_fb_info);
1660         adev->dm.dmub_fb_info = NULL;
1661
1662         if (adev->dm.dmub_srv) {
1663                 dmub_srv_destroy(adev->dm.dmub_srv);
1664                 adev->dm.dmub_srv = NULL;
1665         }
1666
1667         release_firmware(adev->dm.dmub_fw);
1668         adev->dm.dmub_fw = NULL;
1669
1670         release_firmware(adev->dm.fw_dmcu);
1671         adev->dm.fw_dmcu = NULL;
1672
1673         return 0;
1674 }
1675
1676 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1677 {
1678         struct amdgpu_dm_connector *aconnector;
1679         struct drm_connector *connector;
1680         struct drm_connector_list_iter iter;
1681         int ret = 0;
1682
1683         drm_connector_list_iter_begin(dev, &iter);
1684         drm_for_each_connector_iter(connector, &iter) {
1685                 aconnector = to_amdgpu_dm_connector(connector);
1686                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1687                     aconnector->mst_mgr.aux) {
1688                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1689                                          aconnector,
1690                                          aconnector->base.base.id);
1691
1692                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1693                         if (ret < 0) {
1694                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1695                                 aconnector->dc_link->type =
1696                                         dc_connection_single;
1697                                 break;
1698                         }
1699                 }
1700         }
1701         drm_connector_list_iter_end(&iter);
1702
1703         return ret;
1704 }
1705
1706 static int dm_late_init(void *handle)
1707 {
1708         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1709
1710         struct dmcu_iram_parameters params;
1711         unsigned int linear_lut[16];
1712         int i;
1713         struct dmcu *dmcu = NULL;
1714
1715         dmcu = adev->dm.dc->res_pool->dmcu;
1716
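        /* Build a 16-entry linear LUT spanning 0..0xFFFF for backlight
         * ramping.
         */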
1717         for (i = 0; i < 16; i++)
1718                 linear_lut[i] = 0xFFFF * i / 15;
1719
1720         params.set = 0;
1721         params.backlight_ramping_start = 0xCCCC;
1722         params.backlight_ramping_reduction = 0xCCCCCCCC;
1723         params.backlight_lut_array_size = 16;
1724         params.backlight_lut_array = linear_lut;
1725
        /* Minimum backlight level after ABM reduction; don't allow it to
         * fall below 1%: 0xFFFF * 0.01 = 0x28F.
         */
        params.min_abm_backlight = 0x28F;
        /* When ABM is implemented in DMCUB, the dmcu object will be NULL.
         * ABM 2.4 and newer are implemented in DMCUB.
         */
1734         if (dmcu) {
1735                 if (!dmcu_load_iram(dmcu, params))
1736                         return -EINVAL;
1737         } else if (adev->dm.dc->ctx->dmub_srv) {
1738                 struct dc_link *edp_links[MAX_NUM_EDP];
1739                 int edp_num;
1740
1741                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1742                 for (i = 0; i < edp_num; i++) {
1743                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1744                                 return -EINVAL;
1745                 }
1746         }
1747
1748         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1749 }
1750
1751 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1752 {
1753         struct amdgpu_dm_connector *aconnector;
1754         struct drm_connector *connector;
1755         struct drm_connector_list_iter iter;
1756         struct drm_dp_mst_topology_mgr *mgr;
1757         int ret;
1758         bool need_hotplug = false;
1759
1760         drm_connector_list_iter_begin(dev, &iter);
1761         drm_for_each_connector_iter(connector, &iter) {
1762                 aconnector = to_amdgpu_dm_connector(connector);
1763                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1764                     aconnector->mst_port)
1765                         continue;
1766
1767                 mgr = &aconnector->mst_mgr;
1768
1769                 if (suspend) {
1770                         drm_dp_mst_topology_mgr_suspend(mgr);
1771                 } else {
1772                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1773                         if (ret < 0) {
1774                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1775                                 need_hotplug = true;
1776                         }
1777                 }
1778         }
1779         drm_connector_list_iter_end(&iter);
1780
1781         if (need_hotplug)
1782                 drm_kms_helper_hotplug_event(dev);
1783 }
1784
1785 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1786 {
1787         struct smu_context *smu = &adev->smu;
1788         int ret = 0;
1789
1790         if (!is_support_sw_smu(adev))
1791                 return 0;
1792
        /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the Windows driver's dc implementation.
         * For Navi1x, the clock settings of the dcn watermarks are fixed.
         * The settings should be passed to smu during boot up and on resume
         * from s3.
         * boot up: dc calculates the dcn watermark clock settings within
         * dc_create, dcn20_resource_construct, and then calls the pplib
         * functions below to pass them to smu:
         * smu_set_watermarks_for_clock_ranges
         * smu_set_watermarks_table
         * navi10_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Renoir, the clock settings of the dcn watermarks are also
         * fixed values. dc has implemented a different flow for the Windows
         * driver:
         * dc_hardware_init / dc_set_power_state
         * dcn10_init_hw
         * notify_wm_ranges
         * set_wm_ranges
         * -- Linux
         * smu_set_watermarks_for_clock_ranges
         * renoir_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Linux,
         * dc_hardware_init -> amdgpu_dm_init
         * dc_set_power_state --> dm_resume
         *
         * Therefore, this function applies to navi10/12/14 but not to
         * Renoir.
         */
        switch (adev->asic_type) {
1824         case CHIP_NAVI10:
1825         case CHIP_NAVI14:
1826         case CHIP_NAVI12:
1827                 break;
1828         default:
1829                 return 0;
1830         }
1831
1832         ret = smu_write_watermarks_table(smu);
1833         if (ret) {
1834                 DRM_ERROR("Failed to update WMTABLE!\n");
1835                 return ret;
1836         }
1837
1838         return 0;
1839 }
1840
1841 /**
1842  * dm_hw_init() - Initialize DC device
1843  * @handle: The base driver device containing the amdgpu_dm device.
1844  *
1845  * Initialize the &struct amdgpu_display_manager device. This involves calling
1846  * the initializers of each DM component, then populating the struct with them.
1847  *
1848  * Although the function implies hardware initialization, both hardware and
1849  * software are initialized here. Splitting them out to their relevant init
1850  * hooks is a future TODO item.
1851  *
1852  * Some notable things that are initialized here:
1853  *
1854  * - Display Core, both software and hardware
1855  * - DC modules that we need (freesync and color management)
1856  * - DRM software states
1857  * - Interrupt sources and handlers
1858  * - Vblank support
1859  * - Debug FS entries, if enabled
1860  */
1861 static int dm_hw_init(void *handle)
1862 {
1863         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1864         /* Create DAL display manager */
1865         amdgpu_dm_init(adev);
1866         amdgpu_dm_hpd_init(adev);
1867
1868         return 0;
1869 }
1870
1871 /**
1872  * dm_hw_fini() - Teardown DC device
1873  * @handle: The base driver device containing the amdgpu_dm device.
1874  *
1875  * Teardown components within &struct amdgpu_display_manager that require
1876  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1877  * were loaded. Also flush IRQ workqueues and disable them.
1878  */
1879 static int dm_hw_fini(void *handle)
1880 {
1881         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1882
1883         amdgpu_dm_hpd_fini(adev);
1884
1885         amdgpu_dm_irq_fini(adev);
1886         amdgpu_dm_fini(adev);
1887         return 0;
1888 }
1889
1891 static int dm_enable_vblank(struct drm_crtc *crtc);
1892 static void dm_disable_vblank(struct drm_crtc *crtc);
1893
1894 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1895                                  struct dc_state *state, bool enable)
1896 {
1897         enum dc_irq_source irq_source;
1898         struct amdgpu_crtc *acrtc;
1899         int rc = -EBUSY;
1900         int i = 0;
1901
1902         for (i = 0; i < state->stream_count; i++) {
1903                 acrtc = get_crtc_by_otg_inst(
1904                                 adev, state->stream_status[i].primary_otg_inst);
1905
1906                 if (acrtc && state->stream_status[i].plane_count != 0) {
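                        /* Each OTG instance has its own pflip IRQ source,
                         * offset from the IRQ_TYPE_PFLIP base.
                         */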
1907                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1908                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
                                      acrtc->crtc_id, enable ? "en" : "dis", rc);
1911                         if (rc)
1912                                 DRM_WARN("Failed to %s pflip interrupts\n",
1913                                          enable ? "enable" : "disable");
1914
1915                         if (enable) {
1916                                 rc = dm_enable_vblank(&acrtc->base);
1917                                 if (rc)
1918                                         DRM_WARN("Failed to enable vblank interrupts\n");
1919                         } else {
1920                                 dm_disable_vblank(&acrtc->base);
1921                         }
1922
1923                 }
1924         }
1926 }
1927
1928 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1929 {
1930         struct dc_state *context = NULL;
1931         enum dc_status res = DC_ERROR_UNEXPECTED;
1932         int i;
1933         struct dc_stream_state *del_streams[MAX_PIPES];
1934         int del_streams_count = 0;
1935
1936         memset(del_streams, 0, sizeof(del_streams));
1937
1938         context = dc_create_state(dc);
1939         if (context == NULL)
1940                 goto context_alloc_fail;
1941
1942         dc_resource_state_copy_construct_current(dc, context);
1943
        /* First, collect all streams currently in the context. */
1945         for (i = 0; i < context->stream_count; i++) {
1946                 struct dc_stream_state *stream = context->streams[i];
1947
1948                 del_streams[del_streams_count++] = stream;
1949         }
1950
1951         /* Remove all planes for removed streams and then remove the streams */
1952         for (i = 0; i < del_streams_count; i++) {
1953                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1954                         res = DC_FAIL_DETACH_SURFACES;
1955                         goto fail;
1956                 }
1957
1958                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1959                 if (res != DC_OK)
1960                         goto fail;
1961         }
1962
1963
1965
1966         if (res != DC_OK) {
1967                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1968                 goto fail;
1969         }
1970
1971         res = dc_commit_state(dc, context);
1972
1973 fail:
1974         dc_release_state(context);
1975
1976 context_alloc_fail:
1977         return res;
1978 }
1979
1980 static int dm_suspend(void *handle)
1981 {
1982         struct amdgpu_device *adev = handle;
1983         struct amdgpu_display_manager *dm = &adev->dm;
1984         int ret = 0;
1985
1986         if (amdgpu_in_reset(adev)) {
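                /* Note: dc_lock is deliberately left held across the GPU
                 * reset; dm_resume() releases it once the cached state has
                 * been restored.
                 */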
1987                 mutex_lock(&dm->dc_lock);
1988
1989 #if defined(CONFIG_DRM_AMD_DC_DCN)
1990                 dc_allow_idle_optimizations(adev->dm.dc, false);
1991 #endif
1992
1993                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1994
1995                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1996
1997                 amdgpu_dm_commit_zero_streams(dm->dc);
1998
1999                 amdgpu_dm_irq_suspend(adev);
2000
2001                 return ret;
2002         }
2003
2004         WARN_ON(adev->dm.cached_state);
2005         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2006
2007         s3_handle_mst(adev_to_drm(adev), true);
2008
2009         amdgpu_dm_irq_suspend(adev);
2010
2011         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2012
2013         return 0;
2014 }
2015
2016 static struct amdgpu_dm_connector *
2017 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2018                                              struct drm_crtc *crtc)
2019 {
2020         uint32_t i;
2021         struct drm_connector_state *new_con_state;
2022         struct drm_connector *connector;
2023         struct drm_crtc *crtc_from_state;
2024
2025         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2026                 crtc_from_state = new_con_state->crtc;
2027
2028                 if (crtc_from_state == crtc)
2029                         return to_amdgpu_dm_connector(connector);
2030         }
2031
2032         return NULL;
2033 }
2034
2035 static void emulated_link_detect(struct dc_link *link)
2036 {
2037         struct dc_sink_init_data sink_init_data = { 0 };
2038         struct display_sink_capability sink_caps = { 0 };
2039         enum dc_edid_status edid_status;
2040         struct dc_context *dc_ctx = link->ctx;
2041         struct dc_sink *sink = NULL;
2042         struct dc_sink *prev_sink = NULL;
2043
2044         link->type = dc_connection_none;
2045         prev_sink = link->local_sink;
2046
2047         if (prev_sink)
2048                 dc_sink_release(prev_sink);
2049
2050         switch (link->connector_signal) {
2051         case SIGNAL_TYPE_HDMI_TYPE_A: {
2052                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2053                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2054                 break;
2055         }
2056
2057         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2058                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2059                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2060                 break;
2061         }
2062
2063         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2064                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2065                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2066                 break;
2067         }
2068
2069         case SIGNAL_TYPE_LVDS: {
2070                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2071                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2072                 break;
2073         }
2074
2075         case SIGNAL_TYPE_EDP: {
2076                 sink_caps.transaction_type =
2077                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2078                 sink_caps.signal = SIGNAL_TYPE_EDP;
2079                 break;
2080         }
2081
2082         case SIGNAL_TYPE_DISPLAY_PORT: {
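                /* DP capabilities cannot be read on an emulated link, so
                 * the sink is reported as a virtual signal.
                 */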
2083                 sink_caps.transaction_type =
2084                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2085                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2086                 break;
2087         }
2088
2089         default:
2090                 DC_ERROR("Invalid connector type! signal:%d\n",
2091                         link->connector_signal);
2092                 return;
2093         }
2094
2095         sink_init_data.link = link;
2096         sink_init_data.sink_signal = sink_caps.signal;
2097
2098         sink = dc_sink_create(&sink_init_data);
2099         if (!sink) {
2100                 DC_ERROR("Failed to create sink!\n");
2101                 return;
2102         }
2103
2104         /* dc_sink_create returns a new reference */
2105         link->local_sink = sink;
2106
2107         edid_status = dm_helpers_read_local_edid(
2108                         link->ctx,
2109                         link,
2110                         sink);
2111
2112         if (edid_status != EDID_OK)
2113                 DC_ERROR("Failed to read EDID");
                DC_ERROR("Failed to read EDID\n");
2116
2117 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2118                                      struct amdgpu_display_manager *dm)
2119 {
2120         struct {
2121                 struct dc_surface_update surface_updates[MAX_SURFACES];
2122                 struct dc_plane_info plane_infos[MAX_SURFACES];
2123                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2124                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2125                 struct dc_stream_update stream_update;
        } *bundle;
2127         int k, m;
2128
2129         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2130
2131         if (!bundle) {
2132                 dm_error("Failed to allocate update bundle\n");
2133                 goto cleanup;
2134         }
2135
2136         for (k = 0; k < dc_state->stream_count; k++) {
2137                 bundle->stream_update.stream = dc_state->streams[k];
2138
                for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status[k].plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status[k].plane_count,
                        dc_state->streams[k], &bundle->stream_update, dc_state);
2149         }
2150
2151 cleanup:
2152         kfree(bundle);
2155 }
2156
2157 static void dm_set_dpms_off(struct dc_link *link)
2158 {
2159         struct dc_stream_state *stream_state;
2160         struct amdgpu_dm_connector *aconnector = link->priv;
2161         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2162         struct dc_stream_update stream_update;
2163         bool dpms_off = true;
2164
2165         memset(&stream_update, 0, sizeof(stream_update));
2166         stream_update.dpms_off = &dpms_off;
2167
2168         mutex_lock(&adev->dm.dc_lock);
2169         stream_state = dc_stream_find_from_link(link);
2170
2171         if (stream_state == NULL) {
2172                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2173                 mutex_unlock(&adev->dm.dc_lock);
2174                 return;
2175         }
2176
2177         stream_update.stream = stream_state;
2178         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2179                                      stream_state, &stream_update,
2180                                      stream_state->ctx->dc->current_state);
2181         mutex_unlock(&adev->dm.dc_lock);
2182 }
2183
2184 static int dm_resume(void *handle)
2185 {
2186         struct amdgpu_device *adev = handle;
2187         struct drm_device *ddev = adev_to_drm(adev);
2188         struct amdgpu_display_manager *dm = &adev->dm;
2189         struct amdgpu_dm_connector *aconnector;
2190         struct drm_connector *connector;
2191         struct drm_connector_list_iter iter;
2192         struct drm_crtc *crtc;
2193         struct drm_crtc_state *new_crtc_state;
2194         struct dm_crtc_state *dm_new_crtc_state;
2195         struct drm_plane *plane;
2196         struct drm_plane_state *new_plane_state;
2197         struct dm_plane_state *dm_new_plane_state;
2198         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2199         enum dc_connection_type new_connection_type = dc_connection_none;
2200         struct dc_state *dc_state;
2201         int i, r, j;
2202
2203         if (amdgpu_in_reset(adev)) {
2204                 dc_state = dm->cached_dc_state;
2205
2206                 r = dm_dmub_hw_init(adev);
2207                 if (r)
2208                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2209
2210                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2211                 dc_resume(dm->dc);
2212
2213                 amdgpu_dm_irq_resume_early(adev);
2214
2215                 for (i = 0; i < dc_state->stream_count; i++) {
2216                         dc_state->streams[i]->mode_changed = true;
                        for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
                                dc_state->stream_status[i].plane_states[j]->update_flags.raw
                                        = 0xffffffff;
2220                         }
2221                 }
2222
2223                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2224
2225                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2226
2227                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2228
2229                 dc_release_state(dm->cached_dc_state);
2230                 dm->cached_dc_state = NULL;
2231
2232                 amdgpu_dm_irq_resume_late(adev);
2233
2234                 mutex_unlock(&dm->dc_lock);
2235
2236                 return 0;
2237         }
2238         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2239         dc_release_state(dm_state->context);
2240         dm_state->context = dc_create_state(dm->dc);
2241         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2242         dc_resource_state_construct(dm->dc, dm_state->context);
2243
2244         /* Before powering on DC we need to re-initialize DMUB. */
2245         r = dm_dmub_hw_init(adev);
2246         if (r)
2247                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2248
2249         /* power on hardware */
2250         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2251
2252         /* program HPD filter */
2253         dc_resume(dm->dc);
2254
2255         /*
2256          * early enable HPD Rx IRQ, should be done before set mode as short
2257          * pulse interrupts are used for MST
2258          */
2259         amdgpu_dm_irq_resume_early(adev);
2260
        /* On resume we need to rewrite the MSTM control bits to enable MST */
2262         s3_handle_mst(ddev, false);
2263
        /* Do detection */
2265         drm_connector_list_iter_begin(ddev, &iter);
2266         drm_for_each_connector_iter(connector, &iter) {
2267                 aconnector = to_amdgpu_dm_connector(connector);
2268
2269                 /*
2270                  * this is the case when traversing through already created
2271                  * MST connectors, should be skipped
2272                  */
2273                 if (aconnector->mst_port)
2274                         continue;
2275
2276                 mutex_lock(&aconnector->hpd_lock);
2277                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2278                         DRM_ERROR("KMS: Failed to detect connector\n");
2279
2280                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2281                         emulated_link_detect(aconnector->dc_link);
2282                 else
2283                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2284
2285                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2286                         aconnector->fake_enable = false;
2287
2288                 if (aconnector->dc_sink)
2289                         dc_sink_release(aconnector->dc_sink);
2290                 aconnector->dc_sink = NULL;
2291                 amdgpu_dm_update_connector_after_detect(aconnector);
2292                 mutex_unlock(&aconnector->hpd_lock);
2293         }
2294         drm_connector_list_iter_end(&iter);
2295
2296         /* Force mode set in atomic commit */
2297         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2298                 new_crtc_state->active_changed = true;
2299
2300         /*
2301          * atomic_check is expected to create the dc states. We need to release
2302          * them here, since they were duplicated as part of the suspend
2303          * procedure.
2304          */
2305         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2306                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2307                 if (dm_new_crtc_state->stream) {
2308                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2309                         dc_stream_release(dm_new_crtc_state->stream);
2310                         dm_new_crtc_state->stream = NULL;
2311                 }
2312         }
2313
2314         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2315                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2316                 if (dm_new_plane_state->dc_state) {
2317                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2318                         dc_plane_state_release(dm_new_plane_state->dc_state);
2319                         dm_new_plane_state->dc_state = NULL;
2320                 }
2321         }
2322
2323         drm_atomic_helper_resume(ddev, dm->cached_state);
2324
2325         dm->cached_state = NULL;
2326
2327         amdgpu_dm_irq_resume_late(adev);
2328
2329         amdgpu_dm_smu_write_watermarks_table(adev);
2330
2331         return 0;
2332 }
2333
2334 /**
2335  * DOC: DM Lifecycle
2336  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2338  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2339  * the base driver's device list to be initialized and torn down accordingly.
2340  *
2341  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2342  */
2343
2344 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2345         .name = "dm",
2346         .early_init = dm_early_init,
2347         .late_init = dm_late_init,
2348         .sw_init = dm_sw_init,
2349         .sw_fini = dm_sw_fini,
2350         .early_fini = amdgpu_dm_early_fini,
2351         .hw_init = dm_hw_init,
2352         .hw_fini = dm_hw_fini,
2353         .suspend = dm_suspend,
2354         .resume = dm_resume,
2355         .is_idle = dm_is_idle,
2356         .wait_for_idle = dm_wait_for_idle,
2357         .check_soft_reset = dm_check_soft_reset,
2358         .soft_reset = dm_soft_reset,
2359         .set_clockgating_state = dm_set_clockgating_state,
2360         .set_powergating_state = dm_set_powergating_state,
2361 };
2362
2363 const struct amdgpu_ip_block_version dm_ip_block =
2364 {
2365         .type = AMD_IP_BLOCK_TYPE_DCE,
2366         .major = 1,
2367         .minor = 0,
2368         .rev = 0,
2369         .funcs = &amdgpu_dm_funcs,
2370 };
2371
2372
2373 /**
2374  * DOC: atomic
2375  *
2376  * *WIP*
2377  */
2378
2379 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2380         .fb_create = amdgpu_display_user_framebuffer_create,
2381         .get_format_info = amd_get_format_info,
2382         .output_poll_changed = drm_fb_helper_output_poll_changed,
2383         .atomic_check = amdgpu_dm_atomic_check,
2384         .atomic_commit = drm_atomic_helper_commit,
2385 };
2386
2387 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2388         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2389 };
2390
2391 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2392 {
2393         u32 max_cll, min_cll, max, min, q, r;
2394         struct amdgpu_dm_backlight_caps *caps;
2395         struct amdgpu_display_manager *dm;
2396         struct drm_connector *conn_base;
2397         struct amdgpu_device *adev;
2398         struct dc_link *link = NULL;
2399         static const u8 pre_computed_values[] = {
2400                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2401                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2402
2403         if (!aconnector || !aconnector->dc_link)
2404                 return;
2405
2406         link = aconnector->dc_link;
2407         if (link->connector_signal != SIGNAL_TYPE_EDP)
2408                 return;
2409
2410         conn_base = &aconnector->base;
2411         adev = drm_to_adev(conn_base->dev);
2412         dm = &adev->dm;
2413         caps = &dm->backlight_caps;
2414         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2415         caps->aux_support = false;
2416         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2417         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2418
2419         if (caps->ext_caps->bits.oled == 1 ||
2420             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2421             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2422                 caps->aux_support = true;
2423
2424         if (amdgpu_backlight == 0)
2425                 caps->aux_support = false;
2426         else if (amdgpu_backlight == 1)
2427                 caps->aux_support = true;
2428
        /* From the specification (CTA-861-G), the maximum luminance is
         * calculated as:
         *      Luminance = 50*2**(CV/32)
         * where CV is a one-byte value.
         * Evaluating this expression directly would require floating-point
         * precision. To avoid that, note that CV is divided by a constant:
         * by Euclidean division, CV can be written as CV = 32*q + r.
         * Substituting into the luminance expression gives
         * 50*(2**q)*(2**(r/32)), so only 50*2**(r/32) needs to be
         * pre-computed for r in 0..31. The table was generated with the
         * following Ruby line:
         *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
         * and the results can be verified against pre_computed_values.
         */
2444         q = max_cll >> 5;
2445         r = max_cll % 32;
2446         max = (1 << q) * pre_computed_values[r];
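        /* For example, max_cll = 70 gives q = 2 and r = 6, so
         * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
         * matching round(50 * 2**(70/32.0)) = 228.
         */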
2447
2448         // min luminance: maxLum * (CV/255)^2 / 100
2449         q = DIV_ROUND_CLOSEST(min_cll, 255);
2450         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2451
2452         caps->aux_max_input_signal = max;
2453         caps->aux_min_input_signal = min;
2454 }
2455
2456 void amdgpu_dm_update_connector_after_detect(
2457                 struct amdgpu_dm_connector *aconnector)
2458 {
2459         struct drm_connector *connector = &aconnector->base;
2460         struct drm_device *dev = connector->dev;
2461         struct dc_sink *sink;
2462
2463         /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state)
2465                 return;
2466
2467         sink = aconnector->dc_link->local_sink;
2468         if (sink)
2469                 dc_sink_retain(sink);
2470
2471         /*
         * An EDID-managed connector gets its first update only in the
         * mode_valid hook; the connector sink is then set to either a fake
         * or a physical sink depending on the link status.
         * Skip if this was already done during boot.
2475          */
2476         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2477                         && aconnector->dc_em_sink) {
2478
2479                 /*
                 * For headless S3 resume, use the emulated sink (dc_em_sink)
                 * to fake a stream, because connector->sink is set to NULL
                 * on resume.
2482                  */
2483                 mutex_lock(&dev->mode_config.mutex);
2484
2485                 if (sink) {
2486                         if (aconnector->dc_sink) {
2487                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2488                                 /*
                                 * The retain and release below bump the
                                 * sink's refcount because the link no longer
                                 * points to it after disconnect; otherwise,
                                 * the next crtc-to-connector reshuffle by the
                                 * UMD would trigger an unwanted dc_sink
                                 * release.
2493                                  */
2494                                 dc_sink_release(aconnector->dc_sink);
2495                         }
2496                         aconnector->dc_sink = sink;
2497                         dc_sink_retain(aconnector->dc_sink);
2498                         amdgpu_dm_update_freesync_caps(connector,
2499                                         aconnector->edid);
2500                 } else {
2501                         amdgpu_dm_update_freesync_caps(connector, NULL);
2502                         if (!aconnector->dc_sink) {
2503                                 aconnector->dc_sink = aconnector->dc_em_sink;
2504                                 dc_sink_retain(aconnector->dc_sink);
2505                         }
2506                 }
2507
2508                 mutex_unlock(&dev->mode_config.mutex);
2509
2510                 if (sink)
2511                         dc_sink_release(sink);
2512                 return;
2513         }
2514
2515         /*
         * TODO: temporary guard while a proper fix is found;
         * if this sink is an MST sink, we should not do anything.
2518          */
2519         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2520                 dc_sink_release(sink);
2521                 return;
2522         }
2523
2524         if (aconnector->dc_sink == sink) {
2525                 /*
2526                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2527                  * Do nothing!!
2528                  */
2529                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2530                                 aconnector->connector_id);
2531                 if (sink)
2532                         dc_sink_release(sink);
2533                 return;
2534         }
2535
2536         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2537                 aconnector->connector_id, aconnector->dc_sink, sink);
2538
2539         mutex_lock(&dev->mode_config.mutex);
2540
2541         /*
2542          * 1. Update status of the drm connector
2543          * 2. Send an event and let userspace tell us what to do
2544          */
2545         if (sink) {
2546                 /*
2547                  * TODO: check if we still need the S3 mode update workaround.
2548                  * If yes, put it here.
2549                  */
2550                 if (aconnector->dc_sink) {
2551                         amdgpu_dm_update_freesync_caps(connector, NULL);
2552                         dc_sink_release(aconnector->dc_sink);
2553                 }
2554
2555                 aconnector->dc_sink = sink;
2556                 dc_sink_retain(aconnector->dc_sink);
2557                 if (sink->dc_edid.length == 0) {
2558                         aconnector->edid = NULL;
2559                         if (aconnector->dc_link->aux_mode) {
2560                                 drm_dp_cec_unset_edid(
2561                                         &aconnector->dm_dp_aux.aux);
2562                         }
2563                 } else {
2564                         aconnector->edid =
2565                                 (struct edid *)sink->dc_edid.raw_edid;
2566
2567                         drm_connector_update_edid_property(connector,
2568                                                            aconnector->edid);
2569                         if (aconnector->dc_link->aux_mode)
2570                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2571                                                     aconnector->edid);
2572                 }
2573
2574                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2575                 update_connector_ext_caps(aconnector);
2576         } else {
2577                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2578                 amdgpu_dm_update_freesync_caps(connector, NULL);
2579                 drm_connector_update_edid_property(connector, NULL);
2580                 aconnector->num_modes = 0;
2581                 dc_sink_release(aconnector->dc_sink);
2582                 aconnector->dc_sink = NULL;
2583                 aconnector->edid = NULL;
2584 #ifdef CONFIG_DRM_AMD_DC_HDCP
2585                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2586                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2587                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2588 #endif
2589         }
2590
2591         mutex_unlock(&dev->mode_config.mutex);
2592
2593         update_subconnector_property(aconnector);
2594
2595         if (sink)
2596                 dc_sink_release(sink);
2597 }
2598
2599 static void handle_hpd_irq(void *param)
2600 {
2601         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2602         struct drm_connector *connector = &aconnector->base;
2603         struct drm_device *dev = connector->dev;
2604         enum dc_connection_type new_connection_type = dc_connection_none;
2605         struct amdgpu_device *adev = drm_to_adev(dev);
2606 #ifdef CONFIG_DRM_AMD_DC_HDCP
2607         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2608 #endif
2609
2610         if (adev->dm.disable_hpd_irq)
2611                 return;
2612
2613         /*
         * In case of failure, or for MST, there is no need to update the
         * connector status or notify the OS, since (in the MST case) MST
         * handles this in its own context.
2616          */
2617         mutex_lock(&aconnector->hpd_lock);
2618
2619 #ifdef CONFIG_DRM_AMD_DC_HDCP
2620         if (adev->dm.hdcp_workqueue) {
2621                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2622                 dm_con_state->update_hdcp = true;
2623         }
2624 #endif
2625         if (aconnector->fake_enable)
2626                 aconnector->fake_enable = false;
2627
2628         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2629                 DRM_ERROR("KMS: Failed to detect connector\n");
2630
2631         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2632                 emulated_link_detect(aconnector->dc_link);
2633
2635                 drm_modeset_lock_all(dev);
2636                 dm_restore_drm_connector_state(dev, connector);
2637                 drm_modeset_unlock_all(dev);
2638
2639                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2640                         drm_kms_helper_hotplug_event(dev);
2641
2642         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2643                 if (new_connection_type == dc_connection_none &&
2644                     aconnector->dc_link->type == dc_connection_none)
2645                         dm_set_dpms_off(aconnector->dc_link);
2646
2647                 amdgpu_dm_update_connector_after_detect(aconnector);
2648
2649                 drm_modeset_lock_all(dev);
2650                 dm_restore_drm_connector_state(dev, connector);
2651                 drm_modeset_unlock_all(dev);
2652
2653                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2654                         drm_kms_helper_hotplug_event(dev);
2655         }
2656         mutex_unlock(&aconnector->hpd_lock);
2658 }
2659
2660 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2661 {
2662         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2663         uint8_t dret;
2664         bool new_irq_handled = false;
2665         int dpcd_addr;
2666         int dpcd_bytes_to_read;
2667
2668         const int max_process_count = 30;
2669         int process_count = 0;
2670
2671         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2672
2673         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2674                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2675                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2676                 dpcd_addr = DP_SINK_COUNT;
2677         } else {
2678                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2679                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2680                 dpcd_addr = DP_SINK_COUNT_ESI;
2681         }
2682
2683         dret = drm_dp_dpcd_read(
2684                 &aconnector->dm_dp_aux.aux,
2685                 dpcd_addr,
2686                 esi,
2687                 dpcd_bytes_to_read);
2688
2689         while (dret == dpcd_bytes_to_read &&
2690                 process_count < max_process_count) {
2691                 uint8_t retry;
2692                 dret = 0;
2693
2694                 process_count++;
2695
2696                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2697                 /* handle HPD short pulse irq */
2698                 if (aconnector->mst_mgr.mst_state)
2699                         drm_dp_mst_hpd_irq(
2700                                 &aconnector->mst_mgr,
2701                                 esi,
2702                                 &new_irq_handled);
2703
2704                 if (new_irq_handled) {
                        /* ACK at DPCD to notify downstream */
2706                         const int ack_dpcd_bytes_to_write =
2707                                 dpcd_bytes_to_read - 1;
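                        /* The first byte (sink count) is not acked, so the
                         * write starts at dpcd_addr + 1 with one byte fewer.
                         */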
2708
2709                         for (retry = 0; retry < 3; retry++) {
2710                                 uint8_t wret;
2711
2712                                 wret = drm_dp_dpcd_write(
2713                                         &aconnector->dm_dp_aux.aux,
2714                                         dpcd_addr + 1,
2715                                         &esi[1],
2716                                         ack_dpcd_bytes_to_write);
2717                                 if (wret == ack_dpcd_bytes_to_write)
2718                                         break;
2719                         }
2720
2721                         /* check if there is new irq to be handled */
2722                         dret = drm_dp_dpcd_read(
2723                                 &aconnector->dm_dp_aux.aux,
2724                                 dpcd_addr,
2725                                 esi,
2726                                 dpcd_bytes_to_read);
2727
2728                         new_irq_handled = false;
2729                 } else {
2730                         break;
2731                 }
2732         }
2733
2734         if (process_count == max_process_count)
2735                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2736 }
2737
2738 static void handle_hpd_rx_irq(void *param)
2739 {
2740         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2741         struct drm_connector *connector = &aconnector->base;
2742         struct drm_device *dev = connector->dev;
2743         struct dc_link *dc_link = aconnector->dc_link;
2744         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2745         bool result = false;
2746         enum dc_connection_type new_connection_type = dc_connection_none;
2747         struct amdgpu_device *adev = drm_to_adev(dev);
2748         union hpd_irq_data hpd_irq_data;
        bool lock_flag = false;
2750
2751         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2752
2753         if (adev->dm.disable_hpd_irq)
2754                 return;
2755
2757         /*
         * TODO: Temporarily hold this mutex so the HPD interrupt cannot
         * race on the GPIO; once an i2c helper is implemented, this mutex
         * should be retired.
2761          */
2762         mutex_lock(&aconnector->hpd_lock);
2763
2764         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2765
2766         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2767                 (dc_link->type == dc_connection_mst_branch)) {
2768                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2769                         result = true;
2770                         dm_handle_hpd_rx_irq(aconnector);
2771                         goto out;
2772                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2773                         result = false;
2774                         dm_handle_hpd_rx_irq(aconnector);
2775                         goto out;
2776                 }
2777         }
2778
2779         /*
2780          * TODO: We need the lock to avoid touching DC state while it's being
2781          * modified during automated compliance testing, or when link loss
2782          * happens. While this should be split into subhandlers and proper
2783          * interfaces to avoid having to conditionally lock like this in the
2784          * outer layer, we need this workaround temporarily to allow MST
2785          * lightup in some scenarios to avoid timeout.
2786          */
2787         if (!amdgpu_in_reset(adev) &&
2788             (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2789              hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2790                 mutex_lock(&adev->dm.dc_lock);
2791                 lock_flag = 1;
2792         }
2793
2794 #ifdef CONFIG_DRM_AMD_DC_HDCP
2795         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2796 #else
2797         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2798 #endif
2799         if (!amdgpu_in_reset(adev) && lock_flag)
2800                 mutex_unlock(&adev->dm.dc_lock);
2801
2802 out:
2803         if (result && !is_mst_root_connector) {
2804                 /* Downstream Port status changed. */
2805                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2806                         DRM_ERROR("KMS: Failed to detect connector\n");
2807
2808                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2809                         emulated_link_detect(dc_link);
2810
2811                         if (aconnector->fake_enable)
2812                                 aconnector->fake_enable = false;
2813
2814                         amdgpu_dm_update_connector_after_detect(aconnector);
2815
2817                         drm_modeset_lock_all(dev);
2818                         dm_restore_drm_connector_state(dev, connector);
2819                         drm_modeset_unlock_all(dev);
2820
2821                         drm_kms_helper_hotplug_event(dev);
2822                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2824                         if (aconnector->fake_enable)
2825                                 aconnector->fake_enable = false;
2826
2827                         amdgpu_dm_update_connector_after_detect(aconnector);
2828
2830                         drm_modeset_lock_all(dev);
2831                         dm_restore_drm_connector_state(dev, connector);
2832                         drm_modeset_unlock_all(dev);
2833
2834                         drm_kms_helper_hotplug_event(dev);
2835                 }
2836         }
2837 #ifdef CONFIG_DRM_AMD_DC_HDCP
2838         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2839                 if (adev->dm.hdcp_workqueue)
2840                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2841         }
2842 #endif
2843
2844         if (dc_link->type != dc_connection_mst_branch)
2845                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2846
2847         mutex_unlock(&aconnector->hpd_lock);
2848 }
2849
2850 static void register_hpd_handlers(struct amdgpu_device *adev)
2851 {
2852         struct drm_device *dev = adev_to_drm(adev);
2853         struct drm_connector *connector;
2854         struct amdgpu_dm_connector *aconnector;
2855         const struct dc_link *dc_link;
2856         struct dc_interrupt_params int_params = {0};
2857
2858         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2859         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2860
2861         list_for_each_entry(connector,
2862                         &dev->mode_config.connector_list, head) {
2863
2864                 aconnector = to_amdgpu_dm_connector(connector);
2865                 dc_link = aconnector->dc_link;
2866
2867                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2868                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2869                         int_params.irq_source = dc_link->irq_source_hpd;
2870
2871                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2872                                         handle_hpd_irq,
2873                                         (void *) aconnector);
2874                 }
2875
2876                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2877
2878                         /* Also register for DP short pulse (hpd_rx). */
2879                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2880                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2881
2882                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2883                                         handle_hpd_rx_irq,
2884                                         (void *) aconnector);
2885                 }
2886         }
2887 }
2888
2889 #if defined(CONFIG_DRM_AMD_DC_SI)
2890 /* Register IRQ sources and initialize IRQ callbacks */
2891 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2892 {
2893         struct dc *dc = adev->dm.dc;
2894         struct common_irq_params *c_irq_params;
2895         struct dc_interrupt_params int_params = {0};
2896         int r;
2897         int i;
2898         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2899
2900         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2901         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2902
2903         /*
2904          * Actions of amdgpu_irq_add_id():
2905          * 1. Register a set() function with base driver.
2906          *    Base driver will call set() function to enable/disable an
2907          *    interrupt in DC hardware.
2908          * 2. Register amdgpu_dm_irq_handler().
2909          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2910          *    coming from DC hardware.
2911          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2912          *    for acknowledging and handling. */
2913
2914         /* Use VBLANK interrupt */
2915         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2916                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2917                 if (r) {
2918                         DRM_ERROR("Failed to add crtc irq id!\n");
2919                         return r;
2920                 }
2921
2922                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2923                 int_params.irq_source =
2924                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2925
2926                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2927
2928                 c_irq_params->adev = adev;
2929                 c_irq_params->irq_src = int_params.irq_source;
2930
2931                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2932                                 dm_crtc_high_irq, c_irq_params);
2933         }
2934
2935         /* Use GRPH_PFLIP interrupt */
2936         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2937                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2938                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2939                 if (r) {
2940                         DRM_ERROR("Failed to add page flip irq id!\n");
2941                         return r;
2942                 }
2943
2944                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2945                 int_params.irq_source =
2946                         dc_interrupt_to_irq_source(dc, i, 0);
2947
2948                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2949
2950                 c_irq_params->adev = adev;
2951                 c_irq_params->irq_src = int_params.irq_source;
2952
2953                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2954                                 dm_pflip_high_irq, c_irq_params);
2956         }
2957
2958         /* HPD */
2959         r = amdgpu_irq_add_id(adev, client_id,
2960                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2961         if (r) {
2962                 DRM_ERROR("Failed to add hpd irq id!\n");
2963                 return r;
2964         }
2965
2966         register_hpd_handlers(adev);
2967
2968         return 0;
2969 }
2970 #endif
2971
2972 /* Register IRQ sources and initialize IRQ callbacks */
2973 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2974 {
2975         struct dc *dc = adev->dm.dc;
2976         struct common_irq_params *c_irq_params;
2977         struct dc_interrupt_params int_params = {0};
2978         int r;
2979         int i;
2980         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2981
2982         if (adev->asic_type >= CHIP_VEGA10)
2983                 client_id = SOC15_IH_CLIENTID_DCE;
2984
2985         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2986         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2987
2988         /*
2989          * Actions of amdgpu_irq_add_id():
2990          * 1. Register a set() function with base driver.
2991          *    Base driver will call set() function to enable/disable an
2992          *    interrupt in DC hardware.
2993          * 2. Register amdgpu_dm_irq_handler().
2994          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2995          *    coming from DC hardware.
2996          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2997          *    for acknowledging and handling. */
2998
2999         /* Use VBLANK interrupt */
3000         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3001                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3002                 if (r) {
3003                         DRM_ERROR("Failed to add crtc irq id!\n");
3004                         return r;
3005                 }
3006
3007                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008                 int_params.irq_source =
3009                         dc_interrupt_to_irq_source(dc, i, 0);
3010
3011                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3012
3013                 c_irq_params->adev = adev;
3014                 c_irq_params->irq_src = int_params.irq_source;
3015
3016                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017                                 dm_crtc_high_irq, c_irq_params);
3018         }
3019
3020         /* Use VUPDATE interrupt */
3021         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3022                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3023                 if (r) {
3024                         DRM_ERROR("Failed to add vupdate irq id!\n");
3025                         return r;
3026                 }
3027
3028                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3029                 int_params.irq_source =
3030                         dc_interrupt_to_irq_source(dc, i, 0);
3031
3032                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3033
3034                 c_irq_params->adev = adev;
3035                 c_irq_params->irq_src = int_params.irq_source;
3036
3037                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3038                                 dm_vupdate_high_irq, c_irq_params);
3039         }
3040
3041         /* Use GRPH_PFLIP interrupt */
3042         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3043                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3044                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3045                 if (r) {
3046                         DRM_ERROR("Failed to add page flip irq id!\n");
3047                         return r;
3048                 }
3049
3050                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3051                 int_params.irq_source =
3052                         dc_interrupt_to_irq_source(dc, i, 0);
3053
3054                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3055
3056                 c_irq_params->adev = adev;
3057                 c_irq_params->irq_src = int_params.irq_source;
3058
3059                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3060                                 dm_pflip_high_irq, c_irq_params);
3062         }
3063
3064         /* HPD */
3065         r = amdgpu_irq_add_id(adev, client_id,
3066                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3067         if (r) {
3068                 DRM_ERROR("Failed to add hpd irq id!\n");
3069                 return r;
3070         }
3071
3072         register_hpd_handlers(adev);
3073
3074         return 0;
3075 }
3076
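/*
 * Each register_*_irq_handlers() variant above and below follows the same
 * per-source pattern; a condensed sketch ("srcid", "handler" and "params"
 * are placeholders, not identifiers from this file):
 *
 *	amdgpu_irq_add_id(adev, client_id, srcid, &adev-><ip>_irq);
 *	int_params.irq_source = dc_interrupt_to_irq_source(dc, srcid, 0);
 *	params->adev = adev;
 *	params->irq_src = int_params.irq_source;
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params, handler, params);
 */
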
3077 #if defined(CONFIG_DRM_AMD_DC_DCN)
3078 /* Register IRQ sources and initialize IRQ callbacks */
3079 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3080 {
3081         struct dc *dc = adev->dm.dc;
3082         struct common_irq_params *c_irq_params;
3083         struct dc_interrupt_params int_params = {0};
3084         int r;
3085         int i;
3086 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3087         static const unsigned int vrtl_int_srcid[] = {
3088                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3089                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3090                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3091                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3092                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3093                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3094         };
3095 #endif
3096
3097         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3098         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3099
3100         /*
3101          * Actions of amdgpu_irq_add_id():
3102          * 1. Register a set() function with base driver.
3103          *    Base driver will call set() function to enable/disable an
3104          *    interrupt in DC hardware.
3105          * 2. Register amdgpu_dm_irq_handler().
3106          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3107          *    coming from DC hardware.
3108          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3109          *    for acknowledging and handling.
3110          */
3111
3112         /* Use VSTARTUP interrupt */
3113         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3114                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3115                         i++) {
3116                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3117
3118                 if (r) {
3119                         DRM_ERROR("Failed to add crtc irq id!\n");
3120                         return r;
3121                 }
3122
3123                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3124                 int_params.irq_source =
3125                         dc_interrupt_to_irq_source(dc, i, 0);
3126
3127                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3128
3129                 c_irq_params->adev = adev;
3130                 c_irq_params->irq_src = int_params.irq_source;
3131
3132                 amdgpu_dm_irq_register_interrupt(
3133                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3134         }
3135
3136         /* Use otg vertical line interrupt */
3137 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3138         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3139                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3140                                 vrtl_int_srcid[i], &adev->vline0_irq);
3141
3142                 if (r) {
3143                         DRM_ERROR("Failed to add vline0 irq id!\n");
3144                         return r;
3145                 }
3146
3147                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3148                 int_params.irq_source =
3149                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3150
3151                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3152                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3153                         break;
3154                 }
3155
3156                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3157                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3158
3159                 c_irq_params->adev = adev;
3160                 c_irq_params->irq_src = int_params.irq_source;
3161
3162                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3163                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3164         }
3165 #endif
3166
3167         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3168          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3169          * to trigger at the end of each vblank, regardless of the lock state,
3170          * matching DCE behaviour.
3171          */
3172         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3173              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3174              i++) {
3175                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3176
3177                 if (r) {
3178                         DRM_ERROR("Failed to add vupdate irq id!\n");
3179                         return r;
3180                 }
3181
3182                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3183                 int_params.irq_source =
3184                         dc_interrupt_to_irq_source(dc, i, 0);
3185
3186                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3187
3188                 c_irq_params->adev = adev;
3189                 c_irq_params->irq_src = int_params.irq_source;
3190
3191                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3192                                 dm_vupdate_high_irq, c_irq_params);
3193         }
3194
3195         /* Use GRPH_PFLIP interrupt */
3196         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3197                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3198                         i++) {
3199                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3200                 if (r) {
3201                         DRM_ERROR("Failed to add page flip irq id!\n");
3202                         return r;
3203                 }
3204
3205                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3206                 int_params.irq_source =
3207                         dc_interrupt_to_irq_source(dc, i, 0);
3208
3209                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3210
3211                 c_irq_params->adev = adev;
3212                 c_irq_params->irq_src = int_params.irq_source;
3213
3214                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3215                                 dm_pflip_high_irq, c_irq_params);
3217         }
3218
3219         /* HPD */
3220         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3221                         &adev->hpd_irq);
3222         if (r) {
3223                 DRM_ERROR("Failed to add hpd irq id!\n");
3224                 return r;
3225         }
3226
3227         register_hpd_handlers(adev);
3228
3229         return 0;
3230 }
3231 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3232 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3233 {
3234         struct dc *dc = adev->dm.dc;
3235         struct common_irq_params *c_irq_params;
3236         struct dc_interrupt_params int_params = {0};
3237         int r, i;
3238
3239         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3240         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3241
3242         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3243                         &adev->dmub_outbox_irq);
3244         if (r) {
3245                 DRM_ERROR("Failed to add outbox irq id!\n");
3246                 return r;
3247         }
3248
3249         if (dc->ctx->dmub_srv) {
3250                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3251                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3252                 int_params.irq_source =
3253                         dc_interrupt_to_irq_source(dc, i, 0);
3254
3255                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3256
3257                 c_irq_params->adev = adev;
3258                 c_irq_params->irq_src = int_params.irq_source;
3259
3260                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3261                                 dm_dmub_outbox1_low_irq, c_irq_params);
3262         }
3263
3264         return 0;
3265 }
3266 #endif
3267
3268 /*
3269  * Acquires the lock for the atomic state object and returns
3270  * the new atomic state.
3271  *
3272  * This should only be called during atomic check.
3273  */
3274 static int dm_atomic_get_state(struct drm_atomic_state *state,
3275                                struct dm_atomic_state **dm_state)
3276 {
3277         struct drm_device *dev = state->dev;
3278         struct amdgpu_device *adev = drm_to_adev(dev);
3279         struct amdgpu_display_manager *dm = &adev->dm;
3280         struct drm_private_state *priv_state;
3281
3282         if (*dm_state)
3283                 return 0;
3284
3285         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3286         if (IS_ERR(priv_state))
3287                 return PTR_ERR(priv_state);
3288
3289         *dm_state = to_dm_atomic_state(priv_state);
3290
3291         return 0;
3292 }
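
/*
 * Usage sketch (illustrative only; the local variable names are
 * hypothetical):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * On success the private object's lock is held for the remainder of the
 * atomic operation and dm_state->context can be safely modified.
 */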
3293
3294 static struct dm_atomic_state *
3295 dm_atomic_get_new_state(struct drm_atomic_state *state)
3296 {
3297         struct drm_device *dev = state->dev;
3298         struct amdgpu_device *adev = drm_to_adev(dev);
3299         struct amdgpu_display_manager *dm = &adev->dm;
3300         struct drm_private_obj *obj;
3301         struct drm_private_state *new_obj_state;
3302         int i;
3303
3304         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3305                 if (obj->funcs == dm->atomic_obj.funcs)
3306                         return to_dm_atomic_state(new_obj_state);
3307         }
3308
3309         return NULL;
3310 }
3311
3312 static struct drm_private_state *
3313 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3314 {
3315         struct dm_atomic_state *old_state, *new_state;
3316
3317         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3318         if (!new_state)
3319                 return NULL;
3320
3321         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3322
3323         old_state = to_dm_atomic_state(obj->state);
3324
3325         if (old_state && old_state->context)
3326                 new_state->context = dc_copy_state(old_state->context);
3327
3328         if (!new_state->context) {
3329                 kfree(new_state);
3330                 return NULL;
3331         }
3332
3333         return &new_state->base;
3334 }
3335
3336 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3337                                     struct drm_private_state *state)
3338 {
3339         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3340
3341         if (dm_state && dm_state->context)
3342                 dc_release_state(dm_state->context);
3343
3344         kfree(dm_state);
3345 }
3346
3347 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3348         .atomic_duplicate_state = dm_atomic_duplicate_state,
3349         .atomic_destroy_state = dm_atomic_destroy_state,
3350 };
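
/*
 * The DRM core drives these callbacks: atomic_duplicate_state runs each
 * time the private object is pulled into an atomic state (e.g. via
 * drm_atomic_get_private_obj_state() above), and atomic_destroy_state
 * runs when that state is freed, dropping the DC context reference taken
 * by dc_copy_state().
 */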
3351
3352 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3353 {
3354         struct dm_atomic_state *state;
3355         int r;
3356
3357         adev->mode_info.mode_config_initialized = true;
3358
3359         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3360         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3361
3362         adev_to_drm(adev)->mode_config.max_width = 16384;
3363         adev_to_drm(adev)->mode_config.max_height = 16384;
3364
3365         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3366         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3367         /* indicates support for immediate flip */
3368         adev_to_drm(adev)->mode_config.async_page_flip = true;
3369
3370         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3371
3372         state = kzalloc(sizeof(*state), GFP_KERNEL);
3373         if (!state)
3374                 return -ENOMEM;
3375
3376         state->context = dc_create_state(adev->dm.dc);
3377         if (!state->context) {
3378                 kfree(state);
3379                 return -ENOMEM;
3380         }
3381
3382         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3383
3384         drm_atomic_private_obj_init(adev_to_drm(adev),
3385                                     &adev->dm.atomic_obj,
3386                                     &state->base,
3387                                     &dm_atomic_state_funcs);
3388
3389         r = amdgpu_display_modeset_create_props(adev);
3390         if (r) {
3391                 dc_release_state(state->context);
3392                 kfree(state);
3393                 return r;
3394         }
3395
3396         r = amdgpu_dm_audio_init(adev);
3397         if (r) {
3398                 dc_release_state(state->context);
3399                 kfree(state);
3400                 return r;
3401         }
3402
3403         return 0;
3404 }
3405
3406 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3407 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3408 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3409
3410 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3411         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3412
3413 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3414 {
3415 #if defined(CONFIG_ACPI)
3416         struct amdgpu_dm_backlight_caps caps;
3417
3418         memset(&caps, 0, sizeof(caps));
3419
3420         if (dm->backlight_caps.caps_valid)
3421                 return;
3422
3423         amdgpu_acpi_get_backlight_caps(&caps);
3424         if (caps.caps_valid) {
3425                 dm->backlight_caps.caps_valid = true;
3426                 if (caps.aux_support)
3427                         return;
3428                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3429                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3430         } else {
3431                 dm->backlight_caps.min_input_signal =
3432                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3433                 dm->backlight_caps.max_input_signal =
3434                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3435         }
3436 #else
3437         if (dm->backlight_caps.aux_support)
3438                 return;
3439
3440         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3441         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3442 #endif
3443 }
3444
3445 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3446                                 unsigned int *min, unsigned int *max)
3447 {
3448         if (!caps)
3449                 return 0;
3450
3451         if (caps->aux_support) {
3452                 // Firmware limits are in nits, DC API wants millinits.
3453                 *max = 1000 * caps->aux_max_input_signal;
3454                 *min = 1000 * caps->aux_min_input_signal;
3455         } else {
3456                 // Firmware limits are 8-bit, PWM control is 16-bit.
3457                 *max = 0x101 * caps->max_input_signal;
3458                 *min = 0x101 * caps->min_input_signal;
3459         }
3460         return 1;
3461 }
3462
3463 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3464                                         uint32_t brightness)
3465 {
3466         unsigned int min, max;
3467
3468         if (!get_brightness_range(caps, &min, &max))
3469                 return brightness;
3470
3471         // Rescale 0..255 to min..max
3472         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3473                                        AMDGPU_MAX_BL_LEVEL);
3474 }
3475
3476 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3477                                       uint32_t brightness)
3478 {
3479         unsigned int min, max;
3480
3481         if (!get_brightness_range(caps, &min, &max))
3482                 return brightness;
3483
3484         if (brightness < min)
3485                 return 0;
3486         // Rescale min..max to 0..255
3487         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3488                                  max - min);
3489 }
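
/*
 * Worked example for the PWM path (a sketch assuming the defaults above
 * and AMDGPU_MAX_BL_LEVEL == 0xff):
 *
 *	min = 0x101 * 12  = 3084
 *	max = 0x101 * 255 = 65535	(0x101 * 0xff == 0xffff, full 16 bits)
 *
 * A user brightness of 128 maps to
 *	3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432
 * and convert_brightness_to_user() maps 34432 back to 128.
 */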
3490
3491 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3492                                          u32 user_brightness)
3493 {
3494         struct amdgpu_dm_backlight_caps caps;
3495         struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3496         u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3497         bool rc = true; /* stays true when there are no eDP links */
3498         int i;
3499
3500         amdgpu_dm_update_backlight_caps(dm);
3501         caps = dm->backlight_caps;
3502
3503         for (i = 0; i < dm->num_of_edps; i++) {
3504                 dm->brightness[i] = user_brightness;
3505                 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3506                 link[i] = (struct dc_link *)dm->backlight_link[i];
3507         }
3508
3509         /* Change brightness based on AUX property */
3510         if (caps.aux_support) {
3511                 for (i = 0; i < dm->num_of_edps; i++) {
3512                         rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3513                                 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3514                         if (!rc) {
3515                                 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3516                                 break;
3517                         }
3518                 }
3519         } else {
3520                 for (i = 0; i < dm->num_of_edps; i++) {
3521                         rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3522                         if (!rc) {
3523                                 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3524                                 break;
3525                         }
3526                 }
3527         }
3528
3529         return rc ? 0 : 1;
3530 }
3531
3532 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3533 {
3534         struct amdgpu_display_manager *dm = bl_get_data(bd);
3535
3536         amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3537
3538         return 0;
3539 }
3540
3541 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3542 {
3543         struct amdgpu_dm_backlight_caps caps;
3544
3545         amdgpu_dm_update_backlight_caps(dm);
3546         caps = dm->backlight_caps;
3547
3548         if (caps.aux_support) {
3549                 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3550                 u32 avg, peak;
3551                 bool rc;
3552
3553                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3554                 if (!rc)
3555                         return dm->brightness[0];
3556                 return convert_brightness_to_user(&caps, avg);
3557         } else {
3558                 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3559
3560                 if (ret == DC_ERROR_UNEXPECTED)
3561                         return dm->brightness[0];
3562                 return convert_brightness_to_user(&caps, ret);
3563         }
3564 }
3565
3566 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3567 {
3568         struct amdgpu_display_manager *dm = bl_get_data(bd);
3569
3570         return amdgpu_dm_backlight_get_level(dm);
3571 }
3572
3573 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3574         .options = BL_CORE_SUSPENDRESUME,
3575         .get_brightness = amdgpu_dm_backlight_get_brightness,
3576         .update_status  = amdgpu_dm_backlight_update_status,
3577 };
3578
3579 static void
3580 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3581 {
3582         char bl_name[16];
3583         struct backlight_properties props = { 0 };
3584         int i;
3585
3586         amdgpu_dm_update_backlight_caps(dm);
3587         for (i = 0; i < dm->num_of_edps; i++)
3588                 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3589
3590         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3591         props.brightness = AMDGPU_MAX_BL_LEVEL;
3592         props.type = BACKLIGHT_RAW;
3593
3594         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3595                  adev_to_drm(dm->adev)->primary->index);
3596
3597         dm->backlight_dev = backlight_device_register(bl_name,
3598                                                       adev_to_drm(dm->adev)->dev,
3599                                                       dm,
3600                                                       &amdgpu_dm_backlight_ops,
3601                                                       &props);
3602
3603         if (IS_ERR(dm->backlight_dev))
3604                 DRM_ERROR("DM: Backlight registration failed!\n");
3605         else
3606                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3607 }
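
/*
 * The registered device shows up as /sys/class/backlight/amdgpu_bl<N>/;
 * writes to its "brightness" attribute end up in
 * amdgpu_dm_backlight_update_status() and reads in
 * amdgpu_dm_backlight_get_brightness() above.
 */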
3608
3609 #endif
3610
3611 static int initialize_plane(struct amdgpu_display_manager *dm,
3612                             struct amdgpu_mode_info *mode_info, int plane_id,
3613                             enum drm_plane_type plane_type,
3614                             const struct dc_plane_cap *plane_cap)
3615 {
3616         struct drm_plane *plane;
3617         unsigned long possible_crtcs;
3618         int ret = 0;
3619
3620         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3621         if (!plane) {
3622                 DRM_ERROR("KMS: Failed to allocate plane\n");
3623                 return -ENOMEM;
3624         }
3625         plane->type = plane_type;
3626
3627         /*
3628          * HACK: IGT tests expect that the primary plane for a CRTC
3629          * can only have one possible CRTC. Only expose support for
3630          * any CRTC on planes that are not going to be used as a
3631          * primary plane - i.e. overlay or underlay planes.
3632          */
3633         possible_crtcs = 1 << plane_id;
3634         if (plane_id >= dm->dc->caps.max_streams)
3635                 possible_crtcs = 0xff;
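        /*
         * E.g. plane_id 1 yields possible_crtcs == 0x2 (CRTC 1 only), while
         * overlay/underlay planes get 0xff (any of the first eight CRTCs).
         */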
3636
3637         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3638
3639         if (ret) {
3640                 DRM_ERROR("KMS: Failed to initialize plane\n");
3641                 kfree(plane);
3642                 return ret;
3643         }
3644
3645         if (mode_info)
3646                 mode_info->planes[plane_id] = plane;
3647
3648         return ret;
3649 }
3650
3652 static void register_backlight_device(struct amdgpu_display_manager *dm,
3653                                       struct dc_link *link)
3654 {
3655 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3656         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3657
3658         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3659             link->type != dc_connection_none) {
3660                 /*
3661                  * Even if registration fails, we should continue with
3662                  * DM initialization, because not having backlight control
3663                  * is better than a black screen.
3664                  */
3665                 if (!dm->backlight_dev)
3666                         amdgpu_dm_register_backlight_device(dm);
3667
3668                 if (dm->backlight_dev) {
3669                         dm->backlight_link[dm->num_of_edps] = link;
3670                         dm->num_of_edps++;
3671                 }
3672         }
3673 #endif
3674 }
3675
3677 /*
3678  * In this architecture, the association
3679  * connector -> encoder -> crtc
3680  * is not really required. The crtc and connector will hold the
3681  * display_index as an abstraction to use with the DAL component.
3682  *
3683  * Returns 0 on success
3684  */
3685 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3686 {
3687         struct amdgpu_display_manager *dm = &adev->dm;
3688         int32_t i;
3689         struct amdgpu_dm_connector *aconnector = NULL;
3690         struct amdgpu_encoder *aencoder = NULL;
3691         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3692         uint32_t link_cnt;
3693         int32_t primary_planes;
3694         enum dc_connection_type new_connection_type = dc_connection_none;
3695         const struct dc_plane_cap *plane;
3696
3697         dm->display_indexes_num = dm->dc->caps.max_streams;
3698         /* Update the number of CRTCs actually used */
3699         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3700
3701         link_cnt = dm->dc->caps.max_links;
3702         if (amdgpu_dm_mode_config_init(dm->adev)) {
3703                 DRM_ERROR("DM: Failed to initialize mode config\n");
3704                 return -EINVAL;
3705         }
3706
3707         /* There is one primary plane per CRTC */
3708         primary_planes = dm->dc->caps.max_streams;
3709         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3710
3711         /*
3712          * Initialize primary planes, implicit planes for legacy IOCTLs.
3713          * Order is reversed to match iteration order in atomic check.
3714          */
3715         for (i = (primary_planes - 1); i >= 0; i--) {
3716                 plane = &dm->dc->caps.planes[i];
3717
3718                 if (initialize_plane(dm, mode_info, i,
3719                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3720                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3721                         goto fail;
3722                 }
3723         }
3724
3725         /*
3726          * Initialize overlay planes, index starting after primary planes.
3727          * These planes have a higher DRM index than the primary planes since
3728          * they should be considered as having a higher z-order.
3729          * Order is reversed to match iteration order in atomic check.
3730          *
3731          * Only support DCN for now, and only expose one so we don't encourage
3732          * userspace to use up all the pipes.
3733          */
3734         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3735                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3736
3737                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3738                         continue;
3739
3740                 if (!plane->blends_with_above || !plane->blends_with_below)
3741                         continue;
3742
3743                 if (!plane->pixel_format_support.argb8888)
3744                         continue;
3745
3746                 if (initialize_plane(dm, NULL, primary_planes + i,
3747                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3748                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3749                         goto fail;
3750                 }
3751
3752                 /* Only create one overlay plane. */
3753                 break;
3754         }
3755
3756         for (i = 0; i < dm->dc->caps.max_streams; i++)
3757                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3758                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3759                         goto fail;
3760                 }
3761
3762 #if defined(CONFIG_DRM_AMD_DC_DCN)
3763         /* Use Outbox interrupt */
3764         switch (adev->asic_type) {
3765         case CHIP_SIENNA_CICHLID:
3766         case CHIP_NAVY_FLOUNDER:
3767         case CHIP_RENOIR:
3768                 if (register_outbox_irq_handlers(dm->adev)) {
3769                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3770                         goto fail;
3771                 }
3772                 break;
3773         default:
3774                 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3775         }
3776 #endif
3777
3778         /* loops over all connectors on the board */
3779         for (i = 0; i < link_cnt; i++) {
3780                 struct dc_link *link = NULL;
3781
3782                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3783                         DRM_ERROR(
3784                                 "KMS: Cannot support more than %d display indexes\n",
3785                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3786                         continue;
3787                 }
3788
3789                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3790                 if (!aconnector)
3791                         goto fail;
3792
3793                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3794                 if (!aencoder)
3795                         goto fail;
3796
3797                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3798                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3799                         goto fail;
3800                 }
3801
3802                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3803                         DRM_ERROR("KMS: Failed to initialize connector\n");
3804                         goto fail;
3805                 }
3806
3807                 link = dc_get_link_at_index(dm->dc, i);
3808
3809                 if (!dc_link_detect_sink(link, &new_connection_type))
3810                         DRM_ERROR("KMS: Failed to detect connector\n");
3811
3812                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3813                         emulated_link_detect(link);
3814                         amdgpu_dm_update_connector_after_detect(aconnector);
3815
3816                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3817                         amdgpu_dm_update_connector_after_detect(aconnector);
3818                         register_backlight_device(dm, link);
3819                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3820                                 amdgpu_dm_set_psr_caps(link);
3821                 }
3822
3824         }
3825
3826         /* Software is initialized. Now we can register interrupt handlers. */
3827         switch (adev->asic_type) {
3828 #if defined(CONFIG_DRM_AMD_DC_SI)
3829         case CHIP_TAHITI:
3830         case CHIP_PITCAIRN:
3831         case CHIP_VERDE:
3832         case CHIP_OLAND:
3833                 if (dce60_register_irq_handlers(dm->adev)) {
3834                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3835                         goto fail;
3836                 }
3837                 break;
3838 #endif
3839         case CHIP_BONAIRE:
3840         case CHIP_HAWAII:
3841         case CHIP_KAVERI:
3842         case CHIP_KABINI:
3843         case CHIP_MULLINS:
3844         case CHIP_TONGA:
3845         case CHIP_FIJI:
3846         case CHIP_CARRIZO:
3847         case CHIP_STONEY:
3848         case CHIP_POLARIS11:
3849         case CHIP_POLARIS10:
3850         case CHIP_POLARIS12:
3851         case CHIP_VEGAM:
3852         case CHIP_VEGA10:
3853         case CHIP_VEGA12:
3854         case CHIP_VEGA20:
3855                 if (dce110_register_irq_handlers(dm->adev)) {
3856                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3857                         goto fail;
3858                 }
3859                 break;
3860 #if defined(CONFIG_DRM_AMD_DC_DCN)
3861         case CHIP_RAVEN:
3862         case CHIP_NAVI12:
3863         case CHIP_NAVI10:
3864         case CHIP_NAVI14:
3865         case CHIP_RENOIR:
3866         case CHIP_SIENNA_CICHLID:
3867         case CHIP_NAVY_FLOUNDER:
3868         case CHIP_DIMGREY_CAVEFISH:
3869         case CHIP_BEIGE_GOBY:
3870         case CHIP_VANGOGH:
3871                 if (dcn10_register_irq_handlers(dm->adev)) {
3872                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3873                         goto fail;
3874                 }
3875                 break;
3876 #endif
3877         default:
3878                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3879                 goto fail;
3880         }
3881
3882         return 0;
3883 fail:
3884         kfree(aencoder);
3885         kfree(aconnector);
3886
3887         return -EINVAL;
3888 }
3889
3890 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3891 {
3892         drm_atomic_private_obj_fini(&dm->atomic_obj);
3894 }
3895
3896 /******************************************************************************
3897  * amdgpu_display_funcs functions
3898  *****************************************************************************/
3899
3900 /*
3901  * dm_bandwidth_update - program display watermarks
3902  *
3903  * @adev: amdgpu_device pointer
3904  *
3905  * Calculate and program the display watermarks and line buffer allocation.
3906  */
3907 static void dm_bandwidth_update(struct amdgpu_device *adev)
3908 {
3909         /* TODO: implement later */
3910 }
3911
3912 static const struct amdgpu_display_funcs dm_display_funcs = {
3913         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3914         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3915         .backlight_set_level = NULL, /* never called for DC */
3916         .backlight_get_level = NULL, /* never called for DC */
3917         .hpd_sense = NULL,/* called unconditionally */
3918         .hpd_set_polarity = NULL, /* called unconditionally */
3919         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3920         .page_flip_get_scanoutpos =
3921                 dm_crtc_get_scanoutpos,/* called unconditionally */
3922         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3923         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3924 };
3925
3926 #if defined(CONFIG_DEBUG_KERNEL_DC)
3927
3928 static ssize_t s3_debug_store(struct device *device,
3929                               struct device_attribute *attr,
3930                               const char *buf,
3931                               size_t count)
3932 {
3933         int ret;
3934         int s3_state;
3935         struct drm_device *drm_dev = dev_get_drvdata(device);
3936         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3937
3938         ret = kstrtoint(buf, 0, &s3_state);
3939
3940         if (ret == 0) {
3941                 if (s3_state) {
3942                         dm_resume(adev);
3943                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3944                 } else
3945                         dm_suspend(adev);
3946         }
3947
3948         return ret == 0 ? count : 0;
3949 }
3950
3951 DEVICE_ATTR_WO(s3_debug);
3952
3953 #endif
3954
3955 static int dm_early_init(void *handle)
3956 {
3957         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3958
3959         switch (adev->asic_type) {
3960 #if defined(CONFIG_DRM_AMD_DC_SI)
3961         case CHIP_TAHITI:
3962         case CHIP_PITCAIRN:
3963         case CHIP_VERDE:
3964                 adev->mode_info.num_crtc = 6;
3965                 adev->mode_info.num_hpd = 6;
3966                 adev->mode_info.num_dig = 6;
3967                 break;
3968         case CHIP_OLAND:
3969                 adev->mode_info.num_crtc = 2;
3970                 adev->mode_info.num_hpd = 2;
3971                 adev->mode_info.num_dig = 2;
3972                 break;
3973 #endif
3974         case CHIP_BONAIRE:
3975         case CHIP_HAWAII:
3976                 adev->mode_info.num_crtc = 6;
3977                 adev->mode_info.num_hpd = 6;
3978                 adev->mode_info.num_dig = 6;
3979                 break;
3980         case CHIP_KAVERI:
3981                 adev->mode_info.num_crtc = 4;
3982                 adev->mode_info.num_hpd = 6;
3983                 adev->mode_info.num_dig = 7;
3984                 break;
3985         case CHIP_KABINI:
3986         case CHIP_MULLINS:
3987                 adev->mode_info.num_crtc = 2;
3988                 adev->mode_info.num_hpd = 6;
3989                 adev->mode_info.num_dig = 6;
3990                 break;
3991         case CHIP_FIJI:
3992         case CHIP_TONGA:
3993                 adev->mode_info.num_crtc = 6;
3994                 adev->mode_info.num_hpd = 6;
3995                 adev->mode_info.num_dig = 7;
3996                 break;
3997         case CHIP_CARRIZO:
3998                 adev->mode_info.num_crtc = 3;
3999                 adev->mode_info.num_hpd = 6;
4000                 adev->mode_info.num_dig = 9;
4001                 break;
4002         case CHIP_STONEY:
4003                 adev->mode_info.num_crtc = 2;
4004                 adev->mode_info.num_hpd = 6;
4005                 adev->mode_info.num_dig = 9;
4006                 break;
4007         case CHIP_POLARIS11:
4008         case CHIP_POLARIS12:
4009                 adev->mode_info.num_crtc = 5;
4010                 adev->mode_info.num_hpd = 5;
4011                 adev->mode_info.num_dig = 5;
4012                 break;
4013         case CHIP_POLARIS10:
4014         case CHIP_VEGAM:
4015                 adev->mode_info.num_crtc = 6;
4016                 adev->mode_info.num_hpd = 6;
4017                 adev->mode_info.num_dig = 6;
4018                 break;
4019         case CHIP_VEGA10:
4020         case CHIP_VEGA12:
4021         case CHIP_VEGA20:
4022                 adev->mode_info.num_crtc = 6;
4023                 adev->mode_info.num_hpd = 6;
4024                 adev->mode_info.num_dig = 6;
4025                 break;
4026 #if defined(CONFIG_DRM_AMD_DC_DCN)
4027         case CHIP_RAVEN:
4028         case CHIP_RENOIR:
4029         case CHIP_VANGOGH:
4030                 adev->mode_info.num_crtc = 4;
4031                 adev->mode_info.num_hpd = 4;
4032                 adev->mode_info.num_dig = 4;
4033                 break;
4034         case CHIP_NAVI10:
4035         case CHIP_NAVI12:
4036         case CHIP_SIENNA_CICHLID:
4037         case CHIP_NAVY_FLOUNDER:
4038                 adev->mode_info.num_crtc = 6;
4039                 adev->mode_info.num_hpd = 6;
4040                 adev->mode_info.num_dig = 6;
4041                 break;
4042         case CHIP_NAVI14:
4043         case CHIP_DIMGREY_CAVEFISH:
4044                 adev->mode_info.num_crtc = 5;
4045                 adev->mode_info.num_hpd = 5;
4046                 adev->mode_info.num_dig = 5;
4047                 break;
4048         case CHIP_BEIGE_GOBY:
4049                 adev->mode_info.num_crtc = 2;
4050                 adev->mode_info.num_hpd = 2;
4051                 adev->mode_info.num_dig = 2;
4052                 break;
4053 #endif
4054         default:
4055                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4056                 return -EINVAL;
4057         }
4058
4059         amdgpu_dm_set_irq_funcs(adev);
4060
4061         if (adev->mode_info.funcs == NULL)
4062                 adev->mode_info.funcs = &dm_display_funcs;
4063
4064         /*
4065          * Note: Do NOT change adev->audio_endpt_rreg and
4066          * adev->audio_endpt_wreg because they are initialised in
4067          * amdgpu_device_init()
4068          */
4069 #if defined(CONFIG_DEBUG_KERNEL_DC)
4070         device_create_file(
4071                 adev_to_drm(adev)->dev,
4072                 &dev_attr_s3_debug);
4073 #endif
4074
4075         return 0;
4076 }
4077
4078 static bool modeset_required(struct drm_crtc_state *crtc_state,
4079                              struct dc_stream_state *new_stream,
4080                              struct dc_stream_state *old_stream)
4081 {
4082         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4083 }
4084
4085 static bool modereset_required(struct drm_crtc_state *crtc_state)
4086 {
4087         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4088 }
4089
4090 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4091 {
4092         drm_encoder_cleanup(encoder);
4093         kfree(encoder);
4094 }
4095
4096 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4097         .destroy = amdgpu_dm_encoder_destroy,
4098 };
4099
4101 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4102                                          struct drm_framebuffer *fb,
4103                                          int *min_downscale, int *max_upscale)
4104 {
4105         struct amdgpu_device *adev = drm_to_adev(dev);
4106         struct dc *dc = adev->dm.dc;
4107         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4108         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4109
4110         switch (fb->format->format) {
4111         case DRM_FORMAT_P010:
4112         case DRM_FORMAT_NV12:
4113         case DRM_FORMAT_NV21:
4114                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4115                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4116                 break;
4117
4118         case DRM_FORMAT_XRGB16161616F:
4119         case DRM_FORMAT_ARGB16161616F:
4120         case DRM_FORMAT_XBGR16161616F:
4121         case DRM_FORMAT_ABGR16161616F:
4122                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4123                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4124                 break;
4125
4126         default:
4127                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4128                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4129                 break;
4130         }
4131
4132         /*
4133          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4134          * scaling factor of 1.0 == 1000 units.
4135          */
4136         if (*max_upscale == 1)
4137                 *max_upscale = 1000;
4138
4139         if (*min_downscale == 1)
4140                 *min_downscale = 1000;
4141 }
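
/*
 * In these units a max_upscale of 16000 allows up to 16.0x upscaling and
 * a min_downscale of 250 allows scanning out at as little as 0.25x of
 * the source size (these are also the fallback values used in
 * fill_dc_scaling_info() below when no plane caps are available).
 */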
4142
4144 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4145                                 struct dc_scaling_info *scaling_info)
4146 {
4147         int scale_w, scale_h, min_downscale, max_upscale;
4148
4149         memset(scaling_info, 0, sizeof(*scaling_info));
4150
4151         /* Source is in 16.16 fixed point; we ignore the fractional part for now. */
4152         scaling_info->src_rect.x = state->src_x >> 16;
4153         scaling_info->src_rect.y = state->src_y >> 16;
4154
4155         /*
4156          * For reasons we don't (yet) fully understand a non-zero
4157          * src_y coordinate into an NV12 buffer can cause a
4158          * system hang. To avoid hangs (and maybe be overly cautious)
4159          * let's reject both non-zero src_x and src_y.
4160          *
4161          * We currently know of only one use-case to reproduce a
4162          * scenario with non-zero src_x and src_y for NV12, which
4163          * is to gesture the YouTube Android app into full screen
4164          * on ChromeOS.
4165          */
4166         if (state->fb &&
4167             state->fb->format->format == DRM_FORMAT_NV12 &&
4168             (scaling_info->src_rect.x != 0 ||
4169              scaling_info->src_rect.y != 0))
4170                 return -EINVAL;
4171
4172         scaling_info->src_rect.width = state->src_w >> 16;
4173         if (scaling_info->src_rect.width == 0)
4174                 return -EINVAL;
4175
4176         scaling_info->src_rect.height = state->src_h >> 16;
4177         if (scaling_info->src_rect.height == 0)
4178                 return -EINVAL;
4179
4180         scaling_info->dst_rect.x = state->crtc_x;
4181         scaling_info->dst_rect.y = state->crtc_y;
4182
4183         if (state->crtc_w == 0)
4184                 return -EINVAL;
4185
4186         scaling_info->dst_rect.width = state->crtc_w;
4187
4188         if (state->crtc_h == 0)
4189                 return -EINVAL;
4190
4191         scaling_info->dst_rect.height = state->crtc_h;
4192
4193         /* DRM doesn't specify clipping on destination output. */
4194         scaling_info->clip_rect = scaling_info->dst_rect;
4195
4196         /* Validate scaling per-format with DC plane caps */
4197         if (state->plane && state->plane->dev && state->fb) {
4198                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4199                                              &min_downscale, &max_upscale);
4200         } else {
4201                 min_downscale = 250;
4202                 max_upscale = 16000;
4203         }
4204
4205         scale_w = scaling_info->dst_rect.width * 1000 /
4206                   scaling_info->src_rect.width;
4207
4208         if (scale_w < min_downscale || scale_w > max_upscale)
4209                 return -EINVAL;
4210
4211         scale_h = scaling_info->dst_rect.height * 1000 /
4212                   scaling_info->src_rect.height;
4213
4214         if (scale_h < min_downscale || scale_h > max_upscale)
4215                 return -EINVAL;
4216
4217         /*
4218          * The "scaling_quality" can be ignored for now; a quality of 0 makes DC
4219          * assume reasonable defaults based on the format.
4220          */
4221
4222         return 0;
4223 }
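
/*
 * Worked example for the ratio checks above (illustrative numbers): a
 * 3840-wide source scanned out at 1920 pixels gives
 * scale_w = 1920 * 1000 / 3840 = 500, i.e. 0.5x, which passes the fallback
 * limits of 250..16000; shrinking the same source to 800 pixels gives
 * scale_w = 208 and is rejected with -EINVAL.
 */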
4224
4225 static void
4226 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4227                                  uint64_t tiling_flags)
4228 {
4229         /* Fill GFX8 params */
4230         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4231                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4232
4233                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4234                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4235                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4236                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4237                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4238
4239                 /* XXX fix me for VI */
4240                 tiling_info->gfx8.num_banks = num_banks;
4241                 tiling_info->gfx8.array_mode =
4242                                 DC_ARRAY_2D_TILED_THIN1;
4243                 tiling_info->gfx8.tile_split = tile_split;
4244                 tiling_info->gfx8.bank_width = bankw;
4245                 tiling_info->gfx8.bank_height = bankh;
4246                 tiling_info->gfx8.tile_aspect = mtaspect;
4247                 tiling_info->gfx8.tile_mode =
4248                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4249         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4250                         == DC_ARRAY_1D_TILED_THIN1) {
4251                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4252         }
4253
4254         tiling_info->gfx8.pipe_config =
4255                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4256 }
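
/*
 * Example decode (hypothetical tiling_flags word): with
 * ARRAY_MODE == DC_ARRAY_2D_TILED_THIN1, BANK_WIDTH == 1, BANK_HEIGHT == 2,
 * MACRO_TILE_ASPECT == 2, TILE_SPLIT == 2 and NUM_BANKS == 8, the fields
 * above are copied verbatim into the gfx8 struct; for linear surfaces only
 * PIPE_CONFIG is filled in.
 */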
4257
4258 static void
4259 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4260                                   union dc_tiling_info *tiling_info)
4261 {
4262         tiling_info->gfx9.num_pipes =
4263                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4264         tiling_info->gfx9.num_banks =
4265                 adev->gfx.config.gb_addr_config_fields.num_banks;
4266         tiling_info->gfx9.pipe_interleave =
4267                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4268         tiling_info->gfx9.num_shader_engines =
4269                 adev->gfx.config.gb_addr_config_fields.num_se;
4270         tiling_info->gfx9.max_compressed_frags =
4271                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4272         tiling_info->gfx9.num_rb_per_se =
4273                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4274         tiling_info->gfx9.shaderEnable = 1;
4275         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4276             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4277             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4278             adev->asic_type == CHIP_BEIGE_GOBY ||
4279             adev->asic_type == CHIP_VANGOGH)
4280                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4281 }
4282
4283 static int
4284 validate_dcc(struct amdgpu_device *adev,
4285              const enum surface_pixel_format format,
4286              const enum dc_rotation_angle rotation,
4287              const union dc_tiling_info *tiling_info,
4288              const struct dc_plane_dcc_param *dcc,
4289              const struct dc_plane_address *address,
4290              const struct plane_size *plane_size)
4291 {
4292         struct dc *dc = adev->dm.dc;
4293         struct dc_dcc_surface_param input;
4294         struct dc_surface_dcc_cap output;
4295
4296         memset(&input, 0, sizeof(input));
4297         memset(&output, 0, sizeof(output));
4298
4299         if (!dcc->enable)
4300                 return 0;
4301
4302         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4303             !dc->cap_funcs.get_dcc_compression_cap)
4304                 return -EINVAL;
4305
4306         input.format = format;
4307         input.surface_size.width = plane_size->surface_size.width;
4308         input.surface_size.height = plane_size->surface_size.height;
4309         input.swizzle_mode = tiling_info->gfx9.swizzle;
4310
4311         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4312                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4313         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4314                 input.scan = SCAN_DIRECTION_VERTICAL;
4315
4316         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4317                 return -EINVAL;
4318
4319         if (!output.capable)
4320                 return -EINVAL;
4321
4322         if (dcc->independent_64b_blks == 0 &&
4323             output.grph.rgb.independent_64b_blks != 0)
4324                 return -EINVAL;
4325
4326         return 0;
4327 }
4328
4329 static bool
4330 modifier_has_dcc(uint64_t modifier)
4331 {
4332         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4333 }
4334
4335 static unsigned
4336 modifier_gfx9_swizzle_mode(uint64_t modifier)
4337 {
4338         if (modifier == DRM_FORMAT_MOD_LINEAR)
4339                 return 0;
4340
4341         return AMD_FMT_MOD_GET(TILE, modifier);
4342 }
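
/*
 * Sketch of how these helpers compose (hypothetical modifier): for an AMD
 * modifier with TILE == AMD_FMT_MOD_TILE_GFX9_64K_S_X and DCC == 1,
 * modifier_has_dcc() returns true and modifier_gfx9_swizzle_mode() returns
 * the 64K_S_X swizzle mode, while DRM_FORMAT_MOD_LINEAR is special-cased
 * to swizzle mode 0 (linear).
 */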
4343
4344 static const struct drm_format_info *
4345 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4346 {
4347         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4348 }
4349
4350 static void
4351 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4352                                     union dc_tiling_info *tiling_info,
4353                                     uint64_t modifier)
4354 {
4355         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4356         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4357         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4358         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4359
4360         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4361
4362         if (!IS_AMD_FMT_MOD(modifier))
4363                 return;
4364
4365         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4366         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4367
4368         if (adev->family >= AMDGPU_FAMILY_NV) {
4369                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4370         } else {
4371                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4372
4373                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4374         }
4375 }
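
/*
 * Worked example (hypothetical modifier fields): with PIPE_XOR_BITS == 5,
 * pipes_log2 = min(4, 5) = 4, so num_pipes = 1 << 4 = 16 and
 * num_shader_engines = 1 << (5 - 4) = 2; on Navi and later PACKERS == 3
 * then yields num_pkrs = 8, while older parts take num_banks from
 * BANK_XOR_BITS instead.
 */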
4376
4377 enum dm_micro_swizzle {
4378         MICRO_SWIZZLE_Z = 0,
4379         MICRO_SWIZZLE_S = 1,
4380         MICRO_SWIZZLE_D = 2,
4381         MICRO_SWIZZLE_R = 3
4382 };
4383
4384 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4385                                           uint32_t format,
4386                                           uint64_t modifier)
4387 {
4388         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4389         const struct drm_format_info *info = drm_format_info(format);
4390         int i;
4391
4392         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4393
4394         if (!info)
4395                 return false;
4396
4397         /*
4398          * We always have to allow these modifiers:
4399          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4400          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4401          */
4402         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4403             modifier == DRM_FORMAT_MOD_INVALID) {
4404                 return true;
4405         }
4406
4407         /* Check that the modifier is on the list of the plane's supported modifiers. */
4408         for (i = 0; i < plane->modifier_count; i++) {
4409                 if (modifier == plane->modifiers[i])
4410                         break;
4411         }
4412         if (i == plane->modifier_count)
4413                 return false;
4414
4415         /*
4416          * For D swizzle the canonical modifier depends on the bpp, so check
4417          * it here.
4418          */
4419         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4420             adev->family >= AMDGPU_FAMILY_NV) {
4421                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4422                         return false;
4423         }
4424
4425         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4426             info->cpp[0] < 8)
4427                 return false;
4428
4429         if (modifier_has_dcc(modifier)) {
4430                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4431                 if (info->cpp[0] != 4)
4432                         return false;
4433                 /* We support multi-planar formats, but not when combined with
4434                  * additional DCC metadata planes. */
4435                 if (info->num_planes > 1)
4436                         return false;
4437         }
4438
4439         return true;
4440 }
4441
4442 static void
4443 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4444 {
4445         if (!*mods)
4446                 return;
4447
4448         if (*cap - *size < 1) {
4449                 uint64_t new_cap = *cap * 2;
4450                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4451
4452                 if (!new_mods) {
4453                         kfree(*mods);
4454                         *mods = NULL;
4455                         return;
4456                 }
4457
4458                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4459                 kfree(*mods);
4460                 *mods = new_mods;
4461                 *cap = new_cap;
4462         }
4463
4464         (*mods)[*size] = mod;
4465         *size += 1;
4466 }
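
/*
 * Note on the growth strategy above: the array doubles whenever it is full
 * (e.g. 128 -> 256 entries) for amortized O(1) appends, and it collapses to
 * NULL on allocation failure so that every subsequent add_modifier() call
 * becomes a no-op and callers only need to check *mods once, after the
 * last append.
 */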
4467
4468 static void
4469 add_gfx9_modifiers(const struct amdgpu_device *adev,
4470                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4471 {
4472         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4473         int pipe_xor_bits = min(8, pipes +
4474                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4475         int bank_xor_bits = min(8 - pipe_xor_bits,
4476                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4477         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4478                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4479
4480
4481         if (adev->family == AMDGPU_FAMILY_RV) {
4482                 /* Raven2 and later */
4483                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4484
4485                 /*
4486                  * No _D DCC swizzles yet because we only allow 32bpp, which
4487                  * doesn't support _D on DCN
4488                  */
4489
4490                 if (has_constant_encode) {
4491                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4492                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4493                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4494                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4495                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4496                                     AMD_FMT_MOD_SET(DCC, 1) |
4497                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4498                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4499                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4500                 }
4501
4502                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4503                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4504                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4505                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4506                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4507                             AMD_FMT_MOD_SET(DCC, 1) |
4508                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4509                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4510                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4511
4512                 if (has_constant_encode) {
4513                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4514                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4515                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4516                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4517                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4518                                     AMD_FMT_MOD_SET(DCC, 1) |
4519                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4520                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4521                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4522
4523                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4524                                     AMD_FMT_MOD_SET(RB, rb) |
4525                                     AMD_FMT_MOD_SET(PIPE, pipes));
4526                 }
4527
4528                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4529                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4530                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4531                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4532                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4533                             AMD_FMT_MOD_SET(DCC, 1) |
4534                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4535                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4536                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4537                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4538                             AMD_FMT_MOD_SET(RB, rb) |
4539                             AMD_FMT_MOD_SET(PIPE, pipes));
4540         }
4541
4542         /*
4543          * Only supported for 64bpp on Raven, will be filtered on format in
4544          * dm_plane_format_mod_supported.
4545          */
4546         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4547                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4548                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4549                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4550                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4551
4552         if (adev->family == AMDGPU_FAMILY_RV) {
4553                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4554                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4555                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4556                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4557                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4558         }
4559
4560         /*
4561          * Only supported for 64bpp on Raven, will be filtered on format in
4562          * dm_plane_format_mod_supported.
4563          */
4564         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4565                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4566                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4567
4568         if (adev->family == AMDGPU_FAMILY_RV) {
4569                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4570                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4571                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4572         }
4573 }
4574
4575 static void
4576 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4577                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4578 {
4579         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4580
4581         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4582                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4583                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4584                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4585                     AMD_FMT_MOD_SET(DCC, 1) |
4586                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4587                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4588                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4589
4590         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4591                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4592                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4593                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4594                     AMD_FMT_MOD_SET(DCC, 1) |
4595                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4596                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4597                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4598                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4599
4600         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4601                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4602                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4603                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4604
4605         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4606                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4607                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4608                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4609
4610
4611         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4612         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4613                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4614                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4615
4616         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4617                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4618                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4619 }
4620
4621 static void
4622 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4623                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4624 {
4625         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4626         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4627
4628         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4629                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4630                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4631                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4632                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4633                     AMD_FMT_MOD_SET(DCC, 1) |
4634                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4635                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4636                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4637                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4638
4639         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4640                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4641                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4642                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4643                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4644                     AMD_FMT_MOD_SET(DCC, 1) |
4645                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4646                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4647                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4648                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4649                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4650
4651         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4652                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4653                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4654                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4655                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4656
4657         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4658                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4659                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4660                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4661                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4662
4663         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4664         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4665                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4666                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4667
4668         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4669                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4670                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4671 }
4672
4673 static int
4674 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4675 {
4676         uint64_t size = 0, capacity = 128;
4677         *mods = NULL;
4678
4679         /* We have not hooked up any pre-GFX9 modifiers. */
4680         if (adev->family < AMDGPU_FAMILY_AI)
4681                 return 0;
4682
4683         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4684
4685         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4686                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4687                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4688                 return *mods ? 0 : -ENOMEM;
4689         }
4690
4691         switch (adev->family) {
4692         case AMDGPU_FAMILY_AI:
4693         case AMDGPU_FAMILY_RV:
4694                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4695                 break;
4696         case AMDGPU_FAMILY_NV:
4697         case AMDGPU_FAMILY_VGH:
4698                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4699                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4700                 else
4701                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4702                 break;
4703         }
4704
4705         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4706
4707         /* INVALID marks the end of the list. */
4708         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4709
4710         if (!*mods)
4711                 return -ENOMEM;
4712
4713         return 0;
4714 }
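
/*
 * Resulting list layout (sketch): a GFX9+ cursor plane gets just
 * { LINEAR, INVALID }, while primary/overlay planes get the
 * family-specific modifiers followed by LINEAR and then the INVALID
 * terminator that marks the end of the list.
 */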
4715
4716 static int
4717 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4718                                           const struct amdgpu_framebuffer *afb,
4719                                           const enum surface_pixel_format format,
4720                                           const enum dc_rotation_angle rotation,
4721                                           const struct plane_size *plane_size,
4722                                           union dc_tiling_info *tiling_info,
4723                                           struct dc_plane_dcc_param *dcc,
4724                                           struct dc_plane_address *address,
4725                                           const bool force_disable_dcc)
4726 {
4727         const uint64_t modifier = afb->base.modifier;
4728         int ret;
4729
4730         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4731         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4732
4733         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4734                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4735
4736                 dcc->enable = 1;
4737                 dcc->meta_pitch = afb->base.pitches[1];
4738                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4739
4740                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4741                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4742         }
4743
4744         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4745         if (ret)
4746                 return ret;
4747
4748         return 0;
4749 }
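
/*
 * Framebuffer layout assumed above for DCC (per the AMD modifier
 * convention): plane 0 of the drm_framebuffer holds the pixel data and
 * plane 1 the DCC metadata, so offsets[1] / pitches[1] locate the
 * compression metadata surface relative to the buffer's base address.
 */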
4750
4751 static int
4752 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4753                              const struct amdgpu_framebuffer *afb,
4754                              const enum surface_pixel_format format,
4755                              const enum dc_rotation_angle rotation,
4756                              const uint64_t tiling_flags,
4757                              union dc_tiling_info *tiling_info,
4758                              struct plane_size *plane_size,
4759                              struct dc_plane_dcc_param *dcc,
4760                              struct dc_plane_address *address,
4761                              bool tmz_surface,
4762                              bool force_disable_dcc)
4763 {
4764         const struct drm_framebuffer *fb = &afb->base;
4765         int ret;
4766
4767         memset(tiling_info, 0, sizeof(*tiling_info));
4768         memset(plane_size, 0, sizeof(*plane_size));
4769         memset(dcc, 0, sizeof(*dcc));
4770         memset(address, 0, sizeof(*address));
4771
4772         address->tmz_surface = tmz_surface;
4773
4774         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4775                 uint64_t addr = afb->address + fb->offsets[0];
4776
4777                 plane_size->surface_size.x = 0;
4778                 plane_size->surface_size.y = 0;
4779                 plane_size->surface_size.width = fb->width;
4780                 plane_size->surface_size.height = fb->height;
4781                 plane_size->surface_pitch =
4782                         fb->pitches[0] / fb->format->cpp[0];
4783
4784                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4785                 address->grph.addr.low_part = lower_32_bits(addr);
4786                 address->grph.addr.high_part = upper_32_bits(addr);
4787         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4788                 uint64_t luma_addr = afb->address + fb->offsets[0];
4789                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4790
4791                 plane_size->surface_size.x = 0;
4792                 plane_size->surface_size.y = 0;
4793                 plane_size->surface_size.width = fb->width;
4794                 plane_size->surface_size.height = fb->height;
4795                 plane_size->surface_pitch =
4796                         fb->pitches[0] / fb->format->cpp[0];
4797
4798                 plane_size->chroma_size.x = 0;
4799                 plane_size->chroma_size.y = 0;
4800                 /* TODO: set these based on surface format */
4801                 plane_size->chroma_size.width = fb->width / 2;
4802                 plane_size->chroma_size.height = fb->height / 2;
4803
4804                 plane_size->chroma_pitch =
4805                         fb->pitches[1] / fb->format->cpp[1];
4806
4807                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4808                 address->video_progressive.luma_addr.low_part =
4809                         lower_32_bits(luma_addr);
4810                 address->video_progressive.luma_addr.high_part =
4811                         upper_32_bits(luma_addr);
4812                 address->video_progressive.chroma_addr.low_part =
4813                         lower_32_bits(chroma_addr);
4814                 address->video_progressive.chroma_addr.high_part =
4815                         upper_32_bits(chroma_addr);
4816         }
4817
4818         if (adev->family >= AMDGPU_FAMILY_AI) {
4819                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4820                                                                 rotation, plane_size,
4821                                                                 tiling_info, dcc,
4822                                                                 address,
4823                                                                 force_disable_dcc);
4824                 if (ret)
4825                         return ret;
4826         } else {
4827                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4828         }
4829
4830         return 0;
4831 }
4832
4833 static void
4834 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4835                                bool *per_pixel_alpha, bool *global_alpha,
4836                                int *global_alpha_value)
4837 {
4838         *per_pixel_alpha = false;
4839         *global_alpha = false;
4840         *global_alpha_value = 0xff;
4841
4842         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4843                 return;
4844
4845         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4846                 static const uint32_t alpha_formats[] = {
4847                         DRM_FORMAT_ARGB8888,
4848                         DRM_FORMAT_RGBA8888,
4849                         DRM_FORMAT_ABGR8888,
4850                 };
4851                 uint32_t format = plane_state->fb->format->format;
4852                 unsigned int i;
4853
4854                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4855                         if (format == alpha_formats[i]) {
4856                                 *per_pixel_alpha = true;
4857                                 break;
4858                         }
4859                 }
4860         }
4861
4862         if (plane_state->alpha < 0xffff) {
4863                 *global_alpha = true;
4864                 *global_alpha_value = plane_state->alpha >> 8;
4865         }
4866 }
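
/*
 * Worked example for the alpha conversion above: DRM stores plane alpha as
 * a 16-bit value (0x0000-0xffff) while DC takes 8 bits, so a 50% alpha of
 * 0x8000 becomes 0x8000 >> 8 == 0x80; a fully opaque 0xffff leaves global
 * alpha disabled entirely.
 */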
4867
4868 static int
4869 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4870                             const enum surface_pixel_format format,
4871                             enum dc_color_space *color_space)
4872 {
4873         bool full_range;
4874
4875         *color_space = COLOR_SPACE_SRGB;
4876
4877         /* DRM color properties only affect non-RGB formats. */
4878         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4879                 return 0;
4880
4881         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4882
4883         switch (plane_state->color_encoding) {
4884         case DRM_COLOR_YCBCR_BT601:
4885                 if (full_range)
4886                         *color_space = COLOR_SPACE_YCBCR601;
4887                 else
4888                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4889                 break;
4890
4891         case DRM_COLOR_YCBCR_BT709:
4892                 if (full_range)
4893                         *color_space = COLOR_SPACE_YCBCR709;
4894                 else
4895                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4896                 break;
4897
4898         case DRM_COLOR_YCBCR_BT2020:
4899                 if (full_range)
4900                         *color_space = COLOR_SPACE_2020_YCBCR;
4901                 else
4902                         return -EINVAL;
4903                 break;
4904
4905         default:
4906                 return -EINVAL;
4907         }
4908
4909         return 0;
4910 }
4911
4912 static int
4913 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4914                             const struct drm_plane_state *plane_state,
4915                             const uint64_t tiling_flags,
4916                             struct dc_plane_info *plane_info,
4917                             struct dc_plane_address *address,
4918                             bool tmz_surface,
4919                             bool force_disable_dcc)
4920 {
4921         const struct drm_framebuffer *fb = plane_state->fb;
4922         const struct amdgpu_framebuffer *afb =
4923                 to_amdgpu_framebuffer(plane_state->fb);
4924         int ret;
4925
4926         memset(plane_info, 0, sizeof(*plane_info));
4927
4928         switch (fb->format->format) {
4929         case DRM_FORMAT_C8:
4930                 plane_info->format =
4931                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4932                 break;
4933         case DRM_FORMAT_RGB565:
4934                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4935                 break;
4936         case DRM_FORMAT_XRGB8888:
4937         case DRM_FORMAT_ARGB8888:
4938                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4939                 break;
4940         case DRM_FORMAT_XRGB2101010:
4941         case DRM_FORMAT_ARGB2101010:
4942                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4943                 break;
4944         case DRM_FORMAT_XBGR2101010:
4945         case DRM_FORMAT_ABGR2101010:
4946                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4947                 break;
4948         case DRM_FORMAT_XBGR8888:
4949         case DRM_FORMAT_ABGR8888:
4950                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4951                 break;
4952         case DRM_FORMAT_NV21:
4953                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4954                 break;
4955         case DRM_FORMAT_NV12:
4956                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4957                 break;
4958         case DRM_FORMAT_P010:
4959                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4960                 break;
4961         case DRM_FORMAT_XRGB16161616F:
4962         case DRM_FORMAT_ARGB16161616F:
4963                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4964                 break;
4965         case DRM_FORMAT_XBGR16161616F:
4966         case DRM_FORMAT_ABGR16161616F:
4967                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4968                 break;
4969         case DRM_FORMAT_XRGB16161616:
4970         case DRM_FORMAT_ARGB16161616:
4971                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4972                 break;
4973         case DRM_FORMAT_XBGR16161616:
4974         case DRM_FORMAT_ABGR16161616:
4975                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4976                 break;
4977         default:
4978                 DRM_ERROR(
4979                         "Unsupported screen format %p4cc\n",
4980                         &fb->format->format);
4981                 return -EINVAL;
4982         }
4983
4984         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4985         case DRM_MODE_ROTATE_0:
4986                 plane_info->rotation = ROTATION_ANGLE_0;
4987                 break;
4988         case DRM_MODE_ROTATE_90:
4989                 plane_info->rotation = ROTATION_ANGLE_90;
4990                 break;
4991         case DRM_MODE_ROTATE_180:
4992                 plane_info->rotation = ROTATION_ANGLE_180;
4993                 break;
4994         case DRM_MODE_ROTATE_270:
4995                 plane_info->rotation = ROTATION_ANGLE_270;
4996                 break;
4997         default:
4998                 plane_info->rotation = ROTATION_ANGLE_0;
4999                 break;
5000         }
5001
5002         plane_info->visible = true;
5003         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5004
5005         plane_info->layer_index = 0;
5006
5007         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5008                                           &plane_info->color_space);
5009         if (ret)
5010                 return ret;
5011
5012         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5013                                            plane_info->rotation, tiling_flags,
5014                                            &plane_info->tiling_info,
5015                                            &plane_info->plane_size,
5016                                            &plane_info->dcc, address, tmz_surface,
5017                                            force_disable_dcc);
5018         if (ret)
5019                 return ret;
5020
5021         fill_blending_from_plane_state(
5022                 plane_state, &plane_info->per_pixel_alpha,
5023                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5024
5025         return 0;
5026 }
5027
5028 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5029                                     struct dc_plane_state *dc_plane_state,
5030                                     struct drm_plane_state *plane_state,
5031                                     struct drm_crtc_state *crtc_state)
5032 {
5033         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5034         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5035         struct dc_scaling_info scaling_info;
5036         struct dc_plane_info plane_info;
5037         int ret;
5038         bool force_disable_dcc = false;
5039
5040         ret = fill_dc_scaling_info(plane_state, &scaling_info);
5041         if (ret)
5042                 return ret;
5043
5044         dc_plane_state->src_rect = scaling_info.src_rect;
5045         dc_plane_state->dst_rect = scaling_info.dst_rect;
5046         dc_plane_state->clip_rect = scaling_info.clip_rect;
5047         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5048
5049         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5050         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5051                                           afb->tiling_flags,
5052                                           &plane_info,
5053                                           &dc_plane_state->address,
5054                                           afb->tmz_surface,
5055                                           force_disable_dcc);
5056         if (ret)
5057                 return ret;
5058
5059         dc_plane_state->format = plane_info.format;
5060         dc_plane_state->color_space = plane_info.color_space;
5062         dc_plane_state->plane_size = plane_info.plane_size;
5063         dc_plane_state->rotation = plane_info.rotation;
5064         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5065         dc_plane_state->stereo_format = plane_info.stereo_format;
5066         dc_plane_state->tiling_info = plane_info.tiling_info;
5067         dc_plane_state->visible = plane_info.visible;
5068         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5069         dc_plane_state->global_alpha = plane_info.global_alpha;
5070         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5071         dc_plane_state->dcc = plane_info.dcc;
5072         dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
5073         dc_plane_state->flip_int_enabled = true;
5074
5075         /*
5076          * Always set input transfer function, since plane state is refreshed
5077          * every time.
5078          */
5079         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5080         if (ret)
5081                 return ret;
5082
5083         return 0;
5084 }
5085
5086 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5087                                            const struct dm_connector_state *dm_state,
5088                                            struct dc_stream_state *stream)
5089 {
5090         enum amdgpu_rmx_type rmx_type;
5091
5092         struct rect src = { 0 }; /* viewport in composition space */
5093         struct rect dst = { 0 }; /* stream addressable area */
5094
5095         /* no mode. nothing to be done */
5096         if (!mode)
5097                 return;
5098
5099         /* Full screen scaling by default */
5100         src.width = mode->hdisplay;
5101         src.height = mode->vdisplay;
5102         dst.width = stream->timing.h_addressable;
5103         dst.height = stream->timing.v_addressable;
5104
5105         if (dm_state) {
5106                 rmx_type = dm_state->scaling;
5107                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5108                         if (src.width * dst.height <
5109                                         src.height * dst.width) {
5110                                 /* height needs less upscaling/more downscaling */
5111                                 dst.width = src.width *
5112                                                 dst.height / src.height;
5113                         } else {
5114                                 /* width needs less upscaling/more downscaling */
5115                                 dst.height = src.height *
5116                                                 dst.width / src.width;
5117                         }
5118                 } else if (rmx_type == RMX_CENTER) {
5119                         dst = src;
5120                 }
5121
5122                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5123                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5124
5125                 if (dm_state->underscan_enable) {
5126                         dst.x += dm_state->underscan_hborder / 2;
5127                         dst.y += dm_state->underscan_vborder / 2;
5128                         dst.width -= dm_state->underscan_hborder;
5129                         dst.height -= dm_state->underscan_vborder;
5130                 }
5131         }
5132
5133         stream->src = src;
5134         stream->dst = dst;
5135
5136         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5137                       dst.x, dst.y, dst.width, dst.height);
5139 }
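
/*
 * Worked example for the RMX_ASPECT math above (illustrative mode): a
 * 1280x720 source on a 1920x1200 stream compares 1280 * 1200 (1536000)
 * against 720 * 1920 (1382400); the first is not smaller, so the height is
 * recomputed as 720 * 1920 / 1280 = 1080 and the image is centered with
 * dst.y = (1200 - 1080) / 2 = 60, i.e. letterboxed top and bottom.
 */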
5140
5141 static enum dc_color_depth
5142 convert_color_depth_from_display_info(const struct drm_connector *connector,
5143                                       bool is_y420, int requested_bpc)
5144 {
5145         uint8_t bpc;
5146
5147         if (is_y420) {
5148                 bpc = 8;
5149
5150                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5151                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5152                         bpc = 16;
5153                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5154                         bpc = 12;
5155                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5156                         bpc = 10;
5157         } else {
5158                 bpc = (uint8_t)connector->display_info.bpc;
5159                 /* Assume 8 bpc by default if no bpc is specified. */
5160                 bpc = bpc ? bpc : 8;
5161         }
5162
5163         if (requested_bpc > 0) {
5164                 /*
5165                  * Cap display bpc based on the user-requested value.
5166                  *
5167                  * The value for state->max_bpc may not be correctly updated
5168                  * depending on when the connector gets added to the state
5169                  * or if this was called outside of atomic check, so it
5170                  * can't be used directly.
5171                  */
5172                 bpc = min_t(u8, bpc, requested_bpc);
5173
5174                 /* Round down to the nearest even number. */
5175                 bpc = bpc - (bpc & 1);
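                /*
                 * e.g. a panel reporting 12 bpc with a requested_bpc of 11
                 * is first capped to 11 and then rounded down to 10 here.
                 */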
5176         }
5177
5178         switch (bpc) {
5179         case 0:
5180                 /*
5181                  * Temporary workaround: DRM doesn't parse color depth for
5182                  * EDID revisions before 1.4.
5183                  * TODO: Fix EDID parsing
5184                  */
5185                 return COLOR_DEPTH_888;
5186         case 6:
5187                 return COLOR_DEPTH_666;
5188         case 8:
5189                 return COLOR_DEPTH_888;
5190         case 10:
5191                 return COLOR_DEPTH_101010;
5192         case 12:
5193                 return COLOR_DEPTH_121212;
5194         case 14:
5195                 return COLOR_DEPTH_141414;
5196         case 16:
5197                 return COLOR_DEPTH_161616;
5198         default:
5199                 return COLOR_DEPTH_UNDEFINED;
5200         }
5201 }
5202
5203 static enum dc_aspect_ratio
5204 get_aspect_ratio(const struct drm_display_mode *mode_in)
5205 {
5206         /* 1-1 mapping, since both enums follow the HDMI spec. */
5207         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5208 }
5209
5210 static enum dc_color_space
5211 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5212 {
5213         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5214
5215         switch (dc_crtc_timing->pixel_encoding) {
5216         case PIXEL_ENCODING_YCBCR422:
5217         case PIXEL_ENCODING_YCBCR444:
5218         case PIXEL_ENCODING_YCBCR420:
5219         {
5220                 /*
5221                  * A pixel clock of 27.03 MHz is the separation point between
5222                  * HDTV and SDTV according to the HDMI spec; we use YCbCr709
5223                  * and YCbCr601 respectively.
5224                  */
5225                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5226                         if (dc_crtc_timing->flags.Y_ONLY)
5227                                 color_space =
5228                                         COLOR_SPACE_YCBCR709_LIMITED;
5229                         else
5230                                 color_space = COLOR_SPACE_YCBCR709;
5231                 } else {
5232                         if (dc_crtc_timing->flags.Y_ONLY)
5233                                 color_space =
5234                                         COLOR_SPACE_YCBCR601_LIMITED;
5235                         else
5236                                 color_space = COLOR_SPACE_YCBCR601;
5237                 }
5238
5239         }
5240         break;
5241         case PIXEL_ENCODING_RGB:
5242                 color_space = COLOR_SPACE_SRGB;
5243                 break;
5244
5245         default:
5246                 WARN_ON(1);
5247                 break;
5248         }
5249
5250         return color_space;
5251 }
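
/*
 * Concrete examples of the 27.03 MHz split above: 480p (27.00 MHz pixel
 * clock) falls below the threshold and gets YCbCr601, while 720p/1080p
 * (74.25 / 148.5 MHz) land above it and get YCbCr709; Y-only timings map
 * to the corresponding _LIMITED spaces.
 */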
5252
5253 static bool adjust_colour_depth_from_display_info(
5254         struct dc_crtc_timing *timing_out,
5255         const struct drm_display_info *info)
5256 {
5257         enum dc_color_depth depth = timing_out->display_color_depth;
5258         int normalized_clk;
5259         do {
5260                 normalized_clk = timing_out->pix_clk_100hz / 10;
5261                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5262                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5263                         normalized_clk /= 2;
5264                 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5265                 switch (depth) {
5266                 case COLOR_DEPTH_888:
5267                         break;
5268                 case COLOR_DEPTH_101010:
5269                         normalized_clk = (normalized_clk * 30) / 24;
5270                         break;
5271                 case COLOR_DEPTH_121212:
5272                         normalized_clk = (normalized_clk * 36) / 24;
5273                         break;
5274                 case COLOR_DEPTH_161616:
5275                         normalized_clk = (normalized_clk * 48) / 24;
5276                         break;
5277                 default:
5278                         /* The above depths are the only ones valid for HDMI. */
5279                         return false;
5280                 }
5281                 if (normalized_clk <= info->max_tmds_clock) {
5282                         timing_out->display_color_depth = depth;
5283                         return true;
5284                 }
5285         } while (--depth > COLOR_DEPTH_666);
5286         return false;
5287 }
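
/*
 * Worked example (the sink limit is hypothetical): 1080p60 has
 * pix_clk_100hz == 1485000, i.e. normalized_clk == 148500 kHz. At 12-bit
 * colour that becomes 148500 * 36 / 24 == 222750, so a sink advertising
 * max_tmds_clock == 225000 kHz keeps 12-bit, while one advertising
 * 165000 kHz steps down through 10-bit (185625, still too high) and
 * settles at COLOR_DEPTH_888.
 */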
5288
5289 static void fill_stream_properties_from_drm_display_mode(
5290         struct dc_stream_state *stream,
5291         const struct drm_display_mode *mode_in,
5292         const struct drm_connector *connector,
5293         const struct drm_connector_state *connector_state,
5294         const struct dc_stream_state *old_stream,
5295         int requested_bpc)
5296 {
5297         struct dc_crtc_timing *timing_out = &stream->timing;
5298         const struct drm_display_info *info = &connector->display_info;
5299         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5300         struct hdmi_vendor_infoframe hv_frame;
5301         struct hdmi_avi_infoframe avi_frame;
5302
5303         memset(&hv_frame, 0, sizeof(hv_frame));
5304         memset(&avi_frame, 0, sizeof(avi_frame));
5305
5306         timing_out->h_border_left = 0;
5307         timing_out->h_border_right = 0;
5308         timing_out->v_border_top = 0;
5309         timing_out->v_border_bottom = 0;
5310         /* TODO: un-hardcode */
5311         if (drm_mode_is_420_only(info, mode_in)
5312                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5313                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5314         else if (drm_mode_is_420_also(info, mode_in)
5315                         && aconnector->force_yuv420_output)
5316                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5317         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5318                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5319                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5320         else
5321                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5322
5323         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5324         timing_out->display_color_depth = convert_color_depth_from_display_info(
5325                 connector,
5326                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5327                 requested_bpc);
5328         timing_out->scan_type = SCANNING_TYPE_NODATA;
5329         timing_out->hdmi_vic = 0;
5330
5331         if (old_stream) {
5332                 timing_out->vic = old_stream->timing.vic;
5333                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5334                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5335         } else {
5336                 timing_out->vic = drm_match_cea_mode(mode_in);
5337                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5338                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5339                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5340                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5341         }
5342
5343         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5344                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5345                 timing_out->vic = avi_frame.video_code;
5346                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5347                 timing_out->hdmi_vic = hv_frame.vic;
5348         }
5349
5350         if (is_freesync_video_mode(mode_in, aconnector)) {
5351                 timing_out->h_addressable = mode_in->hdisplay;
5352                 timing_out->h_total = mode_in->htotal;
5353                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5354                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5355                 timing_out->v_total = mode_in->vtotal;
5356                 timing_out->v_addressable = mode_in->vdisplay;
5357                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5358                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5359                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5360         } else {
5361                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5362                 timing_out->h_total = mode_in->crtc_htotal;
5363                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5364                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5365                 timing_out->v_total = mode_in->crtc_vtotal;
5366                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5367                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5368                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5369                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5370         }
5371
5372         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5373
5374         stream->output_color_space = get_output_color_space(timing_out);
5375
5376         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5377         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5378         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5379                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5380                     drm_mode_is_420_also(info, mode_in) &&
5381                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5382                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5383                         adjust_colour_depth_from_display_info(timing_out, info);
5384                 }
5385         }
5386 }
5387
5388 static void fill_audio_info(struct audio_info *audio_info,
5389                             const struct drm_connector *drm_connector,
5390                             const struct dc_sink *dc_sink)
5391 {
5392         int i = 0;
5393         int cea_revision = 0;
5394         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5395
5396         audio_info->manufacture_id = edid_caps->manufacturer_id;
5397         audio_info->product_id = edid_caps->product_id;
5398
5399         cea_revision = drm_connector->display_info.cea_rev;
5400
5401         strscpy(audio_info->display_name,
5402                 edid_caps->display_name,
5403                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5404
5405         if (cea_revision >= 3) {
5406                 audio_info->mode_count = edid_caps->audio_mode_count;
5407
5408                 for (i = 0; i < audio_info->mode_count; ++i) {
5409                         audio_info->modes[i].format_code =
5410                                         (enum audio_format_code)
5411                                         (edid_caps->audio_modes[i].format_code);
5412                         audio_info->modes[i].channel_count =
5413                                         edid_caps->audio_modes[i].channel_count;
5414                         audio_info->modes[i].sample_rates.all =
5415                                         edid_caps->audio_modes[i].sample_rate;
5416                         audio_info->modes[i].sample_size =
5417                                         edid_caps->audio_modes[i].sample_size;
5418                 }
5419         }
5420
5421         audio_info->flags.all = edid_caps->speaker_flags;
5422
5423         /* TODO: We only check for the progressive mode, check for interlace mode too */
5424         if (drm_connector->latency_present[0]) {
5425                 audio_info->video_latency = drm_connector->video_latency[0];
5426                 audio_info->audio_latency = drm_connector->audio_latency[0];
5427         }
5428
5429         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5430
5431 }
5432
5433 static void
5434 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5435                                       struct drm_display_mode *dst_mode)
5436 {
5437         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5438         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5439         dst_mode->crtc_clock = src_mode->crtc_clock;
5440         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5441         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5442         dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5443         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5444         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5445         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5446         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5447         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5448         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5449         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5450         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5451 }
5452
5453 static void
5454 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5455                                         const struct drm_display_mode *native_mode,
5456                                         bool scale_enabled)
5457 {
5458         if (scale_enabled) {
5459                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5460         } else if (native_mode->clock == drm_mode->clock &&
5461                         native_mode->htotal == drm_mode->htotal &&
5462                         native_mode->vtotal == drm_mode->vtotal) {
5463                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5464         } else {
5465                 /* no scaling and no amdgpu-inserted mode; nothing to patch */
5466         }
5467 }
5468
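/*
 * A virtual "fake" sink lets us build a dc_stream_state for a link without a
 * physical sink attached (e.g. a forced-on or headless connector).
 */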
5469 static struct dc_sink *
5470 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5471 {
5472         struct dc_sink_init_data sink_init_data = { 0 };
5473         struct dc_sink *sink = NULL;
5474         sink_init_data.link = aconnector->dc_link;
5475         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5476
5477         sink = dc_sink_create(&sink_init_data);
5478         if (!sink) {
5479                 DRM_ERROR("Failed to create sink!\n");
5480                 return NULL;
5481         }
5482         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5483
5484         return sink;
5485 }
5486
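/*
 * Multi-display sync: a synchronized stream has its CRTC reset triggered on a
 * VSYNC edge of its master stream; the edge polarity follows the master's
 * VSYNC polarity, applied at the next pixel.
 */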
5487 static void set_multisync_trigger_params(
5488                 struct dc_stream_state *stream)
5489 {
5490         struct dc_stream_state *master = NULL;
5491
5492         if (stream->triggered_crtc_reset.enabled) {
5493                 master = stream->triggered_crtc_reset.event_source;
5494                 stream->triggered_crtc_reset.event =
5495                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5496                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5497                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5498         }
5499 }
5500
5501 static void set_master_stream(struct dc_stream_state *stream_set[],
5502                               int stream_count)
5503 {
5504         int j, highest_rfr = 0, master_stream = 0;
5505
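        /*
         * refresh_rate below is pix_clk_100hz * 100 / (h_total * v_total),
         * i.e. the refresh rate in Hz. For example, a 1080p60 timing with
         * pix_clk_100hz = 1485000 (148.5 MHz), h_total = 2200 and
         * v_total = 1125 gives 148500000 / 2475000 = 60 Hz.
         */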
5506         for (j = 0; j < stream_count; j++) {
5507                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5508                         int refresh_rate = 0;
5509
5510                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5511                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5512                         if (refresh_rate > highest_rfr) {
5513                                 highest_rfr = refresh_rate;
5514                                 master_stream = j;
5515                         }
5516                 }
5517         }
5518         for (j = 0; j < stream_count; j++) {
5519                 if (stream_set[j])
5520                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5521         }
5522 }
5523
5524 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5525 {
5526         int i = 0;
5527         struct dc_stream_state *stream;
5528
5529         if (context->stream_count < 2)
5530                 return;
5531         for (i = 0; i < context->stream_count; i++) {
5532                 if (!context->streams[i])
5533                         continue;
5534                 /*
5535                  * TODO: add a function to read AMD VSDB bits and set
5536                  * the crtc_sync_master.multi_sync_enabled flag.
5537                  * For now it is set to false.
5538                  */
5539         }
5540
5541         set_master_stream(context->streams, context->stream_count);
5542
5543         for (i = 0; i < context->stream_count; i++) {
5544                 stream = context->streams[i];
5545
5546                 if (!stream)
5547                         continue;
5548
5549                 set_multisync_trigger_params(stream);
5550         }
5551 }
5552
5553 #if defined(CONFIG_DRM_AMD_DC_DCN)
5554 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5555                                                         struct dc_sink *sink, struct dc_stream_state *stream,
5556                                                         struct dsc_dec_dpcd_caps *dsc_caps)
5557 {
5558         stream->timing.flags.DSC = 0;
5559
5560         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5561                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5562                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5563                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5564                                       dsc_caps);
5565         }
5566 }
5567
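/*
 * SST DSC policy: compute a DSC config against the verified link bandwidth,
 * then let the debugfs dsc_settings overrides force enablement and/or pin the
 * slice counts and target bits per pixel.
 */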
5568 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5569                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
5570                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
5571 {
5572         struct drm_connector *drm_connector = &aconnector->base;
5573         uint32_t link_bandwidth_kbps;
5574
5575         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5576                                                         dc_link_get_link_cap(aconnector->dc_link));
5577         /* Set DSC policy according to dsc_clock_en */
5578         dc_dsc_policy_set_enable_dsc_when_not_needed(
5579                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5580
5581         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5582
5583                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5584                                                 dsc_caps,
5585                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5586                                                 0,
5587                                                 link_bandwidth_kbps,
5588                                                 &stream->timing,
5589                                                 &stream->timing.dsc_cfg)) {
5590                         stream->timing.flags.DSC = 1;
5591                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5592                 }
5593         }
5594
5595         /* Overwrite the stream flag if DSC is enabled through debugfs */
5596         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5597                 stream->timing.flags.DSC = 1;
5598
5599         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5600                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5601
5602         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5603                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5604
5605         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5606                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5607 }
5608 #endif
5609
5610 static struct drm_display_mode *
5611 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5612                           bool use_probed_modes)
5613 {
5614         struct drm_display_mode *m, *m_pref = NULL;
5615         u16 current_refresh, highest_refresh;
5616         struct list_head *list_head = use_probed_modes ?
5617                                                     &aconnector->base.probed_modes :
5618                                                     &aconnector->base.modes;
5619
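        /* Return the cached base mode if one was computed earlier. */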
5620         if (aconnector->freesync_vid_base.clock != 0)
5621                 return &aconnector->freesync_vid_base;
5622
5623         /* Find the preferred mode */
5624         list_for_each_entry(m, list_head, head) {
5625                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5626                         m_pref = m;
5627                         break;
5628                 }
5629         }
5630
5631         if (!m_pref) {
5632                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
5633                 m_pref = list_first_entry_or_null(
5634                         &aconnector->base.modes, struct drm_display_mode, head);
5635                 if (!m_pref) {
5636                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5637                         return NULL;
5638                 }
5639         }
5640
5641         highest_refresh = drm_mode_vrefresh(m_pref);
5642
5643         /*
5644          * Find the mode with highest refresh rate with same resolution.
5645          * For some monitors, preferred mode is not the mode with highest
5646          * supported refresh rate.
5647          */
5648         list_for_each_entry(m, list_head, head) {
5649                 current_refresh = drm_mode_vrefresh(m);
5650
5651                 if (m->hdisplay == m_pref->hdisplay &&
5652                     m->vdisplay == m_pref->vdisplay &&
5653                     highest_refresh < current_refresh) {
5654                         highest_refresh = current_refresh;
5655                         m_pref = m;
5656                 }
5657         }
5658
5659         aconnector->freesync_vid_base = *m_pref;
5660         return m_pref;
5661 }
5662
5663 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5664                                    struct amdgpu_dm_connector *aconnector)
5665 {
5666         struct drm_display_mode *high_mode;
5667         int timing_diff;
5668
5669         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5670         if (!high_mode || !mode)
5671                 return false;
5672
5673         timing_diff = high_mode->vtotal - mode->vtotal;
5674
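        /*
         * The mode qualifies as a FreeSync video mode only if it matches the
         * base mode exactly except for vertical blanking: vtotal, vsync_start
         * and vsync_end may all be shifted by the same timing_diff.
         */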
5675         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5676             high_mode->hdisplay != mode->hdisplay ||
5677             high_mode->vdisplay != mode->vdisplay ||
5678             high_mode->hsync_start != mode->hsync_start ||
5679             high_mode->hsync_end != mode->hsync_end ||
5680             high_mode->htotal != mode->htotal ||
5681             high_mode->hskew != mode->hskew ||
5682             high_mode->vscan != mode->vscan ||
5683             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5684             high_mode->vsync_end - mode->vsync_end != timing_diff)
5685                 return false;
5686         else
5687                 return true;
5688 }
5689
5690 static struct dc_stream_state *
5691 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5692                        const struct drm_display_mode *drm_mode,
5693                        const struct dm_connector_state *dm_state,
5694                        const struct dc_stream_state *old_stream,
5695                        int requested_bpc)
5696 {
5697         struct drm_display_mode *preferred_mode = NULL;
5698         struct drm_connector *drm_connector;
5699         const struct drm_connector_state *con_state =
5700                 dm_state ? &dm_state->base : NULL;
5701         struct dc_stream_state *stream = NULL;
5702         struct drm_display_mode mode = *drm_mode;
5703         struct drm_display_mode saved_mode;
5704         struct drm_display_mode *freesync_mode = NULL;
5705         bool native_mode_found = false;
5706         bool recalculate_timing = false;
5707         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5708         int mode_refresh;
5709         int preferred_refresh = 0;
5710 #if defined(CONFIG_DRM_AMD_DC_DCN)
5711         struct dsc_dec_dpcd_caps dsc_caps;
5712 #endif
5713         struct dc_sink *sink = NULL;
5714
5715         memset(&saved_mode, 0, sizeof(saved_mode));
5716
5717         if (aconnector == NULL) {
5718                 DRM_ERROR("aconnector is NULL!\n");
5719                 return stream;
5720         }
5721
5722         drm_connector = &aconnector->base;
5723
5724         if (!aconnector->dc_sink) {
5725                 sink = create_fake_sink(aconnector);
5726                 if (!sink)
5727                         return stream;
5728         } else {
5729                 sink = aconnector->dc_sink;
5730                 dc_sink_retain(sink);
5731         }
5732
5733         stream = dc_create_stream_for_sink(sink);
5734
5735         if (stream == NULL) {
5736                 DRM_ERROR("Failed to create stream for sink!\n");
5737                 goto finish;
5738         }
5739
5740         stream->dm_stream_context = aconnector;
5741
5742         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5743                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5744
5745         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5746                 /* Search for preferred mode */
5747                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5748                         native_mode_found = true;
5749                         break;
5750                 }
5751         }
5752         if (!native_mode_found)
5753                 preferred_mode = list_first_entry_or_null(
5754                                 &aconnector->base.modes,
5755                                 struct drm_display_mode,
5756                                 head);
5757
5758         mode_refresh = drm_mode_vrefresh(&mode);
5759
5760         if (preferred_mode == NULL) {
5761                 /*
5762                  * This may not be an error: the use case is a hotplug with
5763                  * no usermode call to reset and set the mode. In that case
5764                  * we set the mode ourselves to restore the previous mode,
5765                  * and the mode list may not be filled in yet.
5766                  */
5767                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5768         } else {
5769                 recalculate_timing = amdgpu_freesync_vid_mode &&
5770                                  is_freesync_video_mode(&mode, aconnector);
5771                 if (recalculate_timing) {
5772                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5773                         saved_mode = mode;
5774                         mode = *freesync_mode;
5775                 } else {
5776                         decide_crtc_timing_for_drm_display_mode(
5777                                 &mode, preferred_mode, scale);
5778
5779                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
5780                 }
5781         }
5782
5783         if (recalculate_timing)
5784                 drm_mode_set_crtcinfo(&saved_mode, 0);
5785         else if (!dm_state)
5786                 drm_mode_set_crtcinfo(&mode, 0);
5787
5788         /*
5789          * If scaling is enabled and the refresh rate didn't change,
5790          * we copy the VIC and polarities from the old timings.
5791          */
5792         if (!scale || mode_refresh != preferred_refresh)
5793                 fill_stream_properties_from_drm_display_mode(
5794                         stream, &mode, &aconnector->base, con_state, NULL,
5795                         requested_bpc);
5796         else
5797                 fill_stream_properties_from_drm_display_mode(
5798                         stream, &mode, &aconnector->base, con_state, old_stream,
5799                         requested_bpc);
5800
5801 #if defined(CONFIG_DRM_AMD_DC_DCN)
5802         /* SST DSC determination policy */
5803         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5804         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5805                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5806 #endif
5807
5808         update_stream_scaling_settings(&mode, dm_state, stream);
5809
5810         fill_audio_info(
5811                 &stream->audio_info,
5812                 drm_connector,
5813                 sink);
5814
5815         update_stream_signal(stream, sink);
5816
5817         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5818                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5819
5820         if (stream->link->psr_settings.psr_feature_enabled) {
5821                 /*
5822                  * Decide whether the stream supports VSC SDP colorimetry
5823                  * before building the VSC info packet.
5824                  */
5825                 stream->use_vsc_sdp_for_colorimetry = false;
5826                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5827                         stream->use_vsc_sdp_for_colorimetry =
5828                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5829                 } else {
5830                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5831                                 stream->use_vsc_sdp_for_colorimetry = true;
5832                 }
5833                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5834         }
5835 finish:
5836         dc_sink_release(sink);
5837
5838         return stream;
5839 }
5840
5841 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5842 {
5843         drm_crtc_cleanup(crtc);
5844         kfree(crtc);
5845 }
5846
5847 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5848                                   struct drm_crtc_state *state)
5849 {
5850         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5851
5852         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5853         if (cur->stream)
5854                 dc_stream_release(cur->stream);
5855
5856
5857         __drm_atomic_helper_crtc_destroy_state(state);
5858
5859
5860         kfree(state);
5861 }
5862
5863 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5864 {
5865         struct dm_crtc_state *state;
5866
5867         if (crtc->state)
5868                 dm_crtc_destroy_state(crtc, crtc->state);
5869
5870         state = kzalloc(sizeof(*state), GFP_KERNEL);
5871         if (WARN_ON(!state))
5872                 return;
5873
5874         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5875 }
5876
5877 static struct drm_crtc_state *
5878 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5879 {
5880         struct dm_crtc_state *state, *cur;
5881
5882         if (WARN_ON(!crtc->state))
5883                 return NULL;
5884
5885         cur = to_dm_crtc_state(crtc->state);
5886
5887         state = kzalloc(sizeof(*state), GFP_KERNEL);
5888         if (!state)
5889                 return NULL;
5890
5891         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5892
5893         if (cur->stream) {
5894                 state->stream = cur->stream;
5895                 dc_stream_retain(state->stream);
5896         }
5897
5898         state->active_planes = cur->active_planes;
5899         state->vrr_infopacket = cur->vrr_infopacket;
5900         state->abm_level = cur->abm_level;
5901         state->vrr_supported = cur->vrr_supported;
5902         state->freesync_config = cur->freesync_config;
5903         state->cm_has_degamma = cur->cm_has_degamma;
5904         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5905         /* TODO: Duplicate dc_stream once the stream object is flattened */
5906
5907         return &state->base;
5908 }
5909
5910 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5911 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5912 {
5913         crtc_debugfs_init(crtc);
5914
5915         return 0;
5916 }
5917 #endif
5918
5919 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5920 {
5921         enum dc_irq_source irq_source;
5922         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5923         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5924         int rc;
5925
5926         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5927
5928         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5929
5930         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5931                       acrtc->crtc_id, enable ? "en" : "dis", rc);
5932         return rc;
5933 }
5934
5935 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5936 {
5937         enum dc_irq_source irq_source;
5938         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5939         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5940         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5941 #if defined(CONFIG_DRM_AMD_DC_DCN)
5942         struct amdgpu_display_manager *dm = &adev->dm;
5943         unsigned long flags;
5944 #endif
5945         int rc = 0;
5946
5947         if (enable) {
5948                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5949                 if (amdgpu_dm_vrr_active(acrtc_state))
5950                         rc = dm_set_vupdate_irq(crtc, true);
5951         } else {
5952                 /* vblank irq off -> vupdate irq off */
5953                 rc = dm_set_vupdate_irq(crtc, false);
5954         }
5955
5956         if (rc)
5957                 return rc;
5958
5959         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5960
5961         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5962                 return -EBUSY;
5963
5964         if (amdgpu_in_reset(adev))
5965                 return 0;
5966
5967 #if defined(CONFIG_DRM_AMD_DC_DCN)
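        /*
         * Hand the enable/disable off to the vblank worker: it reconfigures
         * MALL (Memory Access at Last Level), which presumably cannot be done
         * safely from this context.
         */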
5968         spin_lock_irqsave(&dm->vblank_lock, flags);
5969         dm->vblank_workqueue->dm = dm;
5970         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5971         dm->vblank_workqueue->enable = enable;
5972         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5973         schedule_work(&dm->vblank_workqueue->mall_work);
5974 #endif
5975
5976         return 0;
5977 }
5978
5979 static int dm_enable_vblank(struct drm_crtc *crtc)
5980 {
5981         return dm_set_vblank(crtc, true);
5982 }
5983
5984 static void dm_disable_vblank(struct drm_crtc *crtc)
5985 {
5986         dm_set_vblank(crtc, false);
5987 }
5988
5989 /* Only the options currently available to the driver are implemented */
5990 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5991         .reset = dm_crtc_reset_state,
5992         .destroy = amdgpu_dm_crtc_destroy,
5993         .set_config = drm_atomic_helper_set_config,
5994         .page_flip = drm_atomic_helper_page_flip,
5995         .atomic_duplicate_state = dm_crtc_duplicate_state,
5996         .atomic_destroy_state = dm_crtc_destroy_state,
5997         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5998         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5999         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6000         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6001         .enable_vblank = dm_enable_vblank,
6002         .disable_vblank = dm_disable_vblank,
6003         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6004 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6005         .late_register = amdgpu_dm_crtc_late_register,
6006 #endif
6007 };
6008
6009 static enum drm_connector_status
6010 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6011 {
6012         bool connected;
6013         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6014
6015         /*
6016          * Notes:
6017          * 1. This interface is NOT called in context of HPD irq.
6018          * 2. This interface *is called* in context of user-mode ioctl,
6019          * which makes it a bad place for *any* MST-related activity.
6020          */
6021
6022         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6023             !aconnector->fake_enable)
6024                 connected = (aconnector->dc_sink != NULL);
6025         else
6026                 connected = (aconnector->base.force == DRM_FORCE_ON);
6027
6028         update_subconnector_property(aconnector);
6029
6030         return (connected ? connector_status_connected :
6031                         connector_status_disconnected);
6032 }
6033
6034 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6035                                             struct drm_connector_state *connector_state,
6036                                             struct drm_property *property,
6037                                             uint64_t val)
6038 {
6039         struct drm_device *dev = connector->dev;
6040         struct amdgpu_device *adev = drm_to_adev(dev);
6041         struct dm_connector_state *dm_old_state =
6042                 to_dm_connector_state(connector->state);
6043         struct dm_connector_state *dm_new_state =
6044                 to_dm_connector_state(connector_state);
6045
6046         int ret = -EINVAL;
6047
6048         if (property == dev->mode_config.scaling_mode_property) {
6049                 enum amdgpu_rmx_type rmx_type;
6050
6051                 switch (val) {
6052                 case DRM_MODE_SCALE_CENTER:
6053                         rmx_type = RMX_CENTER;
6054                         break;
6055                 case DRM_MODE_SCALE_ASPECT:
6056                         rmx_type = RMX_ASPECT;
6057                         break;
6058                 case DRM_MODE_SCALE_FULLSCREEN:
6059                         rmx_type = RMX_FULL;
6060                         break;
6061                 case DRM_MODE_SCALE_NONE:
6062                 default:
6063                         rmx_type = RMX_OFF;
6064                         break;
6065                 }
6066
6067                 if (dm_old_state->scaling == rmx_type)
6068                         return 0;
6069
6070                 dm_new_state->scaling = rmx_type;
6071                 ret = 0;
6072         } else if (property == adev->mode_info.underscan_hborder_property) {
6073                 dm_new_state->underscan_hborder = val;
6074                 ret = 0;
6075         } else if (property == adev->mode_info.underscan_vborder_property) {
6076                 dm_new_state->underscan_vborder = val;
6077                 ret = 0;
6078         } else if (property == adev->mode_info.underscan_property) {
6079                 dm_new_state->underscan_enable = val;
6080                 ret = 0;
6081         } else if (property == adev->mode_info.abm_level_property) {
6082                 dm_new_state->abm_level = val;
6083                 ret = 0;
6084         }
6085
6086         return ret;
6087 }
6088
6089 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6090                                             const struct drm_connector_state *state,
6091                                             struct drm_property *property,
6092                                             uint64_t *val)
6093 {
6094         struct drm_device *dev = connector->dev;
6095         struct amdgpu_device *adev = drm_to_adev(dev);
6096         struct dm_connector_state *dm_state =
6097                 to_dm_connector_state(state);
6098         int ret = -EINVAL;
6099
6100         if (property == dev->mode_config.scaling_mode_property) {
6101                 switch (dm_state->scaling) {
6102                 case RMX_CENTER:
6103                         *val = DRM_MODE_SCALE_CENTER;
6104                         break;
6105                 case RMX_ASPECT:
6106                         *val = DRM_MODE_SCALE_ASPECT;
6107                         break;
6108                 case RMX_FULL:
6109                         *val = DRM_MODE_SCALE_FULLSCREEN;
6110                         break;
6111                 case RMX_OFF:
6112                 default:
6113                         *val = DRM_MODE_SCALE_NONE;
6114                         break;
6115                 }
6116                 ret = 0;
6117         } else if (property == adev->mode_info.underscan_hborder_property) {
6118                 *val = dm_state->underscan_hborder;
6119                 ret = 0;
6120         } else if (property == adev->mode_info.underscan_vborder_property) {
6121                 *val = dm_state->underscan_vborder;
6122                 ret = 0;
6123         } else if (property == adev->mode_info.underscan_property) {
6124                 *val = dm_state->underscan_enable;
6125                 ret = 0;
6126         } else if (property == adev->mode_info.abm_level_property) {
6127                 *val = dm_state->abm_level;
6128                 ret = 0;
6129         }
6130
6131         return ret;
6132 }
6133
6134 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6135 {
6136         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6137
6138         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6139 }
6140
6141 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6142 {
6143         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6144         const struct dc_link *link = aconnector->dc_link;
6145         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6146         struct amdgpu_display_manager *dm = &adev->dm;
6147
6148         /*
6149          * Call only if mst_mgr was initialized before, since it's not done
6150          * for all connector types.
6151          */
6152         if (aconnector->mst_mgr.dev)
6153                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6154
6155 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6156         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6157
6158         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6159             link->type != dc_connection_none &&
6160             dm->backlight_dev) {
6161                 backlight_device_unregister(dm->backlight_dev);
6162                 dm->backlight_dev = NULL;
6163         }
6164 #endif
6165
6166         if (aconnector->dc_em_sink)
6167                 dc_sink_release(aconnector->dc_em_sink);
6168         aconnector->dc_em_sink = NULL;
6169         if (aconnector->dc_sink)
6170                 dc_sink_release(aconnector->dc_sink);
6171         aconnector->dc_sink = NULL;
6172
6173         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6174         drm_connector_unregister(connector);
6175         drm_connector_cleanup(connector);
6176         if (aconnector->i2c) {
6177                 i2c_del_adapter(&aconnector->i2c->base);
6178                 kfree(aconnector->i2c);
6179         }
6180         kfree(aconnector->dm_dp_aux.aux.name);
6181
6182         kfree(connector);
6183 }
6184
6185 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6186 {
6187         struct dm_connector_state *state =
6188                 to_dm_connector_state(connector->state);
6189
6190         if (connector->state)
6191                 __drm_atomic_helper_connector_destroy_state(connector->state);
6192
6193         kfree(state);
6194
6195         state = kzalloc(sizeof(*state), GFP_KERNEL);
6196
6197         if (state) {
6198                 state->scaling = RMX_OFF;
6199                 state->underscan_enable = false;
6200                 state->underscan_hborder = 0;
6201                 state->underscan_vborder = 0;
6202                 state->base.max_requested_bpc = 8;
6203                 state->vcpi_slots = 0;
6204                 state->pbn = 0;
6205                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6206                         state->abm_level = amdgpu_dm_abm_level;
6207
6208                 __drm_atomic_helper_connector_reset(connector, &state->base);
6209         }
6210 }
6211
6212 struct drm_connector_state *
6213 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6214 {
6215         struct dm_connector_state *state =
6216                 to_dm_connector_state(connector->state);
6217
6218         struct dm_connector_state *new_state =
6219                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6220
6221         if (!new_state)
6222                 return NULL;
6223
6224         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6225
6226         new_state->freesync_capable = state->freesync_capable;
6227         new_state->abm_level = state->abm_level;
6228         new_state->scaling = state->scaling;
6229         new_state->underscan_enable = state->underscan_enable;
6230         new_state->underscan_hborder = state->underscan_hborder;
6231         new_state->underscan_vborder = state->underscan_vborder;
6232         new_state->vcpi_slots = state->vcpi_slots;
6233         new_state->pbn = state->pbn;
6234         return &new_state->base;
6235 }
6236
6237 static int
6238 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6239 {
6240         struct amdgpu_dm_connector *amdgpu_dm_connector =
6241                 to_amdgpu_dm_connector(connector);
6242         int r;
6243
6244         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6245             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6246                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6247                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6248                 if (r)
6249                         return r;
6250         }
6251
6252 #if defined(CONFIG_DEBUG_FS)
6253         connector_debugfs_init(amdgpu_dm_connector);
6254 #endif
6255
6256         return 0;
6257 }
6258
6259 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6260         .reset = amdgpu_dm_connector_funcs_reset,
6261         .detect = amdgpu_dm_connector_detect,
6262         .fill_modes = drm_helper_probe_single_connector_modes,
6263         .destroy = amdgpu_dm_connector_destroy,
6264         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6265         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6266         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6267         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6268         .late_register = amdgpu_dm_connector_late_register,
6269         .early_unregister = amdgpu_dm_connector_unregister
6270 };
6271
6272 static int get_modes(struct drm_connector *connector)
6273 {
6274         return amdgpu_dm_connector_get_modes(connector);
6275 }
6276
6277 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6278 {
6279         struct dc_sink_init_data init_params = {
6280                         .link = aconnector->dc_link,
6281                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6282         };
6283         struct edid *edid;
6284
6285         if (!aconnector->base.edid_blob_ptr) {
6286                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6287                                 aconnector->base.name);
6288
6289                 aconnector->base.force = DRM_FORCE_OFF;
6290                 aconnector->base.override_edid = false;
6291                 return;
6292         }
6293
6294         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6295
6296         aconnector->edid = edid;
6297
6298         aconnector->dc_em_sink = dc_link_add_remote_sink(
6299                 aconnector->dc_link,
6300                 (uint8_t *)edid,
6301                 (edid->extensions + 1) * EDID_LENGTH,
6302                 &init_params);
6303
6304         if (aconnector->base.force == DRM_FORCE_ON) {
6305                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6306                                 aconnector->dc_link->local_sink :
6307                                 aconnector->dc_em_sink;
6308                 dc_sink_retain(aconnector->dc_sink);
6309         }
6310 }
6311
6312 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6313 {
6314         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6315
6316         /*
6317          * For a headless boot with a DP-managed connector forced on,
6318          * these settings have to be != 0 to get an initial modeset.
6319          */
6320         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6321                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6322                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6323         }
6324
6325
6326         aconnector->base.override_edid = true;
6327         create_eml_sink(aconnector);
6328 }
6329
6330 static struct dc_stream_state *
6331 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6332                                 const struct drm_display_mode *drm_mode,
6333                                 const struct dm_connector_state *dm_state,
6334                                 const struct dc_stream_state *old_stream)
6335 {
6336         struct drm_connector *connector = &aconnector->base;
6337         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6338         struct dc_stream_state *stream;
6339         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6340         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6341         enum dc_status dc_result = DC_OK;
6342
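        /*
         * Try the requested bpc first, then step down by 2 (e.g. 10 -> 8 -> 6)
         * until DC validates the stream or we run out of depths to try.
         */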
6343         do {
6344                 stream = create_stream_for_sink(aconnector, drm_mode,
6345                                                 dm_state, old_stream,
6346                                                 requested_bpc);
6347                 if (stream == NULL) {
6348                         DRM_ERROR("Failed to create stream for sink!\n");
6349                         break;
6350                 }
6351
6352                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6353
6354                 if (dc_result != DC_OK) {
6355                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6356                                       drm_mode->hdisplay,
6357                                       drm_mode->vdisplay,
6358                                       drm_mode->clock,
6359                                       dc_result,
6360                                       dc_status_to_str(dc_result));
6361
6362                         dc_stream_release(stream);
6363                         stream = NULL;
6364                         requested_bpc -= 2; /* lower bpc to retry validation */
6365                 }
6366
6367         } while (stream == NULL && requested_bpc >= 6);
6368
6369         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6370                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6371
6372                 aconnector->force_yuv420_output = true;
6373                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6374                                                 dm_state, old_stream);
6375                 aconnector->force_yuv420_output = false;
6376         }
6377
6378         return stream;
6379 }
6380
6381 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6382                                    struct drm_display_mode *mode)
6383 {
6384         int result = MODE_ERROR;
6385         struct dc_sink *dc_sink;
6386         /* TODO: Unhardcode stream count */
6387         struct dc_stream_state *stream;
6388         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6389
6390         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6391                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6392                 return result;
6393
6394         /*
6395          * Only run this the first time mode_valid is called to initialize
6396          * EDID mgmt
6397          */
6398         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6399                 !aconnector->dc_em_sink)
6400                 handle_edid_mgmt(aconnector);
6401
6402         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6403
6404         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6405                                 aconnector->base.force != DRM_FORCE_ON) {
6406                 DRM_ERROR("dc_sink is NULL!\n");
6407                 goto fail;
6408         }
6409
6410         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6411         if (stream) {
6412                 dc_stream_release(stream);
6413                 result = MODE_OK;
6414         }
6415
6416 fail:
6417         /* TODO: error handling */
6418         return result;
6419 }
6420
6421 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6422                                 struct dc_info_packet *out)
6423 {
6424         struct hdmi_drm_infoframe frame;
6425         unsigned char buf[30]; /* 26 + 4 */
6426         ssize_t len;
6427         int ret, i;
6428
6429         memset(out, 0, sizeof(*out));
6430
6431         if (!state->hdr_output_metadata)
6432                 return 0;
6433
6434         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6435         if (ret)
6436                 return ret;
6437
6438         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6439         if (len < 0)
6440                 return (int)len;
6441
6442         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6443         if (len != 30)
6444                 return -EINVAL;
6445
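        /*
         * CTA-861-G defines the Dynamic Range and Mastering infoframe as type
         * 0x87, version 0x01, with a fixed 26-byte payload; DP carries the
         * same payload in an SDP with its own header layout.
         */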
6446         /* Prepare the infopacket for DC. */
6447         switch (state->connector->connector_type) {
6448         case DRM_MODE_CONNECTOR_HDMIA:
6449                 out->hb0 = 0x87; /* type */
6450                 out->hb1 = 0x01; /* version */
6451                 out->hb2 = 0x1A; /* length */
6452                 out->sb[0] = buf[3]; /* checksum */
6453                 i = 1;
6454                 break;
6455
6456         case DRM_MODE_CONNECTOR_DisplayPort:
6457         case DRM_MODE_CONNECTOR_eDP:
6458                 out->hb0 = 0x00; /* sdp id, zero */
6459                 out->hb1 = 0x87; /* type */
6460                 out->hb2 = 0x1D; /* payload len - 1 */
6461                 out->hb3 = (0x13 << 2); /* sdp version */
6462                 out->sb[0] = 0x01; /* version */
6463                 out->sb[1] = 0x1A; /* length */
6464                 i = 2;
6465                 break;
6466
6467         default:
6468                 return -EINVAL;
6469         }
6470
6471         memcpy(&out->sb[i], &buf[4], 26);
6472         out->valid = true;
6473
6474         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6475                        sizeof(out->sb), false);
6476
6477         return 0;
6478 }
6479
6480 static int
6481 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6482                                  struct drm_atomic_state *state)
6483 {
6484         struct drm_connector_state *new_con_state =
6485                 drm_atomic_get_new_connector_state(state, conn);
6486         struct drm_connector_state *old_con_state =
6487                 drm_atomic_get_old_connector_state(state, conn);
6488         struct drm_crtc *crtc = new_con_state->crtc;
6489         struct drm_crtc_state *new_crtc_state;
6490         int ret;
6491
6492         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6493
6494         if (!crtc)
6495                 return 0;
6496
6497         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6498                 struct dc_info_packet hdr_infopacket;
6499
6500                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6501                 if (ret)
6502                         return ret;
6503
6504                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6505                 if (IS_ERR(new_crtc_state))
6506                         return PTR_ERR(new_crtc_state);
6507
6508                 /*
6509                  * DC considers the stream backends changed if the
6510                  * static metadata changes. Forcing the modeset also
6511                  * gives a simple way for userspace to switch from
6512                  * 8bpc to 10bpc when setting the metadata to enter
6513                  * or exit HDR.
6514                  *
6515                  * Changing the static metadata after it's been
6516                  * set is permissible, however. So only force a
6517                  * modeset if we're entering or exiting HDR.
6518                  */
6519                 new_crtc_state->mode_changed =
6520                         !old_con_state->hdr_output_metadata ||
6521                         !new_con_state->hdr_output_metadata;
6522         }
6523
6524         return 0;
6525 }
6526
6527 static const struct drm_connector_helper_funcs
6528 amdgpu_dm_connector_helper_funcs = {
6529         /*
6530          * When hotplugging a second, bigger display in fbcon mode, the higher
6531          * resolution modes are filtered out by drm_mode_validate_size() and
6532          * are missing after the user starts lightdm. So the get_modes callback
6533          * needs to renew the mode list, not just return the mode count.
6534          */
6535         .get_modes = get_modes,
6536         .mode_valid = amdgpu_dm_connector_mode_valid,
6537         .atomic_check = amdgpu_dm_connector_atomic_check,
6538 };
6539
6540 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6541 {
6542 }
6543
6544 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6545 {
6546         struct drm_atomic_state *state = new_crtc_state->state;
6547         struct drm_plane *plane;
6548         int num_active = 0;
6549
6550         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6551                 struct drm_plane_state *new_plane_state;
6552
6553                 /* Cursor planes are "fake". */
6554                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6555                         continue;
6556
6557                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6558
6559                 if (!new_plane_state) {
6560                         /*
6561                          * The plane is enabled on the CRTC and hasn't changed
6562                          * state. This means that it previously passed
6563                          * validation and is therefore enabled.
6564                          */
6565                         num_active += 1;
6566                         continue;
6567                 }
6568
6569                 /* We need a framebuffer to be considered enabled. */
6570                 num_active += (new_plane_state->fb != NULL);
6571         }
6572
6573         return num_active;
6574 }
6575
6576 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6577                                          struct drm_crtc_state *new_crtc_state)
6578 {
6579         struct dm_crtc_state *dm_new_crtc_state =
6580                 to_dm_crtc_state(new_crtc_state);
6581
6582         dm_new_crtc_state->active_planes = 0;
6583
6584         if (!dm_new_crtc_state->stream)
6585                 return;
6586
6587         dm_new_crtc_state->active_planes =
6588                 count_crtc_active_planes(new_crtc_state);
6589 }
6590
6591 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6592                                        struct drm_atomic_state *state)
6593 {
6594         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6595                                                                           crtc);
6596         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6597         struct dc *dc = adev->dm.dc;
6598         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6599         int ret = -EINVAL;
6600
6601         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6602
6603         dm_update_crtc_active_planes(crtc, crtc_state);
6604
6605         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6606                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6607                 return ret;
6608         }
6609
6610         /*
6611          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6612          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6613          * planes are disabled, which is not supported by the hardware. And there is legacy
6614          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6615          */
6616         if (crtc_state->enable &&
6617             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6618                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6619                 return -EINVAL;
6620         }
6621
6622         /* In some use cases, like reset, no stream is attached */
6623         if (!dm_crtc_state->stream)
6624                 return 0;
6625
6626         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6627                 return 0;
6628
6629         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6630         return ret;
6631 }
6632
6633 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6634                                       const struct drm_display_mode *mode,
6635                                       struct drm_display_mode *adjusted_mode)
6636 {
6637         return true;
6638 }
6639
6640 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6641         .disable = dm_crtc_helper_disable,
6642         .atomic_check = dm_crtc_helper_atomic_check,
6643         .mode_fixup = dm_crtc_helper_mode_fixup,
6644         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6645 };
6646
6647 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6648 {
6649
6650 }
6651
6652 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6653 {
6654         switch (display_color_depth) {
6655         case COLOR_DEPTH_666:
6656                 return 6;
6657         case COLOR_DEPTH_888:
6658                 return 8;
6659         case COLOR_DEPTH_101010:
6660                 return 10;
6661         case COLOR_DEPTH_121212:
6662                 return 12;
6663         case COLOR_DEPTH_141414:
6664                 return 14;
6665         case COLOR_DEPTH_161616:
6666                 return 16;
6667         default:
6668                 break;
6669         }
6670         return 0;
6671 }
6672
6673 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6674                                           struct drm_crtc_state *crtc_state,
6675                                           struct drm_connector_state *conn_state)
6676 {
6677         struct drm_atomic_state *state = crtc_state->state;
6678         struct drm_connector *connector = conn_state->connector;
6679         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6680         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6681         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6682         struct drm_dp_mst_topology_mgr *mst_mgr;
6683         struct drm_dp_mst_port *mst_port;
6684         enum dc_color_depth color_depth;
6685         int clock, bpp = 0;
6686         bool is_y420 = false;
6687
6688         if (!aconnector->port || !aconnector->dc_sink)
6689                 return 0;
6690
6691         mst_port = aconnector->port;
6692         mst_mgr = &aconnector->mst_port->mst_mgr;
6693
6694         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6695                 return 0;
6696
6697         if (!state->duplicated) {
6698                 int max_bpc = conn_state->max_requested_bpc;
6699                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6700                                 aconnector->force_yuv420_output;
6701                 color_depth = convert_color_depth_from_display_info(connector,
6702                                                                     is_y420,
6703                                                                     max_bpc);
6704                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6705                 clock = adjusted_mode->clock;
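                /*
                 * PBN (Payload Bandwidth Number) is the DP MST bandwidth unit
                 * (54/64 MBps per unit); drm_dp_calc_pbn_mode() derives it
                 * from the pixel clock in kHz and the uncompressed bpp.
                 */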
6706                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6707         }
6708         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6709                                                                            mst_mgr,
6710                                                                            mst_port,
6711                                                                            dm_new_connector_state->pbn,
6712                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6713         if (dm_new_connector_state->vcpi_slots < 0) {
6714                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6715                 return dm_new_connector_state->vcpi_slots;
6716         }
6717         return 0;
6718 }
6719
6720 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6721         .disable = dm_encoder_helper_disable,
6722         .atomic_check = dm_encoder_helper_atomic_check
6723 };
6724
6725 #if defined(CONFIG_DRM_AMD_DC_DCN)
6726 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6727                                             struct dc_state *dc_state)
6728 {
6729         struct dc_stream_state *stream = NULL;
6730         struct drm_connector *connector;
6731         struct drm_connector_state *new_con_state;
6732         struct amdgpu_dm_connector *aconnector;
6733         struct dm_connector_state *dm_conn_state;
6734         int i, j, clock, bpp;
6735         int vcpi, pbn_div, pbn = 0;
6736
6737         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6738
6739                 aconnector = to_amdgpu_dm_connector(connector);
6740
6741                 if (!aconnector->port)
6742                         continue;
6743
6744                 if (!new_con_state || !new_con_state->crtc)
6745                         continue;
6746
6747                 dm_conn_state = to_dm_connector_state(new_con_state);
6748
6749                 for (j = 0; j < dc_state->stream_count; j++) {
6750                         stream = dc_state->streams[j];
6751                         if (!stream)
6752                                 continue;
6753
6754                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6755                                 break;
6756
6757                         stream = NULL;
6758                 }
6759
6760                 if (!stream)
6761                         continue;
6762
6763                 if (stream->timing.flags.DSC != 1) {
6764                         drm_dp_mst_atomic_enable_dsc(state,
6765                                                      aconnector->port,
6766                                                      dm_conn_state->pbn,
6767                                                      0,
6768                                                      false);
6769                         continue;
6770                 }
6771
6772                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6773                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6774                 clock = stream->timing.pix_clk_100hz / 10;
6775                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6776                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6777                                                     aconnector->port,
6778                                                     pbn, pbn_div,
6779                                                     true);
6780                 if (vcpi < 0)
6781                         return vcpi;
6782
6783                 dm_conn_state->pbn = pbn;
6784                 dm_conn_state->vcpi_slots = vcpi;
6785         }
6786         return 0;
6787 }
6788 #endif
6789
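/*
 * DM plane states subclass drm_plane_state and additionally carry a
 * reference-counted dc_plane_state. The reset/duplicate/destroy hooks below
 * keep that reference balanced: duplicating a state retains the shared
 * dc_state, destroying a state releases it.
 */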
6790 static void dm_drm_plane_reset(struct drm_plane *plane)
6791 {
6792         struct dm_plane_state *amdgpu_state = NULL;
6793
6794         if (plane->state)
6795                 plane->funcs->atomic_destroy_state(plane, plane->state);
6796
6797         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6798         WARN_ON(amdgpu_state == NULL);
6799
6800         if (amdgpu_state)
6801                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6802 }
6803
6804 static struct drm_plane_state *
6805 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6806 {
6807         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6808
6809         old_dm_plane_state = to_dm_plane_state(plane->state);
6810         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6811         if (!dm_plane_state)
6812                 return NULL;
6813
6814         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6815
6816         if (old_dm_plane_state->dc_state) {
6817                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6818                 dc_plane_state_retain(dm_plane_state->dc_state);
6819         }
6820
6821         return &dm_plane_state->base;
6822 }
6823
6824 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6825                                 struct drm_plane_state *state)
6826 {
6827         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6828
6829         if (dm_plane_state->dc_state)
6830                 dc_plane_state_release(dm_plane_state->dc_state);
6831
6832         drm_atomic_helper_plane_destroy_state(plane, state);
6833 }
6834
6835 static const struct drm_plane_funcs dm_plane_funcs = {
6836         .update_plane   = drm_atomic_helper_update_plane,
6837         .disable_plane  = drm_atomic_helper_disable_plane,
6838         .destroy        = drm_primary_helper_destroy,
6839         .reset = dm_drm_plane_reset,
6840         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6841         .atomic_destroy_state = dm_drm_plane_destroy_state,
6842         .format_mod_supported = dm_plane_format_mod_supported,
6843 };
6844
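/*
 * prepare_fb pins the framebuffer's BO into a domain the display hardware
 * can scan out from (VRAM for cursors, otherwise whatever
 * amdgpu_display_supported_domains() allows) and ensures a GART binding, so
 * that afb->address below is a valid scanout address for DC. cleanup_fb
 * undoes the pin once the plane is done with the buffer.
 */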
6845 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6846                                       struct drm_plane_state *new_state)
6847 {
6848         struct amdgpu_framebuffer *afb;
6849         struct drm_gem_object *obj;
6850         struct amdgpu_device *adev;
6851         struct amdgpu_bo *rbo;
6852         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6853         struct list_head list;
6854         struct ttm_validate_buffer tv;
6855         struct ww_acquire_ctx ticket;
6856         uint32_t domain;
6857         int r;
6858
6859         if (!new_state->fb) {
6860                 DRM_DEBUG_KMS("No FB bound\n");
6861                 return 0;
6862         }
6863
6864         afb = to_amdgpu_framebuffer(new_state->fb);
6865         obj = new_state->fb->obj[0];
6866         rbo = gem_to_amdgpu_bo(obj);
6867         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6868         INIT_LIST_HEAD(&list);
6869
6870         tv.bo = &rbo->tbo;
6871         tv.num_shared = 1;
6872         list_add(&tv.head, &list);
6873
6874         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6875         if (r) {
6876                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6877                 return r;
6878         }
6879
6880         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6881                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6882         else
6883                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6884
6885         r = amdgpu_bo_pin(rbo, domain);
6886         if (unlikely(r != 0)) {
6887                 if (r != -ERESTARTSYS)
6888                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6889                 ttm_eu_backoff_reservation(&ticket, &list);
6890                 return r;
6891         }
6892
6893         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6894         if (unlikely(r != 0)) {
6895                 amdgpu_bo_unpin(rbo);
6896                 ttm_eu_backoff_reservation(&ticket, &list);
6897                 DRM_ERROR("%p bind failed\n", rbo);
6898                 return r;
6899         }
6900
6901         ttm_eu_backoff_reservation(&ticket, &list);
6902
6903         afb->address = amdgpu_bo_gpu_offset(rbo);
6904
6905         amdgpu_bo_ref(rbo);
6906
6907         /*
6908          * We don't do surface updates on planes that have been newly created,
6909          * but we also don't have the afb->address during atomic check.
6910          *
6911          * Fill in buffer attributes depending on the address here, but only on
6912          * newly created planes since they're not being used by DC yet and this
6913          * won't modify global state.
6914          */
6915         dm_plane_state_old = to_dm_plane_state(plane->state);
6916         dm_plane_state_new = to_dm_plane_state(new_state);
6917
6918         if (dm_plane_state_new->dc_state &&
6919             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6920                 struct dc_plane_state *plane_state =
6921                         dm_plane_state_new->dc_state;
6922                 bool force_disable_dcc = !plane_state->dcc.enable;
6923
6924                 fill_plane_buffer_attributes(
6925                         adev, afb, plane_state->format, plane_state->rotation,
6926                         afb->tiling_flags,
6927                         &plane_state->tiling_info, &plane_state->plane_size,
6928                         &plane_state->dcc, &plane_state->address,
6929                         afb->tmz_surface, force_disable_dcc);
6930         }
6931
6932         return 0;
6933 }
6934
6935 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6936                                        struct drm_plane_state *old_state)
6937 {
6938         struct amdgpu_bo *rbo;
6939         int r;
6940
6941         if (!old_state->fb)
6942                 return;
6943
6944         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6945         r = amdgpu_bo_reserve(rbo, false);
6946         if (unlikely(r)) {
6947                 DRM_ERROR("failed to reserve rbo before unpin\n");
6948                 return;
6949         }
6950
6951         amdgpu_bo_unpin(rbo);
6952         amdgpu_bo_unreserve(rbo);
6953         amdgpu_bo_unref(&rbo);
6954 }
6955
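/*
 * DC expresses its scaling caps as dst/src ratios scaled by 1000
 * (1.0 == 1000), while DRM wants src/dst ratios in 16.16 fixed point.
 * A worked example of the conversion done below: with a plane cap of
 * max_upscale = 16000 (16x, an illustrative value),
 *
 *     min_scale = (1000 << 16) / 16000 = 4096 = 0x1000,
 *
 * which is exactly 1/16 in 16.16 fixed point.
 */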
6956 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6957                                        struct drm_crtc_state *new_crtc_state)
6958 {
6959         struct drm_framebuffer *fb = state->fb;
6960         int min_downscale, max_upscale;
6961         int min_scale = 0;
6962         int max_scale = INT_MAX;
6963
6964         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6965         if (fb && state->crtc) {
6966                 /* Validate viewport to cover the case when only the position changes */
6967                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6968                         int viewport_width = state->crtc_w;
6969                         int viewport_height = state->crtc_h;
6970
6971                         if (state->crtc_x < 0)
6972                                 viewport_width += state->crtc_x;
6973                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6974                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6975
6976                         if (state->crtc_y < 0)
6977                                 viewport_height += state->crtc_y;
6978                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6979                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6980
6981                         if (viewport_width < 0 || viewport_height < 0) {
6982                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6983                                 return -EINVAL;
6984                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6985                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6986                                 return -EINVAL;
6987                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6988                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6989                                 return -EINVAL;
6990                         }
6991
6992                 }
6993
6994                 /* Get min/max allowed scaling factors from plane caps. */
6995                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6996                                              &min_downscale, &max_upscale);
6997                 /*
6998                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6999                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7000                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7001                  */
7002                 min_scale = (1000 << 16) / max_upscale;
7003                 max_scale = (1000 << 16) / min_downscale;
7004         }
7005
7006         return drm_atomic_helper_check_plane_state(
7007                 state, new_crtc_state, min_scale, max_scale, true, true);
7008 }
7009
7010 static int dm_plane_atomic_check(struct drm_plane *plane,
7011                                  struct drm_atomic_state *state)
7012 {
7013         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7014                                                                                  plane);
7015         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7016         struct dc *dc = adev->dm.dc;
7017         struct dm_plane_state *dm_plane_state;
7018         struct dc_scaling_info scaling_info;
7019         struct drm_crtc_state *new_crtc_state;
7020         int ret;
7021
7022         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7023
7024         dm_plane_state = to_dm_plane_state(new_plane_state);
7025
7026         if (!dm_plane_state->dc_state)
7027                 return 0;
7028
7029         new_crtc_state =
7030                 drm_atomic_get_new_crtc_state(state,
7031                                               new_plane_state->crtc);
7032         if (!new_crtc_state)
7033                 return -EINVAL;
7034
7035         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7036         if (ret)
7037                 return ret;
7038
7039         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7040         if (ret)
7041                 return ret;
7042
7043         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7044                 return 0;
7045
7046         return -EINVAL;
7047 }
7048
7049 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7050                                        struct drm_atomic_state *state)
7051 {
7052         /* Only support async updates on cursor planes. */
7053         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7054                 return -EINVAL;
7055
7056         return 0;
7057 }
7058
7059 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7060                                          struct drm_atomic_state *state)
7061 {
7062         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7063                                                                            plane);
7064         struct drm_plane_state *old_state =
7065                 drm_atomic_get_old_plane_state(state, plane);
7066
7067         trace_amdgpu_dm_atomic_update_cursor(new_state);
7068
7069         swap(plane->state->fb, new_state->fb);
7070
7071         plane->state->src_x = new_state->src_x;
7072         plane->state->src_y = new_state->src_y;
7073         plane->state->src_w = new_state->src_w;
7074         plane->state->src_h = new_state->src_h;
7075         plane->state->crtc_x = new_state->crtc_x;
7076         plane->state->crtc_y = new_state->crtc_y;
7077         plane->state->crtc_w = new_state->crtc_w;
7078         plane->state->crtc_h = new_state->crtc_h;
7079
7080         handle_cursor_update(plane, old_state);
7081 }
7082
7083 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7084         .prepare_fb = dm_plane_helper_prepare_fb,
7085         .cleanup_fb = dm_plane_helper_cleanup_fb,
7086         .atomic_check = dm_plane_atomic_check,
7087         .atomic_async_check = dm_plane_atomic_async_check,
7088         .atomic_async_update = dm_plane_atomic_async_update
7089 };
7090
7091 /*
7092  * TODO: these are currently initialized to RGB formats only.
7093  * For future use cases we should either initialize them dynamically based on
7094  * plane capabilities, or initialize this array to all formats, so the
7095  * internal DRM check will succeed, and let DC implement the proper check.
7096  */
7097 static const uint32_t rgb_formats[] = {
7098         DRM_FORMAT_XRGB8888,
7099         DRM_FORMAT_ARGB8888,
7100         DRM_FORMAT_RGBA8888,
7101         DRM_FORMAT_XRGB2101010,
7102         DRM_FORMAT_XBGR2101010,
7103         DRM_FORMAT_ARGB2101010,
7104         DRM_FORMAT_ABGR2101010,
7105         DRM_FORMAT_XRGB16161616,
7106         DRM_FORMAT_XBGR16161616,
7107         DRM_FORMAT_ARGB16161616,
7108         DRM_FORMAT_ABGR16161616,
7109         DRM_FORMAT_XBGR8888,
7110         DRM_FORMAT_ABGR8888,
7111         DRM_FORMAT_RGB565,
7112 };
7113
7114 static const uint32_t overlay_formats[] = {
7115         DRM_FORMAT_XRGB8888,
7116         DRM_FORMAT_ARGB8888,
7117         DRM_FORMAT_RGBA8888,
7118         DRM_FORMAT_XBGR8888,
7119         DRM_FORMAT_ABGR8888,
7120         DRM_FORMAT_RGB565
7121 };
7122
7123 static const u32 cursor_formats[] = {
7124         DRM_FORMAT_ARGB8888
7125 };
7126
7127 static int get_plane_formats(const struct drm_plane *plane,
7128                              const struct dc_plane_cap *plane_cap,
7129                              uint32_t *formats, int max_formats)
7130 {
7131         int i, num_formats = 0;
7132
7133         /*
7134          * TODO: Query support for each group of formats directly from
7135          * DC plane caps. This will require adding more formats to the
7136          * caps list.
7137          */
7138
7139         switch (plane->type) {
7140         case DRM_PLANE_TYPE_PRIMARY:
7141                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7142                         if (num_formats >= max_formats)
7143                                 break;
7144
7145                         formats[num_formats++] = rgb_formats[i];
7146                 }
7147
7148                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7149                         formats[num_formats++] = DRM_FORMAT_NV12;
7150                 if (plane_cap && plane_cap->pixel_format_support.p010)
7151                         formats[num_formats++] = DRM_FORMAT_P010;
7152                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7153                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7154                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7155                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7156                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7157                 }
7158                 break;
7159
7160         case DRM_PLANE_TYPE_OVERLAY:
7161                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7162                         if (num_formats >= max_formats)
7163                                 break;
7164
7165                         formats[num_formats++] = overlay_formats[i];
7166                 }
7167                 break;
7168
7169         case DRM_PLANE_TYPE_CURSOR:
7170                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7171                         if (num_formats >= max_formats)
7172                                 break;
7173
7174                         formats[num_formats++] = cursor_formats[i];
7175                 }
7176                 break;
7177         }
7178
7179         return num_formats;
7180 }
7181
7182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7183                                 struct drm_plane *plane,
7184                                 unsigned long possible_crtcs,
7185                                 const struct dc_plane_cap *plane_cap)
7186 {
7187         uint32_t formats[32];
7188         int num_formats;
7189         int res = -EPERM;
7190         unsigned int supported_rotations;
7191         uint64_t *modifiers = NULL;
7192
7193         num_formats = get_plane_formats(plane, plane_cap, formats,
7194                                         ARRAY_SIZE(formats));
7195
7196         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7197         if (res)
7198                 return res;
7199
7200         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7201                                        &dm_plane_funcs, formats, num_formats,
7202                                        modifiers, plane->type, NULL);
7203         kfree(modifiers);
7204         if (res)
7205                 return res;
7206
7207         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7208             plane_cap && plane_cap->per_pixel_alpha) {
7209                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7210                                           BIT(DRM_MODE_BLEND_PREMULTI);
7211
7212                 drm_plane_create_alpha_property(plane);
7213                 drm_plane_create_blend_mode_property(plane, blend_caps);
7214         }
7215
7216         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7217             plane_cap &&
7218             (plane_cap->pixel_format_support.nv12 ||
7219              plane_cap->pixel_format_support.p010)) {
7220                 /* This only affects YUV formats. */
7221                 drm_plane_create_color_properties(
7222                         plane,
7223                         BIT(DRM_COLOR_YCBCR_BT601) |
7224                         BIT(DRM_COLOR_YCBCR_BT709) |
7225                         BIT(DRM_COLOR_YCBCR_BT2020),
7226                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7227                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7228                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7229         }
7230
7231         supported_rotations =
7232                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7233                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7234
7235         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7236             plane->type != DRM_PLANE_TYPE_CURSOR)
7237                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7238                                                    supported_rotations);
7239
7240         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7241
7242         /* Create (reset) the plane state */
7243         if (plane->funcs->reset)
7244                 plane->funcs->reset(plane);
7245
7246         return 0;
7247 }
7248
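/*
 * Each CRTC is created together with its own dedicated cursor plane, since
 * the cursor is handled per pipe; both are handed to
 * drm_crtc_init_with_planes() along with the primary plane passed in.
 */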
7249 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7250                                struct drm_plane *plane,
7251                                uint32_t crtc_index)
7252 {
7253         struct amdgpu_crtc *acrtc = NULL;
7254         struct drm_plane *cursor_plane;
7255
7256         int res = -ENOMEM;
7257
7258         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7259         if (!cursor_plane)
7260                 goto fail;
7261
7262         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7263         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7264
7265         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7266         if (!acrtc)
7267                 goto fail;
7268
7269         res = drm_crtc_init_with_planes(
7270                         dm->ddev,
7271                         &acrtc->base,
7272                         plane,
7273                         cursor_plane,
7274                         &amdgpu_dm_crtc_funcs, NULL);
7275
7276         if (res)
7277                 goto fail;
7278
7279         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7280
7281         /* Create (reset) the crtc state */
7282         if (acrtc->base.funcs->reset)
7283                 acrtc->base.funcs->reset(&acrtc->base);
7284
7285         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7286         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7287
7288         acrtc->crtc_id = crtc_index;
7289         acrtc->base.enabled = false;
7290         acrtc->otg_inst = -1;
7291
7292         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7293         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7294                                    true, MAX_COLOR_LUT_ENTRIES);
7295         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7296
7297         return 0;
7298
7299 fail:
7300         kfree(acrtc);
7301         kfree(cursor_plane);
7302         return res;
7303 }
7304
7305
7306 static int to_drm_connector_type(enum signal_type st)
7307 {
7308         switch (st) {
7309         case SIGNAL_TYPE_HDMI_TYPE_A:
7310                 return DRM_MODE_CONNECTOR_HDMIA;
7311         case SIGNAL_TYPE_EDP:
7312                 return DRM_MODE_CONNECTOR_eDP;
7313         case SIGNAL_TYPE_LVDS:
7314                 return DRM_MODE_CONNECTOR_LVDS;
7315         case SIGNAL_TYPE_RGB:
7316                 return DRM_MODE_CONNECTOR_VGA;
7317         case SIGNAL_TYPE_DISPLAY_PORT:
7318         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7319                 return DRM_MODE_CONNECTOR_DisplayPort;
7320         case SIGNAL_TYPE_DVI_DUAL_LINK:
7321         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7322                 return DRM_MODE_CONNECTOR_DVID;
7323         case SIGNAL_TYPE_VIRTUAL:
7324                 return DRM_MODE_CONNECTOR_VIRTUAL;
7325
7326         default:
7327                 return DRM_MODE_CONNECTOR_Unknown;
7328         }
7329 }
7330
7331 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7332 {
7333         struct drm_encoder *encoder;
7334
7335         /* There is only one encoder per connector */
7336         drm_connector_for_each_possible_encoder(connector, encoder)
7337                 return encoder;
7338
7339         return NULL;
7340 }
7341
7342 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7343 {
7344         struct drm_encoder *encoder;
7345         struct amdgpu_encoder *amdgpu_encoder;
7346
7347         encoder = amdgpu_dm_connector_to_encoder(connector);
7348
7349         if (encoder == NULL)
7350                 return;
7351
7352         amdgpu_encoder = to_amdgpu_encoder(encoder);
7353
7354         amdgpu_encoder->native_mode.clock = 0;
7355
7356         if (!list_empty(&connector->probed_modes)) {
7357                 struct drm_display_mode *preferred_mode = NULL;
7358
7359                 list_for_each_entry(preferred_mode,
7360                                     &connector->probed_modes,
7361                                     head) {
7362                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7363                                 amdgpu_encoder->native_mode = *preferred_mode;
7364
7365                         break;
7366                 }
7367
7368         }
7369 }
7370
7371 static struct drm_display_mode *
7372 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7373                              char *name,
7374                              int hdisplay, int vdisplay)
7375 {
7376         struct drm_device *dev = encoder->dev;
7377         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7378         struct drm_display_mode *mode = NULL;
7379         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7380
7381         mode = drm_mode_duplicate(dev, native_mode);
7382
7383         if (mode == NULL)
7384                 return NULL;
7385
7386         mode->hdisplay = hdisplay;
7387         mode->vdisplay = vdisplay;
7388         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7389         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7390
7391         return mode;
7392
7393 }
7394
7395 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7396                                                  struct drm_connector *connector)
7397 {
7398         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7399         struct drm_display_mode *mode = NULL;
7400         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7401         struct amdgpu_dm_connector *amdgpu_dm_connector =
7402                                 to_amdgpu_dm_connector(connector);
7403         int i;
7404         int n;
7405         struct mode_size {
7406                 char name[DRM_DISPLAY_MODE_LEN];
7407                 int w;
7408                 int h;
7409         } common_modes[] = {
7410                 {  "640x480",  640,  480},
7411                 {  "800x600",  800,  600},
7412                 { "1024x768", 1024,  768},
7413                 { "1280x720", 1280,  720},
7414                 { "1280x800", 1280,  800},
7415                 {"1280x1024", 1280, 1024},
7416                 { "1440x900", 1440,  900},
7417                 {"1680x1050", 1680, 1050},
7418                 {"1600x1200", 1600, 1200},
7419                 {"1920x1080", 1920, 1080},
7420                 {"1920x1200", 1920, 1200}
7421         };
7422
7423         n = ARRAY_SIZE(common_modes);
7424
7425         for (i = 0; i < n; i++) {
7426                 struct drm_display_mode *curmode = NULL;
7427                 bool mode_existed = false;
7428
7429                 if (common_modes[i].w > native_mode->hdisplay ||
7430                     common_modes[i].h > native_mode->vdisplay ||
7431                    (common_modes[i].w == native_mode->hdisplay &&
7432                     common_modes[i].h == native_mode->vdisplay))
7433                         continue;
7434
7435                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7436                         if (common_modes[i].w == curmode->hdisplay &&
7437                             common_modes[i].h == curmode->vdisplay) {
7438                                 mode_existed = true;
7439                                 break;
7440                         }
7441                 }
7442
7443                 if (mode_existed)
7444                         continue;
7445
7446                 mode = amdgpu_dm_create_common_mode(encoder,
7447                                 common_modes[i].name, common_modes[i].w,
7448                                 common_modes[i].h);
                /* Skip on allocation failure; drm_mode_duplicate() can
                 * return NULL, and adding a NULL mode would oops. */
                if (!mode)
                        continue;
7449                 drm_mode_probed_add(connector, mode);
7450                 amdgpu_dm_connector->num_modes++;
7451         }
7452 }
7453
7454 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7455                                               struct edid *edid)
7456 {
7457         struct amdgpu_dm_connector *amdgpu_dm_connector =
7458                         to_amdgpu_dm_connector(connector);
7459
7460         if (edid) {
7461                 /* empty probed_modes */
7462                 INIT_LIST_HEAD(&connector->probed_modes);
7463                 amdgpu_dm_connector->num_modes =
7464                                 drm_add_edid_modes(connector, edid);
7465
7466                 /* Sort the probed modes before calling
7467                  * amdgpu_dm_get_native_mode(), since an EDID can have
7468                  * more than one preferred mode. Modes that appear
7469                  * later in the probed mode list can be of a higher,
7470                  * preferred resolution: for example, 3840x2160 as the
7471                  * base EDID preferred timing and 4096x2160 as the
7472                  * preferred resolution in a later DID extension block.
7473                  */
7474                 drm_mode_sort(&connector->probed_modes);
7475                 amdgpu_dm_get_native_mode(connector);
7476
7477                 /* Freesync capabilities are reset by calling
7478                  * drm_add_edid_modes() and need to be
7479                  * restored here.
7480                  */
7481                 amdgpu_dm_update_freesync_caps(connector, edid);
7482         } else {
7483                 amdgpu_dm_connector->num_modes = 0;
7484         }
7485 }
7486
7487 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7488                               struct drm_display_mode *mode)
7489 {
7490         struct drm_display_mode *m;
7491
7492         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7493                 if (drm_mode_equal(m, mode))
7494                         return true;
7495         }
7496
7497         return false;
7498 }
7499
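/*
 * For each standard rate the required vtotal follows from keeping the pixel
 * clock and htotal fixed:  vtotal = clock * 1000 * 1000 / (rate * htotal),
 * with the mode clock in kHz and the rate in mHz. Illustrative example:
 * a 1080p60 CEA mode with clock = 148500 and htotal = 2200 has vtotal = 1125;
 * retargeting it to 48 Hz gives
 *
 *     148500 * 1000 * 1000 / (48000 * 2200) = 1406 (truncated),
 *
 * i.e. 281 extra front-porch lines added via the vsync_start/vsync_end/
 * vtotal adjustments below.
 */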
7500 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7501 {
7502         const struct drm_display_mode *m;
7503         struct drm_display_mode *new_mode;
7504         uint i;
7505         uint32_t new_modes_count = 0;
7506
7507         /* Standard FPS values
7508          *
7509          * 23.976   - TV/NTSC
7510          * 24       - Cinema
7511          * 25       - TV/PAL
7512          * 29.97    - TV/NTSC
7513          * 30       - TV/NTSC
7514          * 48       - Cinema HFR
7515          * 50       - TV/PAL
7516          * 60       - Commonly used
7517          * 48,72,96 - Multiples of 24
7518          */
7519         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7520                                          48000, 50000, 60000, 72000, 96000 };
7521
7522         /*
7523          * Find mode with highest refresh rate with the same resolution
7524          * as the preferred mode. Some monitors report a preferred mode
7525          * with lower resolution than the highest refresh rate supported.
7526          */
7527
7528         m = get_highest_refresh_rate_mode(aconnector, true);
7529         if (!m)
7530                 return 0;
7531
7532         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7533                 uint64_t target_vtotal, target_vtotal_diff;
7534                 uint64_t num, den;
7535
7536                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7537                         continue;
7538
7539                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7540                     common_rates[i] > aconnector->max_vfreq * 1000)
7541                         continue;
7542
7543                 num = (unsigned long long)m->clock * 1000 * 1000;
7544                 den = common_rates[i] * (unsigned long long)m->htotal;
7545                 target_vtotal = div_u64(num, den);
7546                 target_vtotal_diff = target_vtotal - m->vtotal;
7547
7548                 /* Check for illegal modes */
7549                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7550                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7551                     m->vtotal + target_vtotal_diff < m->vsync_end)
7552                         continue;
7553
7554                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7555                 if (!new_mode)
7556                         goto out;
7557
7558                 new_mode->vtotal += (u16)target_vtotal_diff;
7559                 new_mode->vsync_start += (u16)target_vtotal_diff;
7560                 new_mode->vsync_end += (u16)target_vtotal_diff;
7561                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7562                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7563
7564                 if (!is_duplicate_mode(aconnector, new_mode)) {
7565                         drm_mode_probed_add(&aconnector->base, new_mode);
7566                         new_modes_count += 1;
7567                 } else
7568                         drm_mode_destroy(aconnector->base.dev, new_mode);
7569         }
7570  out:
7571         return new_modes_count;
7572 }
7573
7574 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7575                                                    struct edid *edid)
7576 {
7577         struct amdgpu_dm_connector *amdgpu_dm_connector =
7578                 to_amdgpu_dm_connector(connector);
7579
7580         if (!(amdgpu_freesync_vid_mode && edid))
7581                 return;
7582
7583         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7584                 amdgpu_dm_connector->num_modes +=
7585                         add_fs_modes(amdgpu_dm_connector);
7586 }
7587
7588 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7589 {
7590         struct amdgpu_dm_connector *amdgpu_dm_connector =
7591                         to_amdgpu_dm_connector(connector);
7592         struct drm_encoder *encoder;
7593         struct edid *edid = amdgpu_dm_connector->edid;
7594
7595         encoder = amdgpu_dm_connector_to_encoder(connector);
7596
7597         if (!drm_edid_is_valid(edid)) {
7598                 amdgpu_dm_connector->num_modes =
7599                                 drm_add_modes_noedid(connector, 640, 480);
7600         } else {
7601                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7602                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7603                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7604         }
7605         amdgpu_dm_fbc_init(connector);
7606
7607         return amdgpu_dm_connector->num_modes;
7608 }
7609
7610 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7611                                      struct amdgpu_dm_connector *aconnector,
7612                                      int connector_type,
7613                                      struct dc_link *link,
7614                                      int link_index)
7615 {
7616         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7617
7618         /*
7619          * Some of the properties below require access to state, like bpc.
7620          * Allocate some default initial connector state with our reset helper.
7621          */
7622         if (aconnector->base.funcs->reset)
7623                 aconnector->base.funcs->reset(&aconnector->base);
7624
7625         aconnector->connector_id = link_index;
7626         aconnector->dc_link = link;
7627         aconnector->base.interlace_allowed = false;
7628         aconnector->base.doublescan_allowed = false;
7629         aconnector->base.stereo_allowed = false;
7630         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7631         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7632         aconnector->audio_inst = -1;
7633         mutex_init(&aconnector->hpd_lock);
7634
7635         /*
7636          * Configure HPD hot-plug support: connector->polled defaults to 0,
7637          * which means HPD hot plug is not supported.
7638          */
7639         switch (connector_type) {
7640         case DRM_MODE_CONNECTOR_HDMIA:
7641                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7642                 aconnector->base.ycbcr_420_allowed =
7643                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7644                 break;
7645         case DRM_MODE_CONNECTOR_DisplayPort:
7646                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7647                 aconnector->base.ycbcr_420_allowed =
7648                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7649                 break;
7650         case DRM_MODE_CONNECTOR_DVID:
7651                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7652                 break;
7653         default:
7654                 break;
7655         }
7656
7657         drm_object_attach_property(&aconnector->base.base,
7658                                 dm->ddev->mode_config.scaling_mode_property,
7659                                 DRM_MODE_SCALE_NONE);
7660
7661         drm_object_attach_property(&aconnector->base.base,
7662                                 adev->mode_info.underscan_property,
7663                                 UNDERSCAN_OFF);
7664         drm_object_attach_property(&aconnector->base.base,
7665                                 adev->mode_info.underscan_hborder_property,
7666                                 0);
7667         drm_object_attach_property(&aconnector->base.base,
7668                                 adev->mode_info.underscan_vborder_property,
7669                                 0);
7670
7671         if (!aconnector->mst_port)
7672                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7673
7674         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7675         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7676         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7677
7678         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7679             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7680                 drm_object_attach_property(&aconnector->base.base,
7681                                 adev->mode_info.abm_level_property, 0);
7682         }
7683
7684         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7685             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7686             connector_type == DRM_MODE_CONNECTOR_eDP) {
7687                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7688
7689                 if (!aconnector->mst_port)
7690                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7691
7692 #ifdef CONFIG_DRM_AMD_DC_HDCP
7693                 if (adev->dm.hdcp_workqueue)
7694                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7695 #endif
7696         }
7697 }
7698
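/*
 * i2c algorithm glue: translate the Linux i2c_msg array 1:1 into a DC
 * i2c_command (one payload per message, direction taken from I2C_M_RD) and
 * submit it through DC. Per i2c-core convention, success returns the number
 * of messages transferred; any failure returns -EIO.
 */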
7699 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7700                               struct i2c_msg *msgs, int num)
7701 {
7702         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7703         struct ddc_service *ddc_service = i2c->ddc_service;
7704         struct i2c_command cmd;
7705         int i;
7706         int result = -EIO;
7707
7708         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7709
7710         if (!cmd.payloads)
7711                 return result;
7712
7713         cmd.number_of_payloads = num;
7714         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7715         cmd.speed = 100;
7716
7717         for (i = 0; i < num; i++) {
7718                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7719                 cmd.payloads[i].address = msgs[i].addr;
7720                 cmd.payloads[i].length = msgs[i].len;
7721                 cmd.payloads[i].data = msgs[i].buf;
7722         }
7723
7724         if (dc_submit_i2c(
7725                         ddc_service->ctx->dc,
7726                         ddc_service->ddc_pin->hw_info.ddc_channel,
7727                         &cmd))
7728                 result = num;
7729
7730         kfree(cmd.payloads);
7731         return result;
7732 }
7733
7734 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7735 {
7736         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7737 }
7738
7739 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7740         .master_xfer = amdgpu_dm_i2c_xfer,
7741         .functionality = amdgpu_dm_i2c_func,
7742 };
7743
7744 static struct amdgpu_i2c_adapter *
7745 create_i2c(struct ddc_service *ddc_service,
7746            int link_index,
7747            int *res)
7748 {
7749         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7750         struct amdgpu_i2c_adapter *i2c;
7751
7752         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7753         if (!i2c)
7754                 return NULL;
7755         i2c->base.owner = THIS_MODULE;
7756         i2c->base.class = I2C_CLASS_DDC;
7757         i2c->base.dev.parent = &adev->pdev->dev;
7758         i2c->base.algo = &amdgpu_dm_i2c_algo;
7759         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7760         i2c_set_adapdata(&i2c->base, i2c);
7761         i2c->ddc_service = ddc_service;
7762         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7763
7764         return i2c;
7765 }
7766
7767
7768 /*
7769  * Note: this function assumes that dc_link_detect() was called for the
7770  * dc_link which will be represented by this aconnector.
7771  */
7772 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7773                                     struct amdgpu_dm_connector *aconnector,
7774                                     uint32_t link_index,
7775                                     struct amdgpu_encoder *aencoder)
7776 {
7777         int res = 0;
7778         int connector_type;
7779         struct dc *dc = dm->dc;
7780         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7781         struct amdgpu_i2c_adapter *i2c;
7782
7783         link->priv = aconnector;
7784
7785         DRM_DEBUG_DRIVER("%s()\n", __func__);
7786
7787         i2c = create_i2c(link->ddc, link->link_index, &res);
7788         if (!i2c) {
7789                 DRM_ERROR("Failed to create i2c adapter data\n");
7790                 return -ENOMEM;
7791         }
7792
7793         aconnector->i2c = i2c;
7794         res = i2c_add_adapter(&i2c->base);
7795
7796         if (res) {
7797                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7798                 goto out_free;
7799         }
7800
7801         connector_type = to_drm_connector_type(link->connector_signal);
7802
7803         res = drm_connector_init_with_ddc(
7804                         dm->ddev,
7805                         &aconnector->base,
7806                         &amdgpu_dm_connector_funcs,
7807                         connector_type,
7808                         &i2c->base);
7809
7810         if (res) {
7811                 DRM_ERROR("connector_init failed\n");
7812                 aconnector->connector_id = -1;
7813                 goto out_free;
7814         }
7815
7816         drm_connector_helper_add(
7817                         &aconnector->base,
7818                         &amdgpu_dm_connector_helper_funcs);
7819
7820         amdgpu_dm_connector_init_helper(
7821                 dm,
7822                 aconnector,
7823                 connector_type,
7824                 link,
7825                 link_index);
7826
7827         drm_connector_attach_encoder(
7828                 &aconnector->base, &aencoder->base);
7829
7830         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7831                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7832                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7833
7834 out_free:
7835         if (res) {
7836                 kfree(i2c);
7837                 aconnector->i2c = NULL;
7838         }
7839         return res;
7840 }
7841
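/*
 * The returned mask is (1 << num_crtc) - 1, i.e. an encoder may be routed
 * to any CRTC, capped at the 6 CRTCs enumerated below.
 */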
7842 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7843 {
7844         switch (adev->mode_info.num_crtc) {
7845         case 1:
7846                 return 0x1;
7847         case 2:
7848                 return 0x3;
7849         case 3:
7850                 return 0x7;
7851         case 4:
7852                 return 0xf;
7853         case 5:
7854                 return 0x1f;
7855         case 6:
7856         default:
7857                 return 0x3f;
7858         }
7859 }
7860
7861 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7862                                   struct amdgpu_encoder *aencoder,
7863                                   uint32_t link_index)
7864 {
7865         struct amdgpu_device *adev = drm_to_adev(dev);
7866
7867         int res = drm_encoder_init(dev,
7868                                    &aencoder->base,
7869                                    &amdgpu_dm_encoder_funcs,
7870                                    DRM_MODE_ENCODER_TMDS,
7871                                    NULL);
7872
7873         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7874
7875         if (!res)
7876                 aencoder->encoder_id = link_index;
7877         else
7878                 aencoder->encoder_id = -1;
7879
7880         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7881
7882         return res;
7883 }
7884
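/*
 * Enabling takes pageflip (and, with secure display, vline0) IRQ references
 * only after turning the CRTC's vblank machinery on; disabling drops the
 * references in the reverse order before turning vblank off.
 */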
7885 static void manage_dm_interrupts(struct amdgpu_device *adev,
7886                                  struct amdgpu_crtc *acrtc,
7887                                  bool enable)
7888 {
7889         /*
7890          * We have no guarantee that the frontend index maps to the same
7891          * backend index - some even map to more than one.
7892          *
7893          * TODO: Use a different interrupt or check DC itself for the mapping.
7894          */
7895         int irq_type =
7896                 amdgpu_display_crtc_idx_to_irq_type(
7897                         adev,
7898                         acrtc->crtc_id);
7899
7900         if (enable) {
7901                 drm_crtc_vblank_on(&acrtc->base);
7902                 amdgpu_irq_get(
7903                         adev,
7904                         &adev->pageflip_irq,
7905                         irq_type);
7906 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7907                 amdgpu_irq_get(
7908                         adev,
7909                         &adev->vline0_irq,
7910                         irq_type);
7911 #endif
7912         } else {
7913 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7914                 amdgpu_irq_put(
7915                         adev,
7916                         &adev->vline0_irq,
7917                         irq_type);
7918 #endif
7919                 amdgpu_irq_put(
7920                         adev,
7921                         &adev->pageflip_irq,
7922                         irq_type);
7923                 drm_crtc_vblank_off(&acrtc->base);
7924         }
7925 }
7926
7927 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7928                                       struct amdgpu_crtc *acrtc)
7929 {
7930         int irq_type =
7931                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7932
7933         /*
7934          * This reads the current state for the IRQ and forcibly reapplies
7935          * the setting to hardware.
7936          */
7937         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7938 }
7939
7940 static bool
7941 is_scaling_state_different(const struct dm_connector_state *dm_state,
7942                            const struct dm_connector_state *old_dm_state)
7943 {
7944         if (dm_state->scaling != old_dm_state->scaling)
7945                 return true;
7946         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7947                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7948                         return true;
7949         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7950                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7951                         return true;
7952         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7953                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7954                 return true;
7955         return false;
7956 }
7957
7958 #ifdef CONFIG_DRM_AMD_DC_HDCP
7959 static bool is_content_protection_different(struct drm_connector_state *state,
7960                                             const struct drm_connector_state *old_state,
7961                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7962 {
7963         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7964         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7965
7966         /* Handle: Type0/1 change */
7967         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7968             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7969                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7970                 return true;
7971         }
7972
7973         /* CP is being re-enabled; ignore this transition.
7974          *
7975          * Handles:     ENABLED -> DESIRED
7976          */
7977         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7978             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7979                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7980                 return false;
7981         }
7982
7983         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7984          *
7985          * Handles:     UNDESIRED -> ENABLED
7986          */
7987         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7988             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7989                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7990
7991         /* Check if something is connected or enabled; otherwise we would start
7992          * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7993          *
7994          * Handles:     DESIRED -> DESIRED (Special case)
7995          */
7996         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7997             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7998                 dm_con_state->update_hdcp = false;
7999                 return true;
8000         }
8001
8002         /*
8003          * Handles:     UNDESIRED -> UNDESIRED
8004          *              DESIRED -> DESIRED
8005          *              ENABLED -> ENABLED
8006          */
8007         if (old_state->content_protection == state->content_protection)
8008                 return false;
8009
8010         /*
8011          * Handles:     UNDESIRED -> DESIRED
8012          *              DESIRED -> UNDESIRED
8013          *              ENABLED -> UNDESIRED
8014          */
8015         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8016                 return true;
8017
8018         /*
8019          * Handles:     DESIRED -> ENABLED
8020          */
8021         return false;
8022 }
8023
8024 #endif
8025 static void remove_stream(struct amdgpu_device *adev,
8026                           struct amdgpu_crtc *acrtc,
8027                           struct dc_stream_state *stream)
8028 {
8029         /* this is the update mode case */
8030
8031         acrtc->otg_inst = -1;
8032         acrtc->enabled = false;
8033 }
8034
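/*
 * Compute the DC cursor position from the plane state. A cursor hanging off
 * the top or left edge is clamped to the screen and the overhang is folded
 * into the hotspot instead; e.g. crtc_x = -10 yields x = 0, x_hotspot = 10,
 * which keeps the visible part of the image in place.
 */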
8035 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8036                                struct dc_cursor_position *position)
8037 {
8038         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8039         int x, y;
8040         int xorigin = 0, yorigin = 0;
8041
8042         if (!crtc || !plane->state->fb)
8043                 return 0;
8044
8045         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8046             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8047                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8048                           __func__,
8049                           plane->state->crtc_w,
8050                           plane->state->crtc_h);
8051                 return -EINVAL;
8052         }
8053
8054         x = plane->state->crtc_x;
8055         y = plane->state->crtc_y;
8056
8057         if (x <= -amdgpu_crtc->max_cursor_width ||
8058             y <= -amdgpu_crtc->max_cursor_height)
8059                 return 0;
8060
8061         if (x < 0) {
8062                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8063                 x = 0;
8064         }
8065         if (y < 0) {
8066                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8067                 y = 0;
8068         }
8069         position->enable = true;
8070         position->translate_by_source = true;
8071         position->x = x;
8072         position->y = y;
8073         position->x_hotspot = xorigin;
8074         position->y_hotspot = yorigin;
8075
8076         return 0;
8077 }
8078
8079 static void handle_cursor_update(struct drm_plane *plane,
8080                                  struct drm_plane_state *old_plane_state)
8081 {
8082         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8083         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8084         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8085         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8086         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8087         uint64_t address = afb ? afb->address : 0;
8088         struct dc_cursor_position position = {0};
8089         struct dc_cursor_attributes attributes;
8090         int ret;
8091
8092         if (!plane->state->fb && !old_plane_state->fb)
8093                 return;
8094
8095         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8096                       __func__,
8097                       amdgpu_crtc->crtc_id,
8098                       plane->state->crtc_w,
8099                       plane->state->crtc_h);
8100
8101         ret = get_cursor_position(plane, crtc, &position);
8102         if (ret)
8103                 return;
8104
8105         if (!position.enable) {
8106                 /* turn off cursor */
8107                 if (crtc_state && crtc_state->stream) {
8108                         mutex_lock(&adev->dm.dc_lock);
8109                         dc_stream_set_cursor_position(crtc_state->stream,
8110                                                       &position);
8111                         mutex_unlock(&adev->dm.dc_lock);
8112                 }
8113                 return;
8114         }
8115
8116         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8117         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8118
8119         memset(&attributes, 0, sizeof(attributes));
8120         attributes.address.high_part = upper_32_bits(address);
8121         attributes.address.low_part  = lower_32_bits(address);
8122         attributes.width             = plane->state->crtc_w;
8123         attributes.height            = plane->state->crtc_h;
8124         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8125         attributes.rotation_angle    = 0;
8126         attributes.attribute_flags.value = 0;
8127
8128         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8129
8130         if (crtc_state->stream) {
8131                 mutex_lock(&adev->dm.dc_lock);
8132                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8133                                                          &attributes))
8134                         DRM_ERROR("DC failed to set cursor attributes\n");
8135
8136                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8137                                                    &position))
8138                         DRM_ERROR("DC failed to set cursor position\n");
8139                 mutex_unlock(&adev->dm.dc_lock);
8140         }
8141 }
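
/*
 * Minimal sketch of the locking pattern handle_cursor_update() follows for
 * every DC cursor call: all dc_stream_set_cursor_* calls must run under
 * adev->dm.dc_lock, and a zeroed dc_cursor_position (enable == false)
 * turns the cursor off. Hypothetical helper, for illustration only.
 */
static inline void example_cursor_off(struct amdgpu_device *adev,
				      struct dc_stream_state *stream)
{
	struct dc_cursor_position position = {0};

	mutex_lock(&adev->dm.dc_lock);
	dc_stream_set_cursor_position(stream, &position);
	mutex_unlock(&adev->dm.dc_lock);
}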
8142
8143 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8144 {
8146         assert_spin_locked(&acrtc->base.dev->event_lock);
8147         WARN_ON(acrtc->event);
8148
8149         acrtc->event = acrtc->base.state->event;
8150
8151         /* Set the flip status */
8152         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8153
8154         /* Mark this event as consumed */
8155         acrtc->base.state->event = NULL;
8156
8157         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8158                      acrtc->crtc_id);
8159 }
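
/*
 * Sketch of the calling convention for prepare_flip_isr(): the caller must
 * hold dev->event_lock and own a vblank reference, exactly as
 * amdgpu_dm_commit_planes() does below. Hypothetical helper, for
 * illustration only.
 */
static inline void example_submit_flip(struct drm_crtc *crtc,
				       struct amdgpu_crtc *acrtc)
{
	unsigned long flags;

	drm_crtc_vblank_get(crtc);
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	prepare_flip_isr(acrtc);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}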
8160
8161 static void update_freesync_state_on_stream(
8162         struct amdgpu_display_manager *dm,
8163         struct dm_crtc_state *new_crtc_state,
8164         struct dc_stream_state *new_stream,
8165         struct dc_plane_state *surface,
8166         u32 flip_timestamp_in_us)
8167 {
8168         struct mod_vrr_params vrr_params;
8169         struct dc_info_packet vrr_infopacket = {0};
8170         struct amdgpu_device *adev = dm->adev;
8171         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8172         unsigned long flags;
8173         bool pack_sdp_v1_3 = false;
8174
8175         if (!new_stream)
8176                 return;
8177
8178         /*
8179          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8180          * For now it's sufficient to just guard against these conditions.
8181          */
8182
8183         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8184                 return;
8185
8186         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8187         vrr_params = acrtc->dm_irq_params.vrr_params;
8188
8189         if (surface) {
8190                 mod_freesync_handle_preflip(
8191                         dm->freesync_module,
8192                         surface,
8193                         new_stream,
8194                         flip_timestamp_in_us,
8195                         &vrr_params);
8196
8197                 if (adev->family < AMDGPU_FAMILY_AI &&
8198                     amdgpu_dm_vrr_active(new_crtc_state)) {
8199                         mod_freesync_handle_v_update(dm->freesync_module,
8200                                                      new_stream, &vrr_params);
8201
8202                         /* Need to call this before the frame ends. */
8203                         dc_stream_adjust_vmin_vmax(dm->dc,
8204                                                    new_crtc_state->stream,
8205                                                    &vrr_params.adjust);
8206                 }
8207         }
8208
8209         mod_freesync_build_vrr_infopacket(
8210                 dm->freesync_module,
8211                 new_stream,
8212                 &vrr_params,
8213                 PACKET_TYPE_VRR,
8214                 TRANSFER_FUNC_UNKNOWN,
8215                 &vrr_infopacket,
8216                 pack_sdp_v1_3);
8217
8218         new_crtc_state->freesync_timing_changed |=
8219                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8220                         &vrr_params.adjust,
8221                         sizeof(vrr_params.adjust)) != 0);
8222
8223         new_crtc_state->freesync_vrr_info_changed |=
8224                 (memcmp(&new_crtc_state->vrr_infopacket,
8225                         &vrr_infopacket,
8226                         sizeof(vrr_infopacket)) != 0);
8227
8228         acrtc->dm_irq_params.vrr_params = vrr_params;
8229         new_crtc_state->vrr_infopacket = vrr_infopacket;
8230
8231         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8232         new_stream->vrr_infopacket = vrr_infopacket;
8233
8234         if (new_crtc_state->freesync_vrr_info_changed)
8235                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8236                               new_crtc_state->base.crtc->base.id,
8237                               (int)new_crtc_state->base.vrr_enabled,
8238                               (int)vrr_params.state);
8239
8240         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8241 }
8242
8243 static void update_stream_irq_parameters(
8244         struct amdgpu_display_manager *dm,
8245         struct dm_crtc_state *new_crtc_state)
8246 {
8247         struct dc_stream_state *new_stream = new_crtc_state->stream;
8248         struct mod_vrr_params vrr_params;
8249         struct mod_freesync_config config = new_crtc_state->freesync_config;
8250         struct amdgpu_device *adev = dm->adev;
8251         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8252         unsigned long flags;
8253
8254         if (!new_stream)
8255                 return;
8256
8257         /*
8258          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8259          * For now it's sufficient to just guard against these conditions.
8260          */
8261         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8262                 return;
8263
8264         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8265         vrr_params = acrtc->dm_irq_params.vrr_params;
8266
8267         if (new_crtc_state->vrr_supported &&
8268             config.min_refresh_in_uhz &&
8269             config.max_refresh_in_uhz) {
8270                 /*
8271                  * if freesync compatible mode was set, config.state will be set
8272                  * in atomic check
8273                  */
8274                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8275                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8276                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8277                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8278                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8279                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8280                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8281                 } else {
8282                         config.state = new_crtc_state->base.vrr_enabled ?
8283                                                      VRR_STATE_ACTIVE_VARIABLE :
8284                                                      VRR_STATE_INACTIVE;
8285                 }
8286         } else {
8287                 config.state = VRR_STATE_UNSUPPORTED;
8288         }
8289
8290         mod_freesync_build_vrr_params(dm->freesync_module,
8291                                       new_stream,
8292                                       &config, &vrr_params);
8293
8294         new_crtc_state->freesync_timing_changed |=
8295                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8296                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8297
8298         new_crtc_state->freesync_config = config;
8299         /* Copy state for access from DM IRQ handler */
8300         acrtc->dm_irq_params.freesync_config = config;
8301         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8302         acrtc->dm_irq_params.vrr_params = vrr_params;
8303         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8304 }
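
/*
 * Sketch of the VRR state decision applied above, assuming the
 * mod_vrr_state enumerators from mod_freesync.h: unsupported streams are
 * pinned to VRR_STATE_UNSUPPORTED, otherwise the DRM vrr_enabled property
 * picks between variable and inactive. Hypothetical helper, for
 * illustration only; the fixed-refresh case is handled separately above.
 */
static inline enum mod_vrr_state example_pick_vrr_state(bool vrr_supported,
							bool vrr_enabled)
{
	if (!vrr_supported)
		return VRR_STATE_UNSUPPORTED;

	return vrr_enabled ? VRR_STATE_ACTIVE_VARIABLE : VRR_STATE_INACTIVE;
}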
8305
8306 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8307                                             struct dm_crtc_state *new_state)
8308 {
8309         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8310         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8311
8312         if (!old_vrr_active && new_vrr_active) {
8313                 /* Transition VRR inactive -> active:
8314          * While VRR is active, we must not disable the vblank irq, as a
8315          * re-enable after a disable would compute bogus vblank/pflip
8316          * timestamps if the re-enable happens inside the display front-porch.
8317                  *
8318                  * We also need vupdate irq for the actual core vblank handling
8319                  * at end of vblank.
8320                  */
8321                 dm_set_vupdate_irq(new_state->base.crtc, true);
8322                 drm_crtc_vblank_get(new_state->base.crtc);
8323                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8324                                  __func__, new_state->base.crtc->base.id);
8325         } else if (old_vrr_active && !new_vrr_active) {
8326                 /* Transition VRR active -> inactive:
8327                  * Allow vblank irq disable again for fixed refresh rate.
8328                  */
8329                 dm_set_vupdate_irq(new_state->base.crtc, false);
8330                 drm_crtc_vblank_put(new_state->base.crtc);
8331                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8332                                  __func__, new_state->base.crtc->base.id);
8333         }
8334 }
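
/*
 * Sketch of the invariant amdgpu_dm_handle_vrr_transition() maintains: the
 * vupdate irq and the vblank reference are always toggled together, so
 * every off->on transition is balanced by a matching on->off one.
 * Hypothetical wrapper, for illustration only.
 */
static inline void example_set_vrr_irqs(struct drm_crtc *crtc, bool enable)
{
	dm_set_vupdate_irq(crtc, enable);
	if (enable)
		drm_crtc_vblank_get(crtc);
	else
		drm_crtc_vblank_put(crtc);
}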
8335
8336 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8337 {
8338         struct drm_plane *plane;
8339         struct drm_plane_state *old_plane_state;
8340         int i;
8341
8342         /*
8343          * TODO: Make this per-stream so we don't issue redundant updates for
8344          * commits with multiple streams.
8345          */
8346         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8347                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8348                         handle_cursor_update(plane, old_plane_state);
8349 }
8350
8351 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8352                                     struct dc_state *dc_state,
8353                                     struct drm_device *dev,
8354                                     struct amdgpu_display_manager *dm,
8355                                     struct drm_crtc *pcrtc,
8356                                     bool wait_for_vblank)
8357 {
8358         uint32_t i;
8359         uint64_t timestamp_ns;
8360         struct drm_plane *plane;
8361         struct drm_plane_state *old_plane_state, *new_plane_state;
8362         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8363         struct drm_crtc_state *new_pcrtc_state =
8364                         drm_atomic_get_new_crtc_state(state, pcrtc);
8365         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8366         struct dm_crtc_state *dm_old_crtc_state =
8367                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8368         int planes_count = 0, vpos, hpos;
8369         long r;
8370         unsigned long flags;
8371         struct amdgpu_bo *abo;
8372         uint32_t target_vblank, last_flip_vblank;
8373         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8374         bool pflip_present = false;
8375         struct {
8376                 struct dc_surface_update surface_updates[MAX_SURFACES];
8377                 struct dc_plane_info plane_infos[MAX_SURFACES];
8378                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8379                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8380                 struct dc_stream_update stream_update;
8381         } *bundle;
8382
8383         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8384
8385         if (!bundle) {
8386                 dm_error("Failed to allocate update bundle\n");
8387                 goto cleanup;
8388         }
8389
8390         /*
8391          * Disable the cursor first if we're disabling all the planes.
8392          * It'll remain on the screen after the planes are re-enabled
8393          * if we don't.
8394          */
8395         if (acrtc_state->active_planes == 0)
8396                 amdgpu_dm_commit_cursors(state);
8397
8398         /* update planes when needed */
8399         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8400                 struct drm_crtc *crtc = new_plane_state->crtc;
8401                 struct drm_crtc_state *new_crtc_state;
8402                 struct drm_framebuffer *fb = new_plane_state->fb;
8403                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8404                 bool plane_needs_flip;
8405                 struct dc_plane_state *dc_plane;
8406                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8407
8408                 /* Cursor plane is handled after stream updates */
8409                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8410                         continue;
8411
8412                 if (!fb || !crtc || pcrtc != crtc)
8413                         continue;
8414
8415                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8416                 if (!new_crtc_state->active)
8417                         continue;
8418
8419                 dc_plane = dm_new_plane_state->dc_state;
8420
8421                 bundle->surface_updates[planes_count].surface = dc_plane;
8422                 if (new_pcrtc_state->color_mgmt_changed) {
8423                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8424                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8425                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8426                 }
8427
8428                 fill_dc_scaling_info(new_plane_state,
8429                                      &bundle->scaling_infos[planes_count]);
8430
8431                 bundle->surface_updates[planes_count].scaling_info =
8432                         &bundle->scaling_infos[planes_count];
8433
8434                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8435
8436                 pflip_present = pflip_present || plane_needs_flip;
8437
8438                 if (!plane_needs_flip) {
8439                         planes_count += 1;
8440                         continue;
8441                 }
8442
8443                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8444
8445                 /*
8446                  * Wait for all fences on this FB. Do a limited wait to avoid
8447                  * deadlock during GPU reset, when the fence may never signal
8448                  * while we hold the reservation lock on the BO.
8449                  */
8450                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8451                                                         false,
8452                                                         msecs_to_jiffies(5000));
8453                 if (unlikely(r <= 0))
8454                         DRM_ERROR("Waiting for fences timed out!");
8455
8456                 fill_dc_plane_info_and_addr(
8457                         dm->adev, new_plane_state,
8458                         afb->tiling_flags,
8459                         &bundle->plane_infos[planes_count],
8460                         &bundle->flip_addrs[planes_count].address,
8461                         afb->tmz_surface, false);
8462
8463                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8464                                  new_plane_state->plane->index,
8465                                  bundle->plane_infos[planes_count].dcc.enable);
8466
8467                 bundle->surface_updates[planes_count].plane_info =
8468                         &bundle->plane_infos[planes_count];
8469
8470                 /*
8471                  * Only allow immediate flips for fast updates that don't
8472                  * change FB pitch, DCC state, rotation or mirroring.
8473                  */
8474                 bundle->flip_addrs[planes_count].flip_immediate =
8475                         crtc->state->async_flip &&
8476                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8477
8478                 timestamp_ns = ktime_get_ns();
8479                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8480                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8481                 bundle->surface_updates[planes_count].surface = dc_plane;
8482
8483                 if (!bundle->surface_updates[planes_count].surface) {
8484                         DRM_ERROR("No surface for CRTC: id=%d\n",
8485                                         acrtc_attach->crtc_id);
8486                         continue;
8487                 }
8488
8489                 if (plane == pcrtc->primary)
8490                         update_freesync_state_on_stream(
8491                                 dm,
8492                                 acrtc_state,
8493                                 acrtc_state->stream,
8494                                 dc_plane,
8495                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8496
8497                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8498                                  __func__,
8499                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8500                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8501
8502                 planes_count += 1;
8503
8504         }
8505
8506         if (pflip_present) {
8507                 if (!vrr_active) {
8508                         /* Use old throttling in non-vrr fixed refresh rate mode
8509                          * to keep flip scheduling based on target vblank counts
8510                          * working in a backwards compatible way, e.g., for
8511                          * clients using the GLX_OML_sync_control extension or
8512                          * DRI3/Present extension with defined target_msc.
8513                          */
8514                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8515                 } else {
8517                         /* For variable refresh rate mode only:
8518                          * Get vblank of last completed flip to avoid > 1 vrr
8519                          * flips per video frame by use of throttling, but allow
8520                          * flip programming anywhere in the possibly large
8521                          * variable vrr vblank interval for fine-grained flip
8522                          * timing control and more opportunity to avoid stutter
8523                          * on late submission of flips.
8524                          */
8525                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8526                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8527                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8528                 }
8529
8530                 target_vblank = last_flip_vblank + wait_for_vblank;
8531
8532                 /*
8533                  * Wait until we're out of the vertical blank period before the one
8534                  * targeted by the flip
8535                  */
8536                 while ((acrtc_attach->enabled &&
8537                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8538                                                             0, &vpos, &hpos, NULL,
8539                                                             NULL, &pcrtc->hwmode)
8540                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8541                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8542                         (int)(target_vblank -
8543                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8544                         usleep_range(1000, 1100);
8545                 }
8546
8547                 /**
8548                  * Prepare the flip event for the pageflip interrupt to handle.
8549                  *
8550                  * This only works in the case where we've already turned on the
8551                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
8552                  * from 0 -> n planes we have to skip a hardware generated event
8553                  * and rely on sending it from software.
8554                  */
8555                 if (acrtc_attach->base.state->event &&
8556                     acrtc_state->active_planes > 0) {
8557                         drm_crtc_vblank_get(pcrtc);
8558
8559                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8560
8561                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8562                         prepare_flip_isr(acrtc_attach);
8563
8564                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8565                 }
8566
8567                 if (acrtc_state->stream) {
8568                         if (acrtc_state->freesync_vrr_info_changed)
8569                                 bundle->stream_update.vrr_infopacket =
8570                                         &acrtc_state->stream->vrr_infopacket;
8571                 }
8572         }
8573
8574         /* Update the planes if changed or disable if we don't have any. */
8575         if ((planes_count || acrtc_state->active_planes == 0) &&
8576                 acrtc_state->stream) {
8577                 bundle->stream_update.stream = acrtc_state->stream;
8578                 if (new_pcrtc_state->mode_changed) {
8579                         bundle->stream_update.src = acrtc_state->stream->src;
8580                         bundle->stream_update.dst = acrtc_state->stream->dst;
8581                 }
8582
8583                 if (new_pcrtc_state->color_mgmt_changed) {
8584                         /*
8585                          * TODO: This isn't fully correct since we've actually
8586                          * already modified the stream in place.
8587                          */
8588                         bundle->stream_update.gamut_remap =
8589                                 &acrtc_state->stream->gamut_remap_matrix;
8590                         bundle->stream_update.output_csc_transform =
8591                                 &acrtc_state->stream->csc_color_matrix;
8592                         bundle->stream_update.out_transfer_func =
8593                                 acrtc_state->stream->out_transfer_func;
8594                 }
8595
8596                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8597                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8598                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8599
8600                 /*
8601                  * If FreeSync state on the stream has changed then we need to
8602                  * re-adjust the min/max bounds now that DC doesn't handle this
8603                  * as part of commit.
8604                  */
8605                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8606                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8607                         dc_stream_adjust_vmin_vmax(
8608                                 dm->dc, acrtc_state->stream,
8609                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8610                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8611                 }
8612                 mutex_lock(&dm->dc_lock);
8613                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8614                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8615                         amdgpu_dm_psr_disable(acrtc_state->stream);
8616
8617                 dc_commit_updates_for_stream(dm->dc,
8618                                                      bundle->surface_updates,
8619                                                      planes_count,
8620                                                      acrtc_state->stream,
8621                                                      &bundle->stream_update,
8622                                                      dc_state);
8623
8624                 /**
8625                  * Enable or disable the interrupts on the backend.
8626                  *
8627                  * Most pipes are put into power gating when unused.
8628                  *
8629                  * When a pipe is power gated we lose its interrupt
8630                  * enablement state, so it must be restored once power
8631                  * gating is disabled again.
8631                  *
8632                  * So we need to update the IRQ control state in hardware
8633                  * whenever the pipe turns on (since it could be previously
8634                  * power gated) or off (since some pipes can't be power gated
8635                  * on some ASICs).
8636                  */
8637                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8638                         dm_update_pflip_irq_state(drm_to_adev(dev),
8639                                                   acrtc_attach);
8640
8641                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8642                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8643                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8644                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8645                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8646                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8647                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8648                         amdgpu_dm_psr_enable(acrtc_state->stream);
8649                 }
8650
8651                 mutex_unlock(&dm->dc_lock);
8652         }
8653
8654         /*
8655          * Update cursor state *after* programming all the planes.
8656          * to be disabling a single plane, since that pipe is being disabled anyway.
8657          * to be disabling a single plane - those pipes are being disabled.
8658          */
8659         if (acrtc_state->active_planes)
8660                 amdgpu_dm_commit_cursors(state);
8661
8662 cleanup:
8663         kfree(bundle);
8664 }
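
/*
 * Sketch of the flip-throttling test used in amdgpu_dm_commit_planes():
 * the loop busy-waits in ~1 ms steps while the CRTC is still inside the
 * vblank preceding the target one. The signed subtraction makes the
 * comparison safe across 32-bit vblank counter wrap-around.
 * Hypothetical helper, for illustration only.
 */
static inline bool example_before_target_vblank(u32 current_vblank,
						u32 target_vblank)
{
	return (int)(target_vblank - current_vblank) > 0;
}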
8665
8666 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8667                                    struct drm_atomic_state *state)
8668 {
8669         struct amdgpu_device *adev = drm_to_adev(dev);
8670         struct amdgpu_dm_connector *aconnector;
8671         struct drm_connector *connector;
8672         struct drm_connector_state *old_con_state, *new_con_state;
8673         struct drm_crtc_state *new_crtc_state;
8674         struct dm_crtc_state *new_dm_crtc_state;
8675         const struct dc_stream_status *status;
8676         int i, inst;
8677
8678         /* Notify audio device removals. */
8679         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8680                 if (old_con_state->crtc != new_con_state->crtc) {
8681                         /* CRTC changes require notification. */
8682                         goto notify;
8683                 }
8684
8685                 if (!new_con_state->crtc)
8686                         continue;
8687
8688                 new_crtc_state = drm_atomic_get_new_crtc_state(
8689                         state, new_con_state->crtc);
8690
8691                 if (!new_crtc_state)
8692                         continue;
8693
8694                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8695                         continue;
8696
8697         notify:
8698                 aconnector = to_amdgpu_dm_connector(connector);
8699
8700                 mutex_lock(&adev->dm.audio_lock);
8701                 inst = aconnector->audio_inst;
8702                 aconnector->audio_inst = -1;
8703                 mutex_unlock(&adev->dm.audio_lock);
8704
8705                 amdgpu_dm_audio_eld_notify(adev, inst);
8706         }
8707
8708         /* Notify audio device additions. */
8709         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8710                 if (!new_con_state->crtc)
8711                         continue;
8712
8713                 new_crtc_state = drm_atomic_get_new_crtc_state(
8714                         state, new_con_state->crtc);
8715
8716                 if (!new_crtc_state)
8717                         continue;
8718
8719                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8720                         continue;
8721
8722                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8723                 if (!new_dm_crtc_state->stream)
8724                         continue;
8725
8726                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8727                 if (!status)
8728                         continue;
8729
8730                 aconnector = to_amdgpu_dm_connector(connector);
8731
8732                 mutex_lock(&adev->dm.audio_lock);
8733                 inst = status->audio_inst;
8734                 aconnector->audio_inst = inst;
8735                 mutex_unlock(&adev->dm.audio_lock);
8736
8737                 amdgpu_dm_audio_eld_notify(adev, inst);
8738         }
8739 }
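
/*
 * Sketch of the locking pattern amdgpu_dm_commit_audio() uses for both
 * removals and additions: the audio instance is updated under
 * adev->dm.audio_lock, while the ELD notification itself is issued only
 * after the lock has been dropped. Hypothetical helper, for illustration
 * only.
 */
static inline void example_audio_notify_removal(struct amdgpu_device *adev,
						struct amdgpu_dm_connector *aconnector)
{
	int inst;

	mutex_lock(&adev->dm.audio_lock);
	inst = aconnector->audio_inst;
	aconnector->audio_inst = -1;
	mutex_unlock(&adev->dm.audio_lock);

	amdgpu_dm_audio_eld_notify(adev, inst);
}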
8740
8741 /**
8742  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8743  * @crtc_state: the DRM CRTC state
8744  * @stream_state: the DC stream state.
8745  *
8746  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8747  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8748  */
8749 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8750                                                 struct dc_stream_state *stream_state)
8751 {
8752         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8753 }
8754
8755 /**
8756  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8757  * @state: The atomic state to commit
8758  *
8759  * This will tell DC to commit the constructed DC state from atomic_check,
8760  * programming the hardware. Any failures here implies a hardware failure, since
8761  * programming the hardware. Any failure here implies a hardware failure, since
8762  */
8763 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8764 {
8765         struct drm_device *dev = state->dev;
8766         struct amdgpu_device *adev = drm_to_adev(dev);
8767         struct amdgpu_display_manager *dm = &adev->dm;
8768         struct dm_atomic_state *dm_state;
8769         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8770         uint32_t i, j;
8771         struct drm_crtc *crtc;
8772         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8773         unsigned long flags;
8774         bool wait_for_vblank = true;
8775         struct drm_connector *connector;
8776         struct drm_connector_state *old_con_state, *new_con_state;
8777         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8778         int crtc_disable_count = 0;
8779         bool mode_set_reset_required = false;
8780
8781         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8782
8783         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8784
8785         dm_state = dm_atomic_get_new_state(state);
8786         if (dm_state && dm_state->context) {
8787                 dc_state = dm_state->context;
8788         } else {
8789                 /* No state changes, retain current state. */
8790                 dc_state_temp = dc_create_state(dm->dc);
8791                 ASSERT(dc_state_temp);
8792                 dc_state = dc_state_temp;
8793                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8794         }
8795
8796         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8797                                        new_crtc_state, i) {
8798                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8799
8800                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8801
8802                 if (old_crtc_state->active &&
8803                     (!new_crtc_state->active ||
8804                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8805                         manage_dm_interrupts(adev, acrtc, false);
8806                         dc_stream_release(dm_old_crtc_state->stream);
8807                 }
8808         }
8809
8810         drm_atomic_helper_calc_timestamping_constants(state);
8811
8812         /* update changed items */
8813         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8814                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8815
8816                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8817                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8818
8819                 DRM_DEBUG_ATOMIC(
8820                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8821                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8822                         "connectors_changed:%d\n",
8823                         acrtc->crtc_id,
8824                         new_crtc_state->enable,
8825                         new_crtc_state->active,
8826                         new_crtc_state->planes_changed,
8827                         new_crtc_state->mode_changed,
8828                         new_crtc_state->active_changed,
8829                         new_crtc_state->connectors_changed);
8830
8831                 /* Disable cursor if disabling crtc */
8832                 if (old_crtc_state->active && !new_crtc_state->active) {
8833                         struct dc_cursor_position position;
8834
8835                         memset(&position, 0, sizeof(position));
8836                         mutex_lock(&dm->dc_lock);
8837                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8838                         mutex_unlock(&dm->dc_lock);
8839                 }
8840
8841                 /* Copy all transient state flags into dc state */
8842                 if (dm_new_crtc_state->stream) {
8843                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8844                                                             dm_new_crtc_state->stream);
8845                 }
8846
8847                 /* handles headless hotplug case, updating new_state and
8848                  * aconnector as needed
8849                  */
8850
8851                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8852
8853                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8854
8855                         if (!dm_new_crtc_state->stream) {
8856                                 /*
8857                                  * This can happen because of problems with
8858                                  * userspace notification delivery: userspace
8859                                  * tries to set a mode on a display that is in
8860                                  * fact disconnected, so dc_sink is NULL on the
8861                                  * aconnector. We expect a mode reset to follow
8862                                  * shortly.
8863                                  *
8864                                  * This can also happen when an unplug occurs
8865                                  * during the resume sequence.
8866                                  *
8867                                  * In this case, we want to pretend we still
8868                                  * have a sink to keep the pipe running, so that
8869                                  * hw state stays consistent with the sw state.
8870                                  */
8871                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8872                                                 __func__, acrtc->base.base.id);
8873                                 continue;
8874                         }
8875
8876                         if (dm_old_crtc_state->stream)
8877                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8878
8879                         pm_runtime_get_noresume(dev->dev);
8880
8881                         acrtc->enabled = true;
8882                         acrtc->hw_mode = new_crtc_state->mode;
8883                         crtc->hwmode = new_crtc_state->mode;
8884                         mode_set_reset_required = true;
8885                 } else if (modereset_required(new_crtc_state)) {
8886                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8887                         /* i.e. reset mode */
8888                         if (dm_old_crtc_state->stream)
8889                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8890
8891                         mode_set_reset_required = true;
8892                 }
8893         } /* for_each_crtc_in_state() */
8894
8895         if (dc_state) {
8896                 /* if there was a mode set or reset, disable eDP PSR */
8897                 if (mode_set_reset_required)
8898                         amdgpu_dm_psr_disable_all(dm);
8899
8900                 dm_enable_per_frame_crtc_master_sync(dc_state);
8901                 mutex_lock(&dm->dc_lock);
8902                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8903 #if defined(CONFIG_DRM_AMD_DC_DCN)
8904                 /* Allow idle optimization when vblank count is 0 for display off */
8905                 if (dm->active_vblank_irq_count == 0)
8906                         dc_allow_idle_optimizations(dm->dc, true);
8907 #endif
8908                 mutex_unlock(&dm->dc_lock);
8909         }
8910
8911         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8912                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8913
8914                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8915
8916                 if (dm_new_crtc_state->stream != NULL) {
8917                         const struct dc_stream_status *status =
8918                                         dc_stream_get_status(dm_new_crtc_state->stream);
8919
8920                         if (!status)
8921                                 status = dc_stream_get_status_from_state(dc_state,
8922                                                                          dm_new_crtc_state->stream);
8923                         if (!status)
8924                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8925                         else
8926                                 acrtc->otg_inst = status->primary_otg_inst;
8927                 }
8928         }
8929 #ifdef CONFIG_DRM_AMD_DC_HDCP
8930         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8931                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8932                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8933                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8934
8935                 new_crtc_state = NULL;
8936
8937                 if (acrtc)
8938                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8939
8940                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8941
8942                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8943                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8944                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8945                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8946                         dm_new_con_state->update_hdcp = true;
8947                         continue;
8948                 }
8949
8950                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8951                         hdcp_update_display(
8952                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8953                                 new_con_state->hdcp_content_type,
8954                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8955         }
8956 #endif
8957
8958         /* Handle connector state changes */
8959         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8960                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8961                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8962                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8963                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8964                 struct dc_stream_update stream_update;
8965                 struct dc_info_packet hdr_packet;
8966                 struct dc_stream_status *status = NULL;
8967                 bool abm_changed, hdr_changed, scaling_changed;
8968
8969                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8970                 memset(&stream_update, 0, sizeof(stream_update));
8971
8972                 if (acrtc) {
8973                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8974                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8975                 }
8976
8977                 /* Skip any modesets/resets */
8978                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8979                         continue;
8980
8981                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8982                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8983
8984                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8985                                                              dm_old_con_state);
8986
8987                 abm_changed = dm_new_crtc_state->abm_level !=
8988                               dm_old_crtc_state->abm_level;
8989
8990                 hdr_changed =
8991                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8992
8993                 if (!scaling_changed && !abm_changed && !hdr_changed)
8994                         continue;
8995
8996                 stream_update.stream = dm_new_crtc_state->stream;
8997                 if (scaling_changed) {
8998                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8999                                         dm_new_con_state, dm_new_crtc_state->stream);
9000
9001                         stream_update.src = dm_new_crtc_state->stream->src;
9002                         stream_update.dst = dm_new_crtc_state->stream->dst;
9003                 }
9004
9005                 if (abm_changed) {
9006                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9007
9008                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9009                 }
9010
9011                 if (hdr_changed) {
9012                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9013                         stream_update.hdr_static_metadata = &hdr_packet;
9014                 }
9015
9016                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9017
9018                 if (WARN_ON(!status))
9019                         continue;
9020
9021                 WARN_ON(!status->plane_count);
9022
9023                 /*
9024                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9025                  * Here we create an empty update on each plane.
9026                  * To fix this, DC should permit updating only stream properties.
9027                  */
9028                 for (j = 0; j < status->plane_count; j++)
9029                         dummy_updates[j].surface = status->plane_states[0];
9030
9031
9032                 mutex_lock(&dm->dc_lock);
9033                 dc_commit_updates_for_stream(dm->dc,
9034                                                      dummy_updates,
9035                                                      status->plane_count,
9036                                                      dm_new_crtc_state->stream,
9037                                                      &stream_update,
9038                                                      dc_state);
9039                 mutex_unlock(&dm->dc_lock);
9040         }
9041
9042         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9043         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9044                                       new_crtc_state, i) {
9045                 if (old_crtc_state->active && !new_crtc_state->active)
9046                         crtc_disable_count++;
9047
9048                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9049                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9050
9051                 /* Update the freesync config on the crtc state and the params used by the irq handler */
9052                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9053
9054                 /* Handle vrr on->off / off->on transitions */
9055                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9056                                                 dm_new_crtc_state);
9057         }
9058
9059         /**
9060          * Enable interrupts for CRTCs that are newly enabled or went through
9061          * a modeset. This is intentionally deferred until after the front-end
9062          * state has been modified, so that the OTG is on and the IRQ handlers
9063          * do not access stale or invalid state.
9064          */
9065         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9066                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9067 #ifdef CONFIG_DEBUG_FS
9068                 bool configure_crc = false;
9069                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9070 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9071                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9072 #endif
9073                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9074                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9075                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9076 #endif
9077                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9078
9079                 if (new_crtc_state->active &&
9080                     (!old_crtc_state->active ||
9081                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9082                         dc_stream_retain(dm_new_crtc_state->stream);
9083                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9084                         manage_dm_interrupts(adev, acrtc, true);
9085
9086 #ifdef CONFIG_DEBUG_FS
9087                         /**
9088                          * Frontend may have changed, so reapply the CRC capture
9089                          * settings for the stream.
9090                          */
9091                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9092
9093                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9094                                 configure_crc = true;
9095 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9096                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9097                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9098                                         acrtc->dm_irq_params.crc_window.update_win = true;
9099                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9100                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9101                                         crc_rd_wrk->crtc = crtc;
9102                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9103                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9104                                 }
9105 #endif
9106                         }
9107
9108                         if (configure_crc)
9109                                 if (amdgpu_dm_crtc_configure_crc_source(
9110                                         crtc, dm_new_crtc_state, cur_crc_src))
9111                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9112 #endif
9113                 }
9114         }
9115
9116         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9117                 if (new_crtc_state->async_flip)
9118                         wait_for_vblank = false;
9119
9120         /* update planes when needed per crtc */
9121         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9122                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9123
9124                 if (dm_new_crtc_state->stream)
9125                         amdgpu_dm_commit_planes(state, dc_state, dev,
9126                                                 dm, crtc, wait_for_vblank);
9127         }
9128
9129         /* Update audio instances for each connector. */
9130         amdgpu_dm_commit_audio(dev, state);
9131
9132 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9133         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9134         /* restore the backlight level */
9135         if (dm->backlight_dev)
9136                 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9137 #endif
9138         /*
9139          * Send a vblank event for every CRTC whose event was not handled in the
9140          * flip path, and mark the event consumed for drm_atomic_helper_commit_hw_done()
9141          */
9142         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9143         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9144
9145                 if (new_crtc_state->event)
9146                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9147
9148                 new_crtc_state->event = NULL;
9149         }
9150         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9151
9152         /* Signal HW programming completion */
9153         drm_atomic_helper_commit_hw_done(state);
9154
9155         if (wait_for_vblank)
9156                 drm_atomic_helper_wait_for_flip_done(dev, state);
9157
9158         drm_atomic_helper_cleanup_planes(dev, state);
9159
9160         /* return the stolen vga memory back to VRAM */
9161         if (!adev->mman.keep_stolen_vga_memory)
9162                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9163         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9164
9165         /*
9166          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9167          * so we can put the GPU into runtime suspend if we're not driving any
9168          * displays anymore
9169          */
9170         for (i = 0; i < crtc_disable_count; i++)
9171                 pm_runtime_put_autosuspend(dev->dev);
9172         pm_runtime_mark_last_busy(dev->dev);
9173
9174         if (dc_state_temp)
9175                 dc_release_state(dc_state_temp);
9176 }
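
/*
 * Sketch of the runtime-PM accounting done at the end of
 * amdgpu_dm_atomic_commit_tail(): each CRTC that went from active to
 * inactive drops one autosuspend reference, balancing the
 * pm_runtime_get_noresume() taken when the CRTC was enabled.
 * Hypothetical helper, for illustration only.
 */
static inline void example_drop_crtc_pm_refs(struct device *dev,
					     int crtc_disable_count)
{
	int i;

	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev);
	pm_runtime_mark_last_busy(dev);
}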
9177
9178
9179 static int dm_force_atomic_commit(struct drm_connector *connector)
9180 {
9181         int ret = 0;
9182         struct drm_device *ddev = connector->dev;
9183         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9184         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9185         struct drm_plane *plane = disconnected_acrtc->base.primary;
9186         struct drm_connector_state *conn_state;
9187         struct drm_crtc_state *crtc_state;
9188         struct drm_plane_state *plane_state;
9189
9190         if (!state)
9191                 return -ENOMEM;
9192
9193         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9194
9195         /* Construct an atomic state to restore previous display setting */
9196
9197         /*
9198          * Attach connectors to drm_atomic_state
9199          */
9200         conn_state = drm_atomic_get_connector_state(state, connector);
9201
9202         ret = PTR_ERR_OR_ZERO(conn_state);
9203         if (ret)
9204                 goto out;
9205
9206         /* Attach crtc to drm_atomic_state */
9207         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9208
9209         ret = PTR_ERR_OR_ZERO(crtc_state);
9210         if (ret)
9211                 goto out;
9212
9213         /* force a restore */
9214         crtc_state->mode_changed = true;
9215
9216         /* Attach plane to drm_atomic_state */
9217         plane_state = drm_atomic_get_plane_state(state, plane);
9218
9219         ret = PTR_ERR_OR_ZERO(plane_state);
9220         if (ret)
9221                 goto out;
9222
9223         /* Call commit internally with the state we just constructed */
9224         ret = drm_atomic_commit(state);
9225
9226 out:
9227         drm_atomic_state_put(state);
9228         if (ret)
9229                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9230
9231         return ret;
9232 }
9233
9234 /*
9235  * This function handles all cases when a set mode does not come upon hotplug.
9236  * This includes when a display is unplugged and then plugged back into the
9237  * same port, and when running without usermode desktop manager support.
9238  */
9239 void dm_restore_drm_connector_state(struct drm_device *dev,
9240                                     struct drm_connector *connector)
9241 {
9242         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9243         struct amdgpu_crtc *disconnected_acrtc;
9244         struct dm_crtc_state *acrtc_state;
9245
9246         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9247                 return;
9248
9249         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9250         if (!disconnected_acrtc)
9251                 return;
9252
9253         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9254         if (!acrtc_state->stream)
9255                 return;
9256
9257         /*
9258          * If the previous sink is not released and is different from the current
9259          * one, we deduce that we are in a state where we cannot rely on a usermode
9260          * call to turn on the display, so we do it here.
9261          */
9262         if (acrtc_state->stream->sink != aconnector->dc_sink)
9263                 dm_force_atomic_commit(&aconnector->base);
9264 }
9265
9266 /*
9267  * Grabs all modesetting locks to serialize against any blocking commits and
9268  * waits for the completion of all non-blocking commits.
9269  */
9270 static int do_aquire_global_lock(struct drm_device *dev,
9271                                  struct drm_atomic_state *state)
9272 {
9273         struct drm_crtc *crtc;
9274         struct drm_crtc_commit *commit;
9275         long ret;
9276
9277         /*
9278          * Adding all modeset locks to acquire_ctx ensures that when the
9279          * framework releases it, the extra locks we take here are
9280          * released too.
9281          */
9282         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9283         if (ret)
9284                 return ret;
9285
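        /*
         * Grab a reference to each CRTC's last commit under commit_lock so
         * it cannot be freed while we wait for its hw_done and flip_done
         * completions below.
         */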
9286         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9287                 spin_lock(&crtc->commit_lock);
9288                 commit = list_first_entry_or_null(&crtc->commit_list,
9289                                 struct drm_crtc_commit, commit_entry);
9290                 if (commit)
9291                         drm_crtc_commit_get(commit);
9292                 spin_unlock(&crtc->commit_lock);
9293
9294                 if (!commit)
9295                         continue;
9296
9297                 /*
9298                  * Make sure all pending HW programming completed and
9299                  * page flips done
9300                  */
9301                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10 * HZ);
9302 
9303                 if (ret > 0)
9304                         ret = wait_for_completion_interruptible_timeout(
9305                                         &commit->flip_done, 10 * HZ);
9306
9307                 if (ret == 0)
9308                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9309                                   crtc->base.id, crtc->name);
9310
9311                 drm_crtc_commit_put(commit);
9312         }
9313
9314         return ret < 0 ? ret : 0;
9315 }
9316
9317 static void get_freesync_config_for_crtc(
9318         struct dm_crtc_state *new_crtc_state,
9319         struct dm_connector_state *new_con_state)
9320 {
9321         struct mod_freesync_config config = {0};
9322         struct amdgpu_dm_connector *aconnector =
9323                         to_amdgpu_dm_connector(new_con_state->base.connector);
9324         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9325         int vrefresh = drm_mode_vrefresh(mode);
9326         bool fs_vid_mode = false;
9327
9328         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9329                                         vrefresh >= aconnector->min_vfreq &&
9330                                         vrefresh <= aconnector->max_vfreq;
9331
9332         if (new_crtc_state->vrr_supported) {
9333                 new_crtc_state->stream->ignore_msa_timing_param = true;
9334                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9335
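                /* aconnector->min/max_vfreq are in Hz; DC wants the range in uHz */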
9336                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9337                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9338                 config.vsif_supported = true;
9339                 config.btr = true;
9340
9341                 if (fs_vid_mode) {
9342                         config.state = VRR_STATE_ACTIVE_FIXED;
9343                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9344                         goto out;
9345                 } else if (new_crtc_state->base.vrr_enabled) {
9346                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9347                 } else {
9348                         config.state = VRR_STATE_INACTIVE;
9349                 }
9350         }
9351 out:
9352         new_crtc_state->freesync_config = config;
9353 }
9354
9355 static void reset_freesync_config_for_crtc(
9356         struct dm_crtc_state *new_crtc_state)
9357 {
9358         new_crtc_state->vrr_supported = false;
9359
9360         memset(&new_crtc_state->vrr_infopacket, 0,
9361                sizeof(new_crtc_state->vrr_infopacket));
9362 }
9363
9364 static bool
9365 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9366                                  struct drm_crtc_state *new_crtc_state)
9367 {
9368         struct drm_display_mode old_mode, new_mode;
9369
9370         if (!old_crtc_state || !new_crtc_state)
9371                 return false;
9372
9373         old_mode = old_crtc_state->mode;
9374         new_mode = new_crtc_state->mode;
9375
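        /*
         * Treat the timing as unchanged for freesync when only the vertical
         * blanking differs: vtotal and the vsync position may move, but the
         * vsync pulse width and all horizontal parameters must be identical
         * (i.e. a pure front porch change).
         */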
9376         if (old_mode.clock       == new_mode.clock &&
9377             old_mode.hdisplay    == new_mode.hdisplay &&
9378             old_mode.vdisplay    == new_mode.vdisplay &&
9379             old_mode.htotal      == new_mode.htotal &&
9380             old_mode.vtotal      != new_mode.vtotal &&
9381             old_mode.hsync_start == new_mode.hsync_start &&
9382             old_mode.vsync_start != new_mode.vsync_start &&
9383             old_mode.hsync_end   == new_mode.hsync_end &&
9384             old_mode.vsync_end   != new_mode.vsync_end &&
9385             old_mode.hskew       == new_mode.hskew &&
9386             old_mode.vscan       == new_mode.vscan &&
9387             (old_mode.vsync_end - old_mode.vsync_start) ==
9388             (new_mode.vsync_end - new_mode.vsync_start))
9389                 return true;
9390
9391         return false;
9392 }
9393
9394 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
9395 {
9395         uint64_t num, den, res;
9396         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9397
9398         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9399
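        /*
         * mode.clock is in kHz: scale by 1000 to get Hz and by 1000000 to
         * express the refresh rate (pixel clock / pixels per frame) in uHz.
         */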
9400         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9401         den = (unsigned long long)new_crtc_state->mode.htotal *
9402               (unsigned long long)new_crtc_state->mode.vtotal;
9403
9404         res = div_u64(num, den);
9405         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9406 }
9407
9408 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9409                                 struct drm_atomic_state *state,
9410                                 struct drm_crtc *crtc,
9411                                 struct drm_crtc_state *old_crtc_state,
9412                                 struct drm_crtc_state *new_crtc_state,
9413                                 bool enable,
9414                                 bool *lock_and_validation_needed)
9415 {
9416         struct dm_atomic_state *dm_state = NULL;
9417         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9418         struct dc_stream_state *new_stream;
9419         int ret = 0;
9420
9421         /*
9422          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
9423          * dc_validation_set, and update changed items there.
9424          */
9425         struct amdgpu_crtc *acrtc = NULL;
9426         struct amdgpu_dm_connector *aconnector = NULL;
9427         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9428         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9429
9430         new_stream = NULL;
9431
9432         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9433         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9434         acrtc = to_amdgpu_crtc(crtc);
9435         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9436
9437         /* TODO This hack should go away */
9438         if (aconnector && enable) {
9439                 /* Make sure fake sink is created in plug-in scenario */
9440                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9441                                                             &aconnector->base);
9442                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9443                                                             &aconnector->base);
9444
9445                 if (IS_ERR(drm_new_conn_state)) {
9446                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9447                         goto fail;
9448                 }
9449
9450                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9451                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9452
9453                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9454                         goto skip_modeset;
9455
9456                 new_stream = create_validate_stream_for_sink(aconnector,
9457                                                              &new_crtc_state->mode,
9458                                                              dm_new_conn_state,
9459                                                              dm_old_crtc_state->stream);
9460
9461                 /*
9462                  * We can have no stream on ACTION_SET if a display
9463                  * was disconnected during S3; in this case it is not an
9464                  * error: the OS will be updated after detection and
9465                  * will do the right thing on the next atomic commit.
9466                  */
9467
9468                 if (!new_stream) {
9469                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9470                                         __func__, acrtc->base.base.id);
9471                         ret = -ENOMEM;
9472                         goto fail;
9473                 }
9474
9475                 /*
9476                  * TODO: Check VSDB bits to decide whether this should
9477                  * be enabled or not.
9478                  */
9479                 new_stream->triggered_crtc_reset.enabled =
9480                         dm->force_timing_sync;
9481
9482                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9483
9484                 ret = fill_hdr_info_packet(drm_new_conn_state,
9485                                            &new_stream->hdr_static_metadata);
9486                 if (ret)
9487                         goto fail;
9488
9489                 /*
9490                  * If we already removed the old stream from the context
9491                  * (and set the new stream to NULL) then we can't reuse
9492                  * the old stream even if the stream and scaling are unchanged.
9493                  * We'll hit the BUG_ON and black screen.
9494                  *
9495                  * TODO: Refactor this function to allow this check to work
9496                  * in all conditions.
9497                  */
9498                 if (amdgpu_freesync_vid_mode &&
9499                     dm_new_crtc_state->stream &&
9500                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9501                         goto skip_modeset;
9502
9503                 if (dm_new_crtc_state->stream &&
9504                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9505                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9506                         new_crtc_state->mode_changed = false;
9507                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9508                                          new_crtc_state->mode_changed);
9509                 }
9510         }
9511
9512         /* mode_changed flag may get updated above, need to check again */
9513         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9514                 goto skip_modeset;
9515
9516         DRM_DEBUG_ATOMIC(
9517                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9518                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9519                 "connectors_changed:%d\n",
9520                 acrtc->crtc_id,
9521                 new_crtc_state->enable,
9522                 new_crtc_state->active,
9523                 new_crtc_state->planes_changed,
9524                 new_crtc_state->mode_changed,
9525                 new_crtc_state->active_changed,
9526                 new_crtc_state->connectors_changed);
9527
9528         /* Remove stream for any changed/disabled CRTC */
9529         if (!enable) {
9531                 if (!dm_old_crtc_state->stream)
9532                         goto skip_modeset;
9533
9534                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9535                     is_timing_unchanged_for_freesync(new_crtc_state,
9536                                                      old_crtc_state)) {
9537                         new_crtc_state->mode_changed = false;
9538                         DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
9539                                          new_crtc_state->mode_changed);
9542
9543                         set_freesync_fixed_config(dm_new_crtc_state);
9544
9545                         goto skip_modeset;
9546                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9547                            is_freesync_video_mode(&new_crtc_state->mode,
9548                                                   aconnector)) {
9549                         set_freesync_fixed_config(dm_new_crtc_state);
9550                 }
9551
9552                 ret = dm_atomic_get_state(state, &dm_state);
9553                 if (ret)
9554                         goto fail;
9555
9556                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9557                                 crtc->base.id);
9558
9559                 /* i.e. reset mode */
9560                 if (dc_remove_stream_from_ctx(
9561                                 dm->dc,
9562                                 dm_state->context,
9563                                 dm_old_crtc_state->stream) != DC_OK) {
9564                         ret = -EINVAL;
9565                         goto fail;
9566                 }
9567
9568                 dc_stream_release(dm_old_crtc_state->stream);
9569                 dm_new_crtc_state->stream = NULL;
9570
9571                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9572
9573                 *lock_and_validation_needed = true;
9574
9575         } else { /* Add stream for any updated/enabled CRTC */
9576                 /*
9577                  * Quick fix to prevent a NULL pointer deref on new_stream when
9578                  * newly added MST connectors are not found in the existing
9579                  * crtc_state in chained mode.
9580                  * TODO: dig out the root cause of this.
9581                  */
9581                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9582                         goto skip_modeset;
9583
9584                 if (modereset_required(new_crtc_state))
9585                         goto skip_modeset;
9586
9587                 if (modeset_required(new_crtc_state, new_stream,
9588                                      dm_old_crtc_state->stream)) {
9589
9590                         WARN_ON(dm_new_crtc_state->stream);
9591
9592                         ret = dm_atomic_get_state(state, &dm_state);
9593                         if (ret)
9594                                 goto fail;
9595
9596                         dm_new_crtc_state->stream = new_stream;
9597
9598                         dc_stream_retain(new_stream);
9599
9600                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9601                                          crtc->base.id);
9602
9603                         if (dc_add_stream_to_ctx(
9604                                         dm->dc,
9605                                         dm_state->context,
9606                                         dm_new_crtc_state->stream) != DC_OK) {
9607                                 ret = -EINVAL;
9608                                 goto fail;
9609                         }
9610
9611                         *lock_and_validation_needed = true;
9612                 }
9613         }
9614
9615 skip_modeset:
9616         /* Release extra reference */
9617         if (new_stream)
9618                 dc_stream_release(new_stream);
9619
9620         /*
9621          * We want to do dc stream updates that do not require a
9622          * full modeset below.
9623          */
9624         if (!(enable && aconnector && new_crtc_state->active))
9625                 return 0;
9626         /*
9627          * Given above conditions, the dc state cannot be NULL because:
9628          * 1. We're in the process of enabling CRTCs (just been added
9629          *    to the dc context, or already is on the context)
9630          * 2. Has a valid connector attached, and
9631          * 3. Is currently active and enabled.
9632          * => The dc stream state currently exists.
9633          */
9634         BUG_ON(dm_new_crtc_state->stream == NULL);
9635
9636         /* Scaling or underscan settings */
9637         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9638                 update_stream_scaling_settings(
9639                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9640
9641         /* ABM settings */
9642         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9643
9644         /*
9645          * Color management settings. We also update color properties
9646          * when a modeset is needed, to ensure it gets reprogrammed.
9647          */
9648         if (dm_new_crtc_state->base.color_mgmt_changed ||
9649             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9650                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9651                 if (ret)
9652                         goto fail;
9653         }
9654
9655         /* Update Freesync settings. */
9656         get_freesync_config_for_crtc(dm_new_crtc_state,
9657                                      dm_new_conn_state);
9658
9659         return ret;
9660
9661 fail:
9662         if (new_stream)
9663                 dc_stream_release(new_stream);
9664         return ret;
9665 }
9666
9667 static bool should_reset_plane(struct drm_atomic_state *state,
9668                                struct drm_plane *plane,
9669                                struct drm_plane_state *old_plane_state,
9670                                struct drm_plane_state *new_plane_state)
9671 {
9672         struct drm_plane *other;
9673         struct drm_plane_state *old_other_state, *new_other_state;
9674         struct drm_crtc_state *new_crtc_state;
9675         int i;
9676
9677         /*
9678          * TODO: Remove this hack once the checks below are sufficient
9679          * to determine when we need to reset all the planes on
9680          * the stream.
9681          */
9682         if (state->allow_modeset)
9683                 return true;
9684
9685         /* Exit early if we know that we're adding or removing the plane. */
9686         if (old_plane_state->crtc != new_plane_state->crtc)
9687                 return true;
9688
9689         /* old crtc == new_crtc == NULL, plane not in context. */
9690         if (!new_plane_state->crtc)
9691                 return false;
9692
9693         new_crtc_state =
9694                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9695
9696         if (!new_crtc_state)
9697                 return true;
9698
9699         /* CRTC Degamma changes currently require us to recreate planes. */
9700         if (new_crtc_state->color_mgmt_changed)
9701                 return true;
9702
9703         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9704                 return true;
9705
9706         /*
9707          * If there are any new primary or overlay planes being added or
9708          * removed then the z-order can potentially change. To ensure
9709          * correct z-order and pipe acquisition the current DC architecture
9710          * requires us to remove and recreate all existing planes.
9711          *
9712          * TODO: Come up with a more elegant solution for this.
9713          */
9714         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9715                 struct amdgpu_framebuffer *old_afb, *new_afb;

9716                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9717                         continue;
9718
9719                 if (old_other_state->crtc != new_plane_state->crtc &&
9720                     new_other_state->crtc != new_plane_state->crtc)
9721                         continue;
9722
9723                 if (old_other_state->crtc != new_other_state->crtc)
9724                         return true;
9725
9726                 /* Src/dst size and scaling updates. */
9727                 if (old_other_state->src_w != new_other_state->src_w ||
9728                     old_other_state->src_h != new_other_state->src_h ||
9729                     old_other_state->crtc_w != new_other_state->crtc_w ||
9730                     old_other_state->crtc_h != new_other_state->crtc_h)
9731                         return true;
9732
9733                 /* Rotation / mirroring updates. */
9734                 if (old_other_state->rotation != new_other_state->rotation)
9735                         return true;
9736
9737                 /* Blending updates. */
9738                 if (old_other_state->pixel_blend_mode !=
9739                     new_other_state->pixel_blend_mode)
9740                         return true;
9741
9742                 /* Alpha updates. */
9743                 if (old_other_state->alpha != new_other_state->alpha)
9744                         return true;
9745
9746                 /* Colorspace changes. */
9747                 if (old_other_state->color_range != new_other_state->color_range ||
9748                     old_other_state->color_encoding != new_other_state->color_encoding)
9749                         return true;
9750
9751                 /* Framebuffer checks fall at the end. */
9752                 if (!old_other_state->fb || !new_other_state->fb)
9753                         continue;
9754
9755                 /* Pixel format changes can require bandwidth updates. */
9756                 if (old_other_state->fb->format != new_other_state->fb->format)
9757                         return true;
9758
9759                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9760                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9761
9762                 /* Tiling and DCC changes also require bandwidth updates. */
9763                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9764                     old_afb->base.modifier != new_afb->base.modifier)
9765                         return true;
9766         }
9767
9768         return false;
9769 }
9770
9771 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9772                               struct drm_plane_state *new_plane_state,
9773                               struct drm_framebuffer *fb)
9774 {
9775         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9776         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9777         unsigned int pitch;
9778         bool linear;
9779
9780         if (fb->width > new_acrtc->max_cursor_width ||
9781             fb->height > new_acrtc->max_cursor_height) {
9782                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9783                                  new_plane_state->fb->width,
9784                                  new_plane_state->fb->height);
9785                 return -EINVAL;
9786         }
9787         if (new_plane_state->src_w != fb->width << 16 ||
9788             new_plane_state->src_h != fb->height << 16) {
9789                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9790                 return -EINVAL;
9791         }
9792
9793         /* Pitch in pixels */
9794         pitch = fb->pitches[0] / fb->format->cpp[0];
9795
9796         if (fb->width != pitch) {
9797                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9798                                  fb->width, pitch);
9799                 return -EINVAL;
9800         }
9801
9802         switch (pitch) {
9803         case 64:
9804         case 128:
9805         case 256:
9806                 /* FB pitch is supported by cursor plane */
9807                 break;
9808         default:
9809                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9810                 return -EINVAL;
9811         }
9812
9813         /*
9814          * Core DRM takes care of checking FB modifiers, so we only need to
9815          * check tiling flags when the FB doesn't have a modifier.
9816          */
9815         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9816                 if (adev->family < AMDGPU_FAMILY_AI) {
9817                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9818                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9819                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9820                 } else {
9821                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9822                 }
9823                 if (!linear) {
9824                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9825                         return -EINVAL;
9826                 }
9827         }
9828
9829         return 0;
9830 }
9831
9832 static int dm_update_plane_state(struct dc *dc,
9833                                  struct drm_atomic_state *state,
9834                                  struct drm_plane *plane,
9835                                  struct drm_plane_state *old_plane_state,
9836                                  struct drm_plane_state *new_plane_state,
9837                                  bool enable,
9838                                  bool *lock_and_validation_needed)
9839 {
9841         struct dm_atomic_state *dm_state = NULL;
9842         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9843         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9844         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9845         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9846         struct amdgpu_crtc *new_acrtc;
9847         bool needs_reset;
9848         int ret = 0;
9849 
9851         new_plane_crtc = new_plane_state->crtc;
9852         old_plane_crtc = old_plane_state->crtc;
9853         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9854         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9855
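        /*
         * Cursor planes are handled outside of the DC plane state (see
         * handle_cursor_update()), so only basic sanity checks are done here.
         */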
9856         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9857                 if (!enable || !new_plane_crtc ||
9858                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9859                         return 0;
9860
9861                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9862
9863                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9864                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9865                         return -EINVAL;
9866                 }
9867
9868                 if (new_plane_state->fb) {
9869                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9870                                                  new_plane_state->fb);
9871                         if (ret)
9872                                 return ret;
9873                 }
9874
9875                 return 0;
9876         }
9877
9878         needs_reset = should_reset_plane(state, plane, old_plane_state,
9879                                          new_plane_state);
9880
9881         /* Remove any changed/removed planes */
9882         if (!enable) {
9883                 if (!needs_reset)
9884                         return 0;
9885
9886                 if (!old_plane_crtc)
9887                         return 0;
9888
9889                 old_crtc_state = drm_atomic_get_old_crtc_state(
9890                                 state, old_plane_crtc);
9891                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9892
9893                 if (!dm_old_crtc_state->stream)
9894                         return 0;
9895
9896                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9897                                 plane->base.id, old_plane_crtc->base.id);
9898
9899                 ret = dm_atomic_get_state(state, &dm_state);
9900                 if (ret)
9901                         return ret;
9902
9903                 if (!dc_remove_plane_from_context(
9904                                 dc,
9905                                 dm_old_crtc_state->stream,
9906                                 dm_old_plane_state->dc_state,
9907                                 dm_state->context)) {
9909                         return -EINVAL;
9910                 }
9911 
9913                 dc_plane_state_release(dm_old_plane_state->dc_state);
9914                 dm_new_plane_state->dc_state = NULL;
9915
9916                 *lock_and_validation_needed = true;
9917
9918         } else { /* Add new planes */
9919                 struct dc_plane_state *dc_new_plane_state;
9920
9921                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9922                         return 0;
9923
9924                 if (!new_plane_crtc)
9925                         return 0;
9926
9927                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9928                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9929
9930                 if (!dm_new_crtc_state->stream)
9931                         return 0;
9932
9933                 if (!needs_reset)
9934                         return 0;
9935
9936                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9937                 if (ret)
9938                         return ret;
9939
9940                 WARN_ON(dm_new_plane_state->dc_state);
9941
9942                 dc_new_plane_state = dc_create_plane_state(dc);
9943                 if (!dc_new_plane_state)
9944                         return -ENOMEM;
9945
9946                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9947                                  plane->base.id, new_plane_crtc->base.id);
9948
9949                 ret = fill_dc_plane_attributes(
9950                         drm_to_adev(new_plane_crtc->dev),
9951                         dc_new_plane_state,
9952                         new_plane_state,
9953                         new_crtc_state);
9954                 if (ret) {
9955                         dc_plane_state_release(dc_new_plane_state);
9956                         return ret;
9957                 }
9958
9959                 ret = dm_atomic_get_state(state, &dm_state);
9960                 if (ret) {
9961                         dc_plane_state_release(dc_new_plane_state);
9962                         return ret;
9963                 }
9964
9965                 /*
9966                  * Any atomic check errors that occur after this will
9967                  * not need a release. The plane state will be attached
9968                  * to the stream, and therefore part of the atomic
9969                  * state. It'll be released when the atomic state is
9970                  * cleaned.
9971                  */
9972                 if (!dc_add_plane_to_context(
9973                                 dc,
9974                                 dm_new_crtc_state->stream,
9975                                 dc_new_plane_state,
9976                                 dm_state->context)) {
9978                         dc_plane_state_release(dc_new_plane_state);
9979                         return -EINVAL;
9980                 }
9981
9982                 dm_new_plane_state->dc_state = dc_new_plane_state;
9983
9984                 /* Tell DC to do a full surface update every time there
9985                  * is a plane change. Inefficient, but works for now.
9986                  */
9987                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9988
9989                 *lock_and_validation_needed = true;
9990         }
9991 
9993         return ret;
9994 }
9995
9996 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9997                                 struct drm_crtc *crtc,
9998                                 struct drm_crtc_state *new_crtc_state)
9999 {
10000         struct drm_plane_state *new_cursor_state, *new_primary_state;
10001         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10002
10003         /*
10004          * On DCE and DCN there is no dedicated hardware cursor plane. We get a
10005          * cursor per pipe, but it's going to inherit the scaling and
10006          * positioning from the underlying pipe. Check that the cursor plane's
10007          * blending properties match the primary plane's.
10008          */
10007
10008         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10009         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10010         if (!new_cursor_state || !new_primary_state ||
10011             !new_cursor_state->fb || !new_primary_state->fb) {
10012                 return 0;
10013         }
10014
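        /*
         * src_w/src_h are 16.16 fixed point, hence the >> 16; scales are
         * compared as integers in units of 1/1000 to avoid floating point.
         */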
10015         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10016                          (new_cursor_state->src_w >> 16);
10017         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10018                          (new_cursor_state->src_h >> 16);
10019
10020         primary_scale_w = new_primary_state->crtc_w * 1000 /
10021                          (new_primary_state->src_w >> 16);
10022         primary_scale_h = new_primary_state->crtc_h * 1000 /
10023                          (new_primary_state->src_h >> 16);
10024
10025         if (cursor_scale_w != primary_scale_w ||
10026             cursor_scale_h != primary_scale_h) {
10027                 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10028                 return -EINVAL;
10029         }
10030
10031         return 0;
10032 }
10033
10034 #if defined(CONFIG_DRM_AMD_DC_DCN)
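/*
 * On an MST link the DSC configuration is shared between streams, so a
 * modeset on one CRTC may require recomputing DSC for every other CRTC on
 * the same topology; add those CRTCs to the atomic state here.
 */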
10035 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10036 {
10037         struct drm_connector *connector;
10038         struct drm_connector_state *conn_state;
10039         struct amdgpu_dm_connector *aconnector = NULL;
10040         int i;

10041         for_each_new_connector_in_state(state, connector, conn_state, i) {
10042                 if (conn_state->crtc != crtc)
10043                         continue;
10044
10045                 aconnector = to_amdgpu_dm_connector(connector);
10046                 if (!aconnector->port || !aconnector->mst_port)
10047                         aconnector = NULL;
10048                 else
10049                         break;
10050         }
10051
10052         if (!aconnector)
10053                 return 0;
10054
10055         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10056 }
10057 #endif
10058
10059 static int validate_overlay(struct drm_atomic_state *state)
10060 {
10061         int i;
10062         struct drm_plane *plane;
10063         struct drm_plane_state *new_plane_state;
10064         struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
10065
10066         /* Find the new overlay plane state, if any; bail if it is being disabled */
10067         for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10068                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10069                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10070                                 return 0;
10071
10072                         overlay_state = new_plane_state;
10073                         continue;
10074                 }
10075         }
10076
10077         /* check if we're making changes to the overlay plane */
10078         if (!overlay_state)
10079                 return 0;
10080
10081         /* check if overlay plane is enabled */
10082         if (!overlay_state->crtc)
10083                 return 0;
10084
10085         /* find the primary plane for the CRTC that the overlay is enabled on */
10086         primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10087         if (IS_ERR(primary_state))
10088                 return PTR_ERR(primary_state);
10089
10090         /* check if primary plane is enabled */
10091         if (!primary_state->crtc)
10092                 return 0;
10093
10094         /* check if cursor plane is enabled */
10095         cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10096         if (IS_ERR(cursor_state))
10097                 return PTR_ERR(cursor_state);
10098
10099         if (drm_atomic_plane_disabling(overlay_state->crtc->cursor->state, cursor_state))
10100                 return 0;
10101
10102         /* Perform the bounds check to ensure the overlay plane covers the primary */
10103         if (primary_state->crtc_x < overlay_state->crtc_x ||
10104             primary_state->crtc_y < overlay_state->crtc_y ||
10105             primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10106             primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10107                 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10108                 return -EINVAL;
10109         }
10110
10111         return 0;
10112 }
10113
10114 /**
10115  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10116  * @dev: The DRM device
10117  * @state: The atomic state to commit
10118  *
10119  * Validate that the given atomic state is programmable by DC into hardware.
10120  * This involves constructing a &struct dc_state reflecting the new hardware
10121  * state we wish to commit, then querying DC to see if it is programmable. It's
10122  * important not to modify the existing DC state. Otherwise, atomic_check
10123  * may unexpectedly commit hardware changes.
10124  *
10125  * When validating the DC state, it's important that the right locks are
10126  * acquired. For full updates case which removes/adds/updates streams on one
10127  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10128  * that any such full update commit will wait for completion of any outstanding
10129  * flip using DRMs synchronization events.
10130  *
10131  * Note that DM adds the affected connectors for all CRTCs in state, when that
10132  * might not seem necessary. This is because DC stream creation requires the
10133  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10134  * be possible but non-trivial - a possible TODO item.
10135  *
10136  * Return: 0 on success, or a negative error code if validation failed.
10137  */
10138 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10139                                   struct drm_atomic_state *state)
10140 {
10141         struct amdgpu_device *adev = drm_to_adev(dev);
10142         struct dm_atomic_state *dm_state = NULL;
10143         struct dc *dc = adev->dm.dc;
10144         struct drm_connector *connector;
10145         struct drm_connector_state *old_con_state, *new_con_state;
10146         struct drm_crtc *crtc;
10147         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10148         struct drm_plane *plane;
10149         struct drm_plane_state *old_plane_state, *new_plane_state;
10150         enum dc_status status;
10151         int ret, i;
10152         bool lock_and_validation_needed = false;
10153         struct dm_crtc_state *dm_old_crtc_state;
10154
10155         trace_amdgpu_dm_atomic_check_begin(state);
10156
10157         ret = drm_atomic_helper_check_modeset(dev, state);
10158         if (ret)
10159                 goto fail;
10160
10161         /* Check connector changes */
10162         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10163                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10164                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10165
10166                 /* Skip connectors that are disabled or already part of a modeset. */
10167                 if (!old_con_state->crtc && !new_con_state->crtc)
10168                         continue;
10169
10170                 if (!new_con_state->crtc)
10171                         continue;
10172
10173                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10174                 if (IS_ERR(new_crtc_state)) {
10175                         ret = PTR_ERR(new_crtc_state);
10176                         goto fail;
10177                 }
10178
10179                 if (dm_old_con_state->abm_level !=
10180                     dm_new_con_state->abm_level)
10181                         new_crtc_state->connectors_changed = true;
10182         }
10183
10184 #if defined(CONFIG_DRM_AMD_DC_DCN)
10185         if (dc_resource_is_dsc_encoding_supported(dc)) {
10186                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10187                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10188                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10189                                 if (ret)
10190                                         goto fail;
10191                         }
10192                 }
10193         }
10194 #endif
10195         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10196                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10197
10198                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10199                     !new_crtc_state->color_mgmt_changed &&
10200                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10201                     !dm_old_crtc_state->dsc_force_changed)
10202                         continue;
10203
10204                 if (!new_crtc_state->enable)
10205                         continue;
10206
10207                 ret = drm_atomic_add_affected_connectors(state, crtc);
10208                 if (ret)
10209                         goto fail;
10210
10211                 ret = drm_atomic_add_affected_planes(state, crtc);
10212                 if (ret)
10213                         goto fail;
10214
10215                 if (dm_old_crtc_state->dsc_force_changed)
10216                         new_crtc_state->mode_changed = true;
10217         }
10218
10219         /*
10220          * Add all primary and overlay planes on the CRTC to the state
10221          * whenever a plane is enabled to maintain correct z-ordering
10222          * and to enable fast surface updates.
10223          */
10224         drm_for_each_crtc(crtc, dev) {
10225                 bool modified = false;
10226
10227                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10228                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10229                                 continue;
10230
10231                         if (new_plane_state->crtc == crtc ||
10232                             old_plane_state->crtc == crtc) {
10233                                 modified = true;
10234                                 break;
10235                         }
10236                 }
10237
10238                 if (!modified)
10239                         continue;
10240
10241                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10242                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10243                                 continue;
10244
10245                         new_plane_state =
10246                                 drm_atomic_get_plane_state(state, plane);
10247
10248                         if (IS_ERR(new_plane_state)) {
10249                                 ret = PTR_ERR(new_plane_state);
10250                                 goto fail;
10251                         }
10252                 }
10253         }
10254
10255         /* Remove existing planes if they are modified */
10256         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10257                 ret = dm_update_plane_state(dc, state, plane,
10258                                             old_plane_state,
10259                                             new_plane_state,
10260                                             false,
10261                                             &lock_and_validation_needed);
10262                 if (ret)
10263                         goto fail;
10264         }
10265
10266         /* Disable all CRTCs which require disabling */
10267         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10268                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10269                                            old_crtc_state,
10270                                            new_crtc_state,
10271                                            false,
10272                                            &lock_and_validation_needed);
10273                 if (ret)
10274                         goto fail;
10275         }
10276
10277         /* Enable all CRTCs which require enabling */
10278         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10279                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10280                                            old_crtc_state,
10281                                            new_crtc_state,
10282                                            true,
10283                                            &lock_and_validation_needed);
10284                 if (ret)
10285                         goto fail;
10286         }
10287
10288         ret = validate_overlay(state);
10289         if (ret)
10290                 goto fail;
10291
10292         /* Add new/modified planes */
10293         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10294                 ret = dm_update_plane_state(dc, state, plane,
10295                                             old_plane_state,
10296                                             new_plane_state,
10297                                             true,
10298                                             &lock_and_validation_needed);
10299                 if (ret)
10300                         goto fail;
10301         }
10302
10303         /* Run this here since we want to validate the streams we created */
10304         ret = drm_atomic_helper_check_planes(dev, state);
10305         if (ret)
10306                 goto fail;
10307
10308         /* Check cursor planes scaling */
10309         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10310                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10311                 if (ret)
10312                         goto fail;
10313         }
10314
10315         if (state->legacy_cursor_update) {
10316                 /*
10317                  * This is a fast cursor update coming from the plane update
10318                  * helper, check if it can be done asynchronously for better
10319                  * performance.
10320                  */
10321                 state->async_update =
10322                         !drm_atomic_helper_async_check(dev, state);
10323
10324                 /*
10325                  * Skip the remaining global validation if this is an async
10326                  * update. Cursor updates can be done without affecting
10327                  * state or bandwidth calcs and this avoids the performance
10328                  * penalty of locking the private state object and
10329                  * allocating a new dc_state.
10330                  */
10331                 if (state->async_update)
10332                         return 0;
10333         }
10334
10335         /* Check scaling and underscan changes */
10336         /*
10337          * TODO: Removed scaling-changes validation due to the inability to
10338          * commit a new stream into the context w/o causing a full reset. Need
10339          * to decide how to handle this.
10339          */
10340         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10341                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10342                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10343                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10344
10345                 /* Skip any modesets/resets */
10346                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10347                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10348                         continue;
10349
10350                 /* Skip anything that is not a scaling or underscan change */
10351                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10352                         continue;
10353
10354                 lock_and_validation_needed = true;
10355         }
10356
10357         /*
10358          * Streams and planes are reset when there are changes that affect
10359          * bandwidth. Anything that affects bandwidth needs to go through
10360          * DC global validation to ensure that the configuration can be applied
10361          * to hardware.
10362          *
10363          * We have to currently stall out here in atomic_check for outstanding
10364          * commits to finish in this case because our IRQ handlers reference
10365          * DRM state directly - we can end up disabling interrupts too early
10366          * if we don't.
10367          *
10368          * TODO: Remove this stall and drop DM state private objects.
10369          */
10370         if (lock_and_validation_needed) {
10371                 ret = dm_atomic_get_state(state, &dm_state);
10372                 if (ret)
10373                         goto fail;
10374
10375                 ret = do_aquire_global_lock(dev, state);
10376                 if (ret)
10377                         goto fail;
10378
10379 #if defined(CONFIG_DRM_AMD_DC_DCN)
10380                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
10381                         ret = -EINVAL;
10382                         goto fail;
10383                 }
10382
10383                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10384                 if (ret)
10385                         goto fail;
10386 #endif
10387
10388                 /*
10389                  * Perform validation of MST topology in the state:
10390                  * We need to perform MST atomic check before calling
10391                  * dc_validate_global_state(), or there is a chance
10392                  * to get stuck in an infinite loop and hang eventually.
10393                  */
10394                 ret = drm_dp_mst_atomic_check(state);
10395                 if (ret)
10396                         goto fail;
10397                 status = dc_validate_global_state(dc, dm_state->context, false);
10398                 if (status != DC_OK) {
10399                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
10400                                        dc_status_to_str(status), status);
10401                         ret = -EINVAL;
10402                         goto fail;
10403                 }
10404         } else {
10405                 /*
10406                  * The commit is a fast update. Fast updates shouldn't change
10407                  * the DC context, affect global validation, and can have their
10408                  * commit work done in parallel with other commits not touching
10409                  * the same resource. If we have a new DC context as part of
10410                  * the DM atomic state from validation we need to free it and
10411                  * retain the existing one instead.
10412                  *
10413                  * Furthermore, since the DM atomic state only contains the DC
10414                  * context and can safely be annulled, we can free the state
10415                  * and clear the associated private object now to free
10416                  * some memory and avoid a possible use-after-free later.
10417                  */
10418
10419                 for (i = 0; i < state->num_private_objs; i++) {
10420                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10421
10422                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10423                                 int j = state->num_private_objs - 1;
10424
10425                                 dm_atomic_destroy_state(obj,
10426                                                 state->private_objs[i].state);
10427
10428                                 /* If i is not at the end of the array then the
10429                                  * last element needs to be moved to where i was
10430                                  * before the array can safely be truncated.
10431                                  */
10432                                 if (i != j)
10433                                         state->private_objs[i] =
10434                                                 state->private_objs[j];
10435
10436                                 state->private_objs[j].ptr = NULL;
10437                                 state->private_objs[j].state = NULL;
10438                                 state->private_objs[j].old_state = NULL;
10439                                 state->private_objs[j].new_state = NULL;
10440
10441                                 state->num_private_objs = j;
10442                                 break;
10443                         }
10444                 }
10445         }
10446
10447         /* Store the overall update type for use later in atomic check. */
10448         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10449                 struct dm_crtc_state *dm_new_crtc_state =
10450                         to_dm_crtc_state(new_crtc_state);
10451
10452                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10453                                                          UPDATE_TYPE_FULL :
10454                                                          UPDATE_TYPE_FAST;
10455         }
10456
10457         /* Must be success */
10458         WARN_ON(ret);
10459
10460         trace_amdgpu_dm_atomic_check_finish(state, ret);
10461
10462         return ret;
10463
10464 fail:
10465         if (ret == -EDEADLK)
10466                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10467         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10468                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10469         else
10470                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10471
10472         trace_amdgpu_dm_atomic_check_finish(state, ret);
10473
10474         return ret;
10475 }
10476
10477 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10478                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10479 {
10480         uint8_t dpcd_data;
10481         bool capable = false;
10482
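        /*
         * DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT indicates
         * that the sink can ignore the MSA timing parameters, a prerequisite
         * for changing the refresh rate seamlessly.
         */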
10483         if (amdgpu_dm_connector->dc_link &&
10484                 dm_helpers_dp_read_dpcd(
10485                                 NULL,
10486                                 amdgpu_dm_connector->dc_link,
10487                                 DP_DOWN_STREAM_PORT_COUNT,
10488                                 &dpcd_data,
10489                                 sizeof(dpcd_data))) {
10490                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10491         }
10492
10493         return capable;
10494 }
10495
10496 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10497                 uint8_t *edid_ext, int len,
10498                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10499 {
10500         int i;
10501         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10502         struct dc *dc = adev->dm.dc;
10503
10504         /* send extension block to DMCU for parsing */
10505         for (i = 0; i < len; i += 8) {
10506                 bool res;
10507                 int offset;
10508
10509                 /* send 8 bytes at a time */
10510                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10511                         return false;
10512
10513                 if (i + 8 == len) {
10514                         /* entire EDID block sent; expect a result */
10515                         int version, min_rate, max_rate;
10516
10517                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10518                         if (res) {
10519                                 /* amd vsdb found */
10520                                 vsdb_info->freesync_supported = 1;
10521                                 vsdb_info->amd_vsdb_version = version;
10522                                 vsdb_info->min_refresh_rate_hz = min_rate;
10523                                 vsdb_info->max_refresh_rate_hz = max_rate;
10524                                 return true;
10525                         }
10526                         /* not amd vsdb */
10527                         return false;
10528                 }
10529
10530                 /* check for ack */
10531                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10532                 if (!res)
10533                         return false;
10534         }
10535
10536         return false;
10537 }
10538
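/*
 * Locate the CEA extension block in the EDID (an open-coded subset of
 * drm_find_cea_extension()) and hand it to parse_edid_cea(). Returns the
 * extension index when a valid AMD VSDB is found, -ENODEV otherwise.
 */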
10539 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10540                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10541 {
10542         uint8_t *edid_ext = NULL;
10543         int i;
10544         bool valid_vsdb_found = false;
10545
10546         /*----- drm_find_cea_extension() -----*/
10547         /* No EDID or EDID extensions */
10548         if (edid == NULL || edid->extensions == 0)
10549                 return -ENODEV;
10550
10551         /* Find CEA extension */
10552         for (i = 0; i < edid->extensions; i++) {
10553                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10554                 if (edid_ext[0] == CEA_EXT)
10555                         break;
10556         }
10557
10558         if (i == edid->extensions)
10559                 return -ENODEV;
10560
10561         /*----- cea_db_offsets() -----*/
10562         if (edid_ext[0] != CEA_EXT)
10563                 return -ENODEV;
10564
10565         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10566
10567         return valid_vsdb_found ? i : -ENODEV;
10568 }
10569
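/*
 * Derive the FreeSync capability of the sink from its EDID: for DP/eDP
 * sinks from the detailed monitor range descriptor, for HDMI sinks from
 * the AMD VSDB. The result is cached in the connector state and mirrored
 * to the vrr_capable connector property.
 */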
10570 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10571                                         struct edid *edid)
10572 {
10573         int i = 0;
10574         struct detailed_timing *timing;
10575         struct detailed_non_pixel *data;
10576         struct detailed_data_monitor_range *range;
10577         struct amdgpu_dm_connector *amdgpu_dm_connector =
10578                         to_amdgpu_dm_connector(connector);
10579         struct dm_connector_state *dm_con_state = NULL;
10580
10581         struct drm_device *dev = connector->dev;
10582         struct amdgpu_device *adev = drm_to_adev(dev);
10583         bool freesync_capable = false;
10584         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10585
10586         if (!connector->state) {
10587                 DRM_ERROR("%s - Connector has no state\n", __func__);
10588                 goto update;
10589         }
10590
10591         if (!edid) {
10592                 dm_con_state = to_dm_connector_state(connector->state);
10593
10594                 amdgpu_dm_connector->min_vfreq = 0;
10595                 amdgpu_dm_connector->max_vfreq = 0;
10596                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10597
10598                 goto update;
10599         }
10600
10601         dm_con_state = to_dm_connector_state(connector->state);
10602
10603         if (!amdgpu_dm_connector->dc_sink) {
10604                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10605                 goto update;
10606         }
10607         if (!adev->dm.freesync_module)
10608                 goto update;
10609
10611         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
10612             amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10613                 bool edid_check_required = false;
10614
10615                 if (edid) {
10616                         edid_check_required = is_dp_capable_without_timing_msa(
10617                                                 adev->dm.dc,
10618                                                 amdgpu_dm_connector);
10619                 }
10620
10621                 if (edid_check_required && (edid->version > 1 ||
10622                     (edid->version == 1 && edid->revision > 1))) {
10623                         for (i = 0; i < 4; i++) {
10625                                 timing  = &edid->detailed_timings[i];
10626                                 data    = &timing->data.other_data;
10627                                 range   = &data->data.range;
10628                                 /*
10629                                  * Check if monitor has continuous frequency mode
10630                                  */
10631                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10632                                         continue;
10633                                 /*
10634                                  * Check for flag range limits only. If flag == 1 then
10635                                  * no additional timing information provided.
10636                                  * Default GTF, GTF Secondary curve and CVT are not
10637                                  * supported
10638                                  */
10639                                 if (range->flags != 1)
10640                                         continue;
10641
10642                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10643                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10644                                 amdgpu_dm_connector->pixel_clock_mhz =
10645                                         range->pixel_clock_mhz * 10;
10646
10647                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10648                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10649
10650                                 break;
10651                         }
10652
10653                         if (amdgpu_dm_connector->max_vfreq -
10654                             amdgpu_dm_connector->min_vfreq > 10)
10655                                 freesync_capable = true;
10658                 }
10659         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10660                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10661                 if (i >= 0 && vsdb_info.freesync_supported) {
10662                         timing  = &edid->detailed_timings[i];
10663                         data    = &timing->data.other_data;
10664
10665                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10666                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10667                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10668                                 freesync_capable = true;
10669
10670                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10671                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10672                 }
10673         }
10674
10675 update:
10676         if (dm_con_state)
10677                 dm_con_state->freesync_capable = freesync_capable;
10678
10679         if (connector->vrr_capable_property)
10680                 drm_connector_set_vrr_capable_property(connector,
10681                                                        freesync_capable);
10682 }
10683
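/*
 * Read DP_PSR_SUPPORT from the DPCD of a connected eDP link and cache the
 * result; any sink that reports a non-zero PSR version is treated as
 * PSR version 1 capable here.
 */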
10684 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10685 {
10686         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10687
10688         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10689                 return;
10690         if (link->type == dc_connection_none)
10691                 return;
10692         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10693                                         dpcd_data, sizeof(dpcd_data))) {
10694                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10695
10696                 if (dpcd_data[0] == 0) {
10697                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10698                         link->psr_settings.psr_feature_enabled = false;
10699                 } else {
10700                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10701                         link->psr_settings.psr_feature_enabled = true;
10702                 }
10703
10704                 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10705         }
10706 }
10707
10708 /*
10709  * amdgpu_dm_link_setup_psr() - configure psr link
10710  * @stream: stream state
10711  *
10712  * Return: true on success
10713  */
10714 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10715 {
10716         struct dc_link *link = NULL;
10717         struct psr_config psr_config = {0};
10718         struct psr_context psr_context = {0};
10719         bool ret = false;
10720
10721         if (stream == NULL)
10722                 return false;
10723
10724         link = stream->link;
10725
10726         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10727
10728         if (psr_config.psr_version > 0) {
10729                 psr_config.psr_exit_link_training_required = 0x1;
10730                 psr_config.psr_frame_capture_indication_req = 0;
10731                 psr_config.psr_rfb_setup_time = 0x37;
10732                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10733                 psr_config.allow_smu_optimizations = 0x0;
10734
10735                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10737         }
10738         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10739
10740         return ret;
10741 }
10742
10743 /*
10744  * amdgpu_dm_psr_enable() - enable psr f/w
10745  * @stream: stream state
10746  *
10747  * Return: true on success
10748  */
10749 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10750 {
10751         struct dc_link *link = stream->link;
10752         unsigned int vsync_rate_hz = 0;
10753         struct dc_static_screen_params params = {0};
10754         /* Calculate number of static frames before generating interrupt to
10755          * enter PSR.
10756          */
10757         /* Initialize a fail-safe default of 2 static frames */
10758         unsigned int num_frames_static = 2;
10759
10760         DRM_DEBUG_DRIVER("Enabling psr...\n");
10761
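        /* refresh rate in Hz = pixel clock / (h_total * v_total) */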
10762         vsync_rate_hz = div64_u64(div64_u64(stream->timing.pix_clk_100hz * 100,
10763                                             stream->timing.v_total),
10764                                   stream->timing.h_total);
10766
10767         /* Round up: compute the number of frames such that at least
10768          * 30 ms of time has passed.
10769          */
10771         if (vsync_rate_hz != 0) {
10772                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10773                 num_frames_static = (30000 / frame_time_microsec) + 1;
10774         }
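        /* e.g. at 60 Hz the frame time is 16666 us, so
         * num_frames_static = 30000 / 16666 + 1 = 2.
         */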
10775
10776         params.triggers.cursor_update = true;
10777         params.triggers.overlay_update = true;
10778         params.triggers.surface_update = true;
10779         params.num_frames = num_frames_static;
10780
10781         dc_stream_set_static_screen_params(link->ctx->dc,
10782                                            &stream, 1,
10783                                            &params);
10784
10785         return dc_link_set_psr_allow_active(link, true, false, false);
10786 }
10787
10788 /*
10789  * amdgpu_dm_psr_disable() - disable psr f/w
10790  * @stream:  stream state
10791  *
10792  * Return: true on success
10793  */
10794 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10795 {
10797         DRM_DEBUG_DRIVER("Disabling psr...\n");
10798
10799         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10800 }
10801
10802 /*
10803  * amdgpu_dm_psr_disable_all() - disable psr f/w
10804  * if psr is enabled on any stream
10805  *
10806  * Return: true on success
10807  */
10808 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10809 {
10810         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10811         return dc_set_psr_allow_active(dm->dc, false);
10812 }
10813
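/*
 * Propagate the force_timing_sync setting to every stream in the current
 * DC state and retrigger synchronization, all under the DC lock.
 */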
10814 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10815 {
10816         struct amdgpu_device *adev = drm_to_adev(dev);
10817         struct dc *dc = adev->dm.dc;
10818         int i;
10819
10820         mutex_lock(&adev->dm.dc_lock);
10821         if (dc->current_state) {
10822                 for (i = 0; i < dc->current_state->stream_count; ++i)
10823                         dc->current_state->streams[i]
10824                                 ->triggered_crtc_reset.enabled =
10825                                 adev->dm.force_timing_sync;
10826
10827                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10828                 dc_trigger_sync(dc, dc->current_state);
10829         }
10830         mutex_unlock(&adev->dm.dc_lock);
10831 }
10832
10833 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10834                        uint32_t value, const char *func_name)
10835 {
10836 #ifdef DM_CHECK_ADDR_0
10837         if (address == 0) {
10838                 DC_ERR("invalid register write; address = 0\n");
10839                 return;
10840         }
10841 #endif
10842         cgs_write_register(ctx->cgs_device, address, value);
10843         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10844 }
10845
10846 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10847                           const char *func_name)
10848 {
10849         uint32_t value;
10850 #ifdef DM_CHECK_ADDR_0
10851         if (address == 0) {
10852                 DC_ERR("invalid register read; address = 0\n");
10853                 return 0;
10854         }
10855 #endif
10856
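        /* Reads cannot be serviced while a DMUB register-sequence gather is
         * in progress (unless burst writes are allowed), so treat this as a
         * programming error and return 0.
         */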
10857         if (ctx->dmub_srv &&
10858             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10859             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10860                 ASSERT(false);
10861                 return 0;
10862         }
10863
10864         value = cgs_read_register(ctx->cgs_device, address);
10865
10866         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10867
10868         return value;
10869 }
10870
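/*
 * Kick off an AUX transfer through DMUB and wait (interruptibly, up to
 * 10 seconds) for the transfer-done notification. Returns the AUX reply
 * length once the transfer completes, or -1 on timeout.
 */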
10871 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10872                                 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10873 {
10874         struct amdgpu_device *adev = ctx->driver_context;
10875         int ret = 0;
10876
10877         dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10878         ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
10879         if (ret == 0) {
10880                 *operation_result = AUX_RET_ERROR_TIMEOUT;
10881                 return -1;
10882         }
10883         *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10884
10885         if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10886                 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10887
10888                 /* For the read case, copy the reply data into the payload */
10889                 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10890                     (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10891                         memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10892                                adev->dm.dmub_notify->aux_reply.length);
10893         }
10894
10895         return adev->dm.dmub_notify->aux_reply.length;
10896 }