/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

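/*
 * Push the current DP dongle type to the connector's "subconnector"
 * property so userspace can distinguish e.g. a DP-to-HDMI dongle from a
 * native DP sink. Only meaningful for DisplayPort connectors; reports
 * Unknown while no sink is attached.
 */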
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

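/*
 * Read back the current scanout position. *position packs the position
 * as (h_position << 16) | v_position and *vbl packs the vblank window
 * as (v_blank_end << 16) | v_blank_start, mirroring the legacy register
 * layout the base driver still expects (see the TODO below).
 */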
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

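/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing
 * generator) instance. IRQ sources are keyed by OTG instance rather
 * than DRM CRTC index, so the interrupt handlers below use this to
 * find their CRTC.
 */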
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

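/*
 * VRR (FreeSync) counts as active in both the variable and the fixed
 * (below-the-range) refresh states. The _irq variant reads the IRQ-side
 * copy of the freesync config and is used from the interrupt handlers;
 * the other variant inspects a DM CRTC state.
 */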
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

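/*
 * A DC vmin/vmax timing adjustment is needed whenever the new state
 * (re)enters the fixed-rate VRR state or the VRR active/inactive state
 * differs between the old and new CRTC state.
 */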
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of the
         * vblank of pageflip completion, so last_flip_vblank is the forbidden
         * count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

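/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Fires at the end of front-porch. In VRR mode this is where core
 * vblank handling runs (timestamps are only valid once scanout is past
 * the variable-length front-porch), and where below-the-range (BTR)
 * handling is done for ASICs older than AMDGPU_FAMILY_AI.
 */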
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after the end of
                 * front-porch in VRR mode, as vblank timestamping only gives
                 * valid results once scanout is past the front-porch. This
                 * also delivers any page-flip completion events that were
                 * queued to us because a pageflip happened inside the
                 * front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-VRR mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * The following must happen at start of vblank, for crc
         * computation and below-the-range btr support in VRR mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then the DCN HUBP may be
         * clock-gated. In that case, pageflip completion interrupts won't
         * fire and pageflip completion events won't get delivered. Prevent
         * this by sending pending pageflip events from here if a flip is
         * still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * DMUB AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification into DM, where it can be read by the
 * AUX-command-issuing thread, and signals the event to wake that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * DMUB HPD interrupt processing callback. Looks up the display index
 * via the link index and calls a helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index >= adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else
                return false;

        return true;
}

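/*
 * A typical (illustrative) registration from driver init:
 *
 *      register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                    dmub_hpd_callback, true);
 *
 * where "true" asks for the callback to run from the delayed HPD work
 * queue rather than directly in the outbox IRQ handler.
 *
 * dm_handle_hpd_work() below is that deferred-work path: it invokes the
 * registered callback for the copied notification, then frees both the
 * copy and the work item.
 */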
static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                                                                                      dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching pending DMUB
 * notifications and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
            irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type]) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify,
                                       sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
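/*
 * The buffer is sized for the largest mode in the connector's mode list
 * at 4 bytes per pixel (worst case) and is only created for eDP panels
 * on ASICs whose DC exposes an FBC compressor.
 */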
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                                                AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                                                &compressor->gpu_addr, &compressor->cpu_addr);

                if (r) {
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                } else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}
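
/*
 * DRM audio component glue: lets the HDA driver query, per audio pin
 * ("port"), whether audio is enabled and fetch the connector's ELD.
 */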
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

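/*
 * Copy the DMUB firmware sections and VBIOS into their framebuffer
 * windows, then bring up the DMUB service and wait for its firmware
 * auto-load. Returns 0 on success, including on ASICs without DMUB
 * support, and a negative errno on failure.
 */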
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
                hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
#endif
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

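/*
 * On resume, a DMUB instance that is still initialized only needs to
 * finish its firmware auto-load; otherwise fall back to the full
 * dm_dmub_hw_init() path.
 */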
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
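/*
 * Translate the GMC view of system memory (framebuffer aperture, AGP
 * window, GART page table) into the dc_phy_addr_space_config consumed
 * by DC. The shifts mirror the register encodings: system aperture in
 * 256KB (>> 18) units, AGP in 16MB (>> 24) units and page tables in
 * 4KB (>> 12) units.
 */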
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (by 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
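/*
 * Deferred vblank enable/disable handling: a count of CRTCs with vblank
 * interrupts enabled gates idle optimizations (MALL), which are only
 * allowed when the count is zero, and PSR is entered or exited based on
 * the OS vblank requirements for the stream.
 */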
static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /* Control PSR based on vblank requirements from OS */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}

#endif

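/*
 * HPD RX offload: from process context, re-detect the sink and, when
 * not in GPU reset, service automated-test requests or DP link loss on
 * behalf of the HPD RX interrupt handler.
 */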
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work;
        struct amdgpu_dm_connector *aconnector;
        struct dc_link *dc_link;
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;

        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;

        if (!aconnector) {
                DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
                goto skip;
        }

        adev = drm_to_adev(aconnector->base.dev);
        dc_link = aconnector->dc_link;

        mutex_lock(&aconnector->hpd_lock);
        if (!dc_link_detect_sink(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);

        if (new_connection_type == dc_connection_none)
                goto skip;

        if (amdgpu_in_reset(adev))
                goto skip;

        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
                dc_link_dp_handle_automated_test(dc_link);
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                dc_link_dp_handle_link_loss(dc_link);
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
        }
        mutex_unlock(&adev->dm.dc_lock);

skip:
        kfree(offload_work);
}

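/*
 * Allocate one single-threaded work queue per link so that potentially
 * long-running HPD RX work (link-loss recovery, automated tests) runs
 * outside the interrupt path.
 */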
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        int i = 0;
        struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

        hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

        if (!hpd_rx_offload_wq)
                return NULL;

        for (i = 0; i < max_caps; i++) {
                hpd_rx_offload_wq[i].wq =
                        create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

                if (hpd_rx_offload_wq[i].wq == NULL) {
                        DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
                        goto out_err;
                }

                spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
        }

        return hpd_rx_offload_wq;

out_err:
        /* Unwind: free any work queues created before the failure. */
        while (--i >= 0)
                destroy_workqueue(hpd_rx_offload_wq[i].wq);
        kfree(hpd_rx_offload_wq);
        return NULL;
}

1363 struct amdgpu_stutter_quirk {
1364         u16 chip_vendor;
1365         u16 chip_device;
1366         u16 subsys_vendor;
1367         u16 subsys_device;
1368         u8 revision;
1369 };
1370
1371 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1372         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1373         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1374         { 0, 0, 0, 0, 0 },
1375 };
1376
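/*
 * Stutter is disabled only for devices matching a quirk entry above; all
 * five PCI identity fields (vendor, device, subsystem IDs, revision) must
 * match.
 */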
1377 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1378 {
1379         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1380
1381         while (p && p->chip_device != 0) {
1382                 if (pdev->vendor == p->chip_vendor &&
1383                     pdev->device == p->chip_device &&
1384                     pdev->subsystem_vendor == p->subsys_vendor &&
1385                     pdev->subsystem_device == p->subsys_device &&
1386                     pdev->revision == p->revision) {
1387                         return true;
1388                 }
1389                 ++p;
1390         }
1391         return false;
1392 }
1393
1394 static int amdgpu_dm_init(struct amdgpu_device *adev)
1395 {
1396         struct dc_init_data init_data;
1397 #ifdef CONFIG_DRM_AMD_DC_HDCP
1398         struct dc_callback_init init_params;
1399 #endif
1400         int r;
1401
1402         adev->dm.ddev = adev_to_drm(adev);
1403         adev->dm.adev = adev;
1404
1405         /* Zero all the fields */
1406         memset(&init_data, 0, sizeof(init_data));
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408         memset(&init_params, 0, sizeof(init_params));
1409 #endif
1410
1411         mutex_init(&adev->dm.dc_lock);
1412         mutex_init(&adev->dm.audio_lock);
1413 #if defined(CONFIG_DRM_AMD_DC_DCN)
1414         spin_lock_init(&adev->dm.vblank_lock);
1415 #endif
1416
1417         if (amdgpu_dm_irq_init(adev)) {
1418                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1419                 goto error;
1420         }
1421
1422         init_data.asic_id.chip_family = adev->family;
1423
1424         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1425         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1426         init_data.asic_id.chip_id = adev->pdev->device;
1427
1428         init_data.asic_id.vram_width = adev->gmc.vram_width;
1429         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1430         init_data.asic_id.atombios_base_address =
1431                 adev->mode_info.atom_context->bios;
1432
1433         init_data.driver = adev;
1434
1435         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1436
1437         if (!adev->dm.cgs_device) {
1438                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1439                 goto error;
1440         }
1441
1442         init_data.cgs_device = adev->dm.cgs_device;
1443
1444         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1445
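        /*
         * DCN 2.1 (Renoir): only the early DMCUB firmware builds listed in
         * the cases below still pair with the legacy DMCU; any other
         * (newer) firmware runs with the DMCU disabled.
         */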
1446         switch (adev->ip_versions[DCE_HWIP][0]) {
1447         case IP_VERSION(2, 1, 0):
1448                 switch (adev->dm.dmcub_fw_version) {
1449                 case 0: /* development */
1450                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1451                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1452                         init_data.flags.disable_dmcu = false;
1453                         break;
1454                 default:
1455                         init_data.flags.disable_dmcu = true;
1456                 }
1457                 break;
1458         case IP_VERSION(2, 0, 3):
1459                 init_data.flags.disable_dmcu = true;
1460                 break;
1461         default:
1462                 break;
1463         }
1464
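        /* Determine which ASICs set gpu_vm_support (scatter/gather display on APUs). */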
1465         switch (adev->asic_type) {
1466         case CHIP_CARRIZO:
1467         case CHIP_STONEY:
1468                 init_data.flags.gpu_vm_support = true;
1469                 break;
1470         default:
1471                 switch (adev->ip_versions[DCE_HWIP][0]) {
1472                 case IP_VERSION(1, 0, 0):
1473                 case IP_VERSION(1, 0, 1):
1474                         /* enable S/G on PCO and RV2 */
1475                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1476                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1477                                 init_data.flags.gpu_vm_support = true;
1478                         break;
1479                 case IP_VERSION(2, 1, 0):
1480                 case IP_VERSION(3, 0, 1):
1481                 case IP_VERSION(3, 1, 2):
1482                 case IP_VERSION(3, 1, 3):
1483                 case IP_VERSION(3, 1, 5):
1484                 case IP_VERSION(3, 1, 6):
1485                         init_data.flags.gpu_vm_support = true;
1486                         break;
1487                 default:
1488                         break;
1489                 }
1490                 break;
1491         }
1492
1493         if (init_data.flags.gpu_vm_support)
1494                 adev->mode_info.gpu_vm_support = true;
1495
1496         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1497                 init_data.flags.fbc_support = true;
1498
1499         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1500                 init_data.flags.multi_mon_pp_mclk_switch = true;
1501
1502         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1503                 init_data.flags.disable_fractional_pwm = true;
1504
1505         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1506                 init_data.flags.edp_no_power_sequencing = true;
1507
1508 #ifdef CONFIG_DRM_AMD_DC_DCN
1509         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1510                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1511         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1512                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1513 #endif
1514
1515         init_data.flags.seamless_boot_edp_requested = false;
1516
1517         if (check_seamless_boot_capability(adev)) {
1518                 init_data.flags.seamless_boot_edp_requested = true;
1519                 init_data.flags.allow_seamless_boot_optimization = true;
1520                 DRM_INFO("Seamless boot condition check passed\n");
1521         }
1522
1523         INIT_LIST_HEAD(&adev->dm.da_list);
1524         /* Display Core create. */
1525         adev->dm.dc = dc_create(&init_data);
1526
1527         if (adev->dm.dc) {
1528                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1529         } else {
1530                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1531                 goto error;
1532         }
1533
1534         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1535                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1536                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1537         }
1538
1539         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1540                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1541         if (dm_should_disable_stutter(adev->pdev))
1542                 adev->dm.dc->debug.disable_stutter = true;
1543
1544         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1545                 adev->dm.dc->debug.disable_stutter = true;
1546
1547         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1548                 adev->dm.dc->debug.disable_dsc = true;
1549                 adev->dm.dc->debug.disable_dsc_edp = true;
1550         }
1551
1552         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1553                 adev->dm.dc->debug.disable_clock_gate = true;
1554
1555         r = dm_dmub_hw_init(adev);
1556         if (r) {
1557                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1558                 goto error;
1559         }
1560
1561         dc_hardware_init(adev->dm.dc);
1562
1563         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1564         if (!adev->dm.hpd_rx_offload_wq) {
1565                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1566                 goto error;
1567         }
1568
1569 #if defined(CONFIG_DRM_AMD_DC_DCN)
1570         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1571                 struct dc_phy_addr_space_config pa_config;
1572
1573                 mmhub_read_system_context(adev, &pa_config);
1574
1575                 // Call the DC init_memory func
1576                 dc_setup_system_context(adev->dm.dc, &pa_config);
1577         }
1578 #endif
1579
1580         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1581         if (!adev->dm.freesync_module) {
1582                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1584         } else
1585                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1586                                  adev->dm.freesync_module);
1587
1588         amdgpu_dm_init_color_mod();
1589
1590 #if defined(CONFIG_DRM_AMD_DC_DCN)
1591         if (adev->dm.dc->caps.max_links > 0) {
1592                 adev->dm.vblank_control_workqueue =
1593                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1594                 if (!adev->dm.vblank_control_workqueue)
1595                         DRM_ERROR("amdgpu: failed to create vblank_control_workqueue.\n");
1596         }
1597 #endif
1598
1599 #ifdef CONFIG_DRM_AMD_DC_HDCP
1600         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1601                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1602
1603                 if (!adev->dm.hdcp_workqueue)
1604                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1605                 else
1606                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1607
1608                 dc_init_callbacks(adev->dm.dc, &init_params);
1609         }
1610 #endif
1611 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1612         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1613 #endif
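        /*
         * If the DMUB supports outbox notifications, set up the shared
         * notification struct, a delayed-work queue for HPD handling, and
         * the AUX/HPD callbacks consumed by the outbox IRQ path.
         */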
1614         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1615                 init_completion(&adev->dm.dmub_aux_transfer_done);
1616                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1617                 if (!adev->dm.dmub_notify) {
1618                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1619                         goto error;
1620                 }
1621
1622                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1623                 if (!adev->dm.delayed_hpd_wq) {
1624                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1625                         goto error;
1626                 }
1627
1628                 amdgpu_dm_outbox_init(adev);
1629 #if defined(CONFIG_DRM_AMD_DC_DCN)
1630                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1631                         dmub_aux_setconfig_callback, false)) {
1632                         DRM_ERROR("amdgpu: failed to register DMUB AUX callback\n");
1633                         goto error;
1634                 }
1635                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1636                         DRM_ERROR("amdgpu: failed to register DMUB HPD callback\n");
1637                         goto error;
1638                 }
1639                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1640                         DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback\n");
1641                         goto error;
1642                 }
1643 #endif /* CONFIG_DRM_AMD_DC_DCN */
1644         }
1645
1646         if (amdgpu_dm_initialize_drm_device(adev)) {
1647                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1649                 goto error;
1650         }
1651
1652         /* create fake encoders for MST */
1653         dm_dp_create_fake_mst_encoders(adev);
1654
1655         /* TODO: Add_display_info? */
1656
1657         /* TODO use dynamic cursor width */
1658         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1659         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1660
1661         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1662                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1664                 goto error;
1665         }
1666
1668         DRM_DEBUG_DRIVER("KMS initialized.\n");
1669
1670         return 0;
1671 error:
1672         amdgpu_dm_fini(adev);
1673
1674         return -EINVAL;
1675 }
1676
1677 static int amdgpu_dm_early_fini(void *handle)
1678 {
1679         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1680
1681         amdgpu_dm_audio_fini(adev);
1682
1683         return 0;
1684 }
1685
1686 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1687 {
1688         int i;
1689
1690 #if defined(CONFIG_DRM_AMD_DC_DCN)
1691         if (adev->dm.vblank_control_workqueue) {
1692                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1693                 adev->dm.vblank_control_workqueue = NULL;
1694         }
1695 #endif
1696
1697         for (i = 0; i < adev->dm.display_indexes_num; i++)
1698                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1700
1701         amdgpu_dm_destroy_drm_device(&adev->dm);
1702
1703 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1704         if (adev->dm.crc_rd_wrk) {
1705                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1706                 kfree(adev->dm.crc_rd_wrk);
1707                 adev->dm.crc_rd_wrk = NULL;
1708         }
1709 #endif
1710 #ifdef CONFIG_DRM_AMD_DC_HDCP
1711         if (adev->dm.hdcp_workqueue) {
1712                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1713                 adev->dm.hdcp_workqueue = NULL;
1714         }
1715
1716         if (adev->dm.dc)
1717                 dc_deinit_callbacks(adev->dm.dc);
1718 #endif
1719
        /* amdgpu_dm_fini() also runs on amdgpu_dm_init()'s error path, where dc may be NULL */
1720         if (adev->dm.dc)
                     dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1721
1722         if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1723                 kfree(adev->dm.dmub_notify);
1724                 adev->dm.dmub_notify = NULL;
1725                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1726                 adev->dm.delayed_hpd_wq = NULL;
1727         }
1728
1729         if (adev->dm.dmub_bo)
1730                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1731                                       &adev->dm.dmub_bo_gpu_addr,
1732                                       &adev->dm.dmub_bo_cpu_addr);
1733
1734         if (adev->dm.hpd_rx_offload_wq) {
1735                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1736                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1737                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1738                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1739                         }
1740                 }
1741
1742                 kfree(adev->dm.hpd_rx_offload_wq);
1743                 adev->dm.hpd_rx_offload_wq = NULL;
1744         }
1745
1746         /* DC Destroy TODO: Replace destroy DAL */
1747         if (adev->dm.dc)
1748                 dc_destroy(&adev->dm.dc);
1749         /*
1750          * TODO: pageflip, vblank interrupt
1751          *
1752          * amdgpu_dm_irq_fini(adev);
1753          */
1754
1755         if (adev->dm.cgs_device) {
1756                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1757                 adev->dm.cgs_device = NULL;
1758         }
1759         if (adev->dm.freesync_module) {
1760                 mod_freesync_destroy(adev->dm.freesync_module);
1761                 adev->dm.freesync_module = NULL;
1762         }
1763
1764         mutex_destroy(&adev->dm.audio_lock);
1765         mutex_destroy(&adev->dm.dc_lock);
1768 }
1769
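/*
 * Request and validate the standalone DMCU firmware for the ASICs that need
 * it. ASICs that carry no separately loaded DMCU simply return 0; unknown
 * ASIC types are rejected with -EINVAL.
 */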
1770 static int load_dmcu_fw(struct amdgpu_device *adev)
1771 {
1772         const char *fw_name_dmcu = NULL;
1773         int r;
1774         const struct dmcu_firmware_header_v1_0 *hdr;
1775
1776         switch (adev->asic_type) {
1777 #if defined(CONFIG_DRM_AMD_DC_SI)
1778         case CHIP_TAHITI:
1779         case CHIP_PITCAIRN:
1780         case CHIP_VERDE:
1781         case CHIP_OLAND:
1782 #endif
1783         case CHIP_BONAIRE:
1784         case CHIP_HAWAII:
1785         case CHIP_KAVERI:
1786         case CHIP_KABINI:
1787         case CHIP_MULLINS:
1788         case CHIP_TONGA:
1789         case CHIP_FIJI:
1790         case CHIP_CARRIZO:
1791         case CHIP_STONEY:
1792         case CHIP_POLARIS11:
1793         case CHIP_POLARIS10:
1794         case CHIP_POLARIS12:
1795         case CHIP_VEGAM:
1796         case CHIP_VEGA10:
1797         case CHIP_VEGA12:
1798         case CHIP_VEGA20:
1799                 return 0;
1800         case CHIP_NAVI12:
1801                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1802                 break;
1803         case CHIP_RAVEN:
1804                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1805                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1806                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1807                 else
1808                         return 0;
1810                 break;
1811         default:
1812                 switch (adev->ip_versions[DCE_HWIP][0]) {
1813                 case IP_VERSION(2, 0, 2):
1814                 case IP_VERSION(2, 0, 3):
1815                 case IP_VERSION(2, 0, 0):
1816                 case IP_VERSION(2, 1, 0):
1817                 case IP_VERSION(3, 0, 0):
1818                 case IP_VERSION(3, 0, 2):
1819                 case IP_VERSION(3, 0, 3):
1820                 case IP_VERSION(3, 0, 1):
1821                 case IP_VERSION(3, 1, 2):
1822                 case IP_VERSION(3, 1, 3):
1823                 case IP_VERSION(3, 1, 5):
1824                 case IP_VERSION(3, 1, 6):
1825                         return 0;
1826                 default:
1827                         break;
1828                 }
1829                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1830                 return -EINVAL;
1831         }
1832
1833         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1834                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1835                 return 0;
1836         }
1837
1838         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1839         if (r == -ENOENT) {
1840                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1841                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1842                 adev->dm.fw_dmcu = NULL;
1843                 return 0;
1844         }
1845         if (r) {
1846                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1847                         fw_name_dmcu);
1848                 return r;
1849         }
1850
1851         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1852         if (r) {
1853                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1854                         fw_name_dmcu);
1855                 release_firmware(adev->dm.fw_dmcu);
1856                 adev->dm.fw_dmcu = NULL;
1857                 return r;
1858         }
1859
1860         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1861         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1862         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1863         adev->firmware.fw_size +=
1864                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1865
1866         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1867         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1868         adev->firmware.fw_size +=
1869                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1870
1871         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1872
1873         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1874
1875         return 0;
1876 }
1877
1878 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1879 {
1880         struct amdgpu_device *adev = ctx;
1881
1882         return dm_read_reg(adev->dm.dc->ctx, address);
1883 }
1884
1885 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1886                                      uint32_t value)
1887 {
1888         struct amdgpu_device *adev = ctx;
1889
1890         dm_write_reg(adev->dm.dc->ctx, address, value);
1891 }
1892
1893 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1894 {
1895         struct dmub_srv_create_params create_params;
1896         struct dmub_srv_region_params region_params;
1897         struct dmub_srv_region_info region_info;
1898         struct dmub_srv_fb_params fb_params;
1899         struct dmub_srv_fb_info *fb_info;
1900         struct dmub_srv *dmub_srv;
1901         const struct dmcub_firmware_header_v1_0 *hdr;
1902         const char *fw_name_dmub;
1903         enum dmub_asic dmub_asic;
1904         enum dmub_status status;
1905         int r;
1906
1907         switch (adev->ip_versions[DCE_HWIP][0]) {
1908         case IP_VERSION(2, 1, 0):
1909                 dmub_asic = DMUB_ASIC_DCN21;
1910                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1911                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1912                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1913                 break;
1914         case IP_VERSION(3, 0, 0):
1915                 dmub_asic = DMUB_ASIC_DCN30;
1916                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
1917                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1918                 else
1919                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1920                 break;
1923         case IP_VERSION(3, 0, 1):
1924                 dmub_asic = DMUB_ASIC_DCN301;
1925                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1926                 break;
1927         case IP_VERSION(3, 0, 2):
1928                 dmub_asic = DMUB_ASIC_DCN302;
1929                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1930                 break;
1931         case IP_VERSION(3, 0, 3):
1932                 dmub_asic = DMUB_ASIC_DCN303;
1933                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1934                 break;
1935         case IP_VERSION(3, 1, 2):
1936         case IP_VERSION(3, 1, 3):
1937                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1938                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1939                 break;
1940         case IP_VERSION(3, 1, 5):
1941                 dmub_asic = DMUB_ASIC_DCN315;
1942                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1943                 break;
1944         case IP_VERSION(3, 1, 6):
1945                 dmub_asic = DMUB_ASIC_DCN316;
1946                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1947                 break;
1948         default:
1949                 /* ASIC doesn't support DMUB. */
1950                 return 0;
1951         }
1952
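        /* Note: a missing or invalid DMUB firmware is logged but treated as non-fatal here. */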
1953         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1954         if (r) {
1955                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1956                 return 0;
1957         }
1958
1959         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1960         if (r) {
1961                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1962                 return 0;
1963         }
1964
1965         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1966         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1967
1968         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1969                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1970                         AMDGPU_UCODE_ID_DMCUB;
1971                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1972                         adev->dm.dmub_fw;
1973                 adev->firmware.fw_size +=
1974                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1975
1976                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1977                          adev->dm.dmcub_fw_version);
1978         }
1979
1981         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1982         dmub_srv = adev->dm.dmub_srv;
1983
1984         if (!dmub_srv) {
1985                 DRM_ERROR("Failed to allocate DMUB service!\n");
1986                 return -ENOMEM;
1987         }
1988
1989         memset(&create_params, 0, sizeof(create_params));
1990         create_params.user_ctx = adev;
1991         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1992         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1993         create_params.asic = dmub_asic;
1994
1995         /* Create the DMUB service. */
1996         status = dmub_srv_create(dmub_srv, &create_params);
1997         if (status != DMUB_STATUS_OK) {
1998                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1999                 return -EINVAL;
2000         }
2001
2002         /* Calculate the size of all the regions for the DMUB service. */
2003         memset(&region_params, 0, sizeof(region_params));
2004
2005         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2006                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2007         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2008         region_params.vbios_size = adev->bios_size;
2009         region_params.fw_bss_data = region_params.bss_data_size ?
2010                 adev->dm.dmub_fw->data +
2011                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2012                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2013         region_params.fw_inst_const =
2014                 adev->dm.dmub_fw->data +
2015                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2016                 PSP_HEADER_BYTES;
2017
2018         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2019                                            &region_info);
2020
2021         if (status != DMUB_STATUS_OK) {
2022                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2023                 return -EINVAL;
2024         }
2025
2026         /*
2027          * Allocate a framebuffer based on the total size of all the regions.
2028          * TODO: Move this into GART.
2029          */
2030         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2031                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2032                                     &adev->dm.dmub_bo_gpu_addr,
2033                                     &adev->dm.dmub_bo_cpu_addr);
2034         if (r)
2035                 return r;
2036
2037         /* Rebase the regions on the framebuffer address. */
2038         memset(&fb_params, 0, sizeof(fb_params));
2039         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2040         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2041         fb_params.region_info = &region_info;
2042
2043         adev->dm.dmub_fb_info =
2044                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2045         fb_info = adev->dm.dmub_fb_info;
2046
2047         if (!fb_info) {
2048                 DRM_ERROR(
2049                         "Failed to allocate framebuffer info for DMUB service!\n");
2050                 return -ENOMEM;
2051         }
2052
2053         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2054         if (status != DMUB_STATUS_OK) {
2055                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2056                 return -EINVAL;
2057         }
2058
2059         return 0;
2060 }
2061
2062 static int dm_sw_init(void *handle)
2063 {
2064         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2065         int r;
2066
2067         r = dm_dmub_sw_init(adev);
2068         if (r)
2069                 return r;
2070
2071         return load_dmcu_fw(adev);
2072 }
2073
2074 static int dm_sw_fini(void *handle)
2075 {
2076         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2077
2078         kfree(adev->dm.dmub_fb_info);
2079         adev->dm.dmub_fb_info = NULL;
2080
2081         if (adev->dm.dmub_srv) {
2082                 dmub_srv_destroy(adev->dm.dmub_srv);
2083                 adev->dm.dmub_srv = NULL;
2084         }
2085
2086         release_firmware(adev->dm.dmub_fw);
2087         adev->dm.dmub_fw = NULL;
2088
2089         release_firmware(adev->dm.fw_dmcu);
2090         adev->dm.fw_dmcu = NULL;
2091
2092         return 0;
2093 }
2094
2095 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2096 {
2097         struct amdgpu_dm_connector *aconnector;
2098         struct drm_connector *connector;
2099         struct drm_connector_list_iter iter;
2100         int ret = 0;
2101
2102         drm_connector_list_iter_begin(dev, &iter);
2103         drm_for_each_connector_iter(connector, &iter) {
2104                 aconnector = to_amdgpu_dm_connector(connector);
2105                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2106                     aconnector->mst_mgr.aux) {
2107                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2108                                          aconnector,
2109                                          aconnector->base.base.id);
2110
2111                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2112                         if (ret < 0) {
2113                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2114                                 aconnector->dc_link->type =
2115                                         dc_connection_single;
2116                                 break;
2117                         }
2118                 }
2119         }
2120         drm_connector_list_iter_end(&iter);
2121
2122         return ret;
2123 }
2124
2125 static int dm_late_init(void *handle)
2126 {
2127         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2128
2129         struct dmcu_iram_parameters params;
2130         unsigned int linear_lut[16];
2131         int i;
2132         struct dmcu *dmcu = NULL;
2133
2134         dmcu = adev->dm.dc->res_pool->dmcu;
2135
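        /* Build a 16-entry linear backlight LUT spanning 0..0xFFFF. */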
2136         for (i = 0; i < 16; i++)
2137                 linear_lut[i] = 0xFFFF * i / 15;
2138
2139         params.set = 0;
2140         params.backlight_ramping_override = false;
2141         params.backlight_ramping_start = 0xCCCC;
2142         params.backlight_ramping_reduction = 0xCCCCCCCC;
2143         params.backlight_lut_array_size = 16;
2144         params.backlight_lut_array = linear_lut;
2145
2146         /* Min backlight level after ABM reduction; don't allow below 1%:
2147          * 0xFFFF * 0.01 = 0x28F.
2148          */
2149         params.min_abm_backlight = 0x28F;
2150         /* In the case where ABM is implemented on DMCUB,
2151          * the DMCU object will be NULL.
2152          * ABM 2.4 and up are implemented on DMCUB.
2153          */
2154         if (dmcu) {
2155                 if (!dmcu_load_iram(dmcu, params))
2156                         return -EINVAL;
2157         } else if (adev->dm.dc->ctx->dmub_srv) {
2158                 struct dc_link *edp_links[MAX_NUM_EDP];
2159                 int edp_num;
2160
2161                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2162                 for (i = 0; i < edp_num; i++) {
2163                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2164                                 return -EINVAL;
2165                 }
2166         }
2167
2168         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2169 }
2170
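/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, MST is torn down on that connector and a
 * hotplug event is generated so userspace reprobes the topology.
 */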
2171 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2172 {
2173         struct amdgpu_dm_connector *aconnector;
2174         struct drm_connector *connector;
2175         struct drm_connector_list_iter iter;
2176         struct drm_dp_mst_topology_mgr *mgr;
2177         int ret;
2178         bool need_hotplug = false;
2179
2180         drm_connector_list_iter_begin(dev, &iter);
2181         drm_for_each_connector_iter(connector, &iter) {
2182                 aconnector = to_amdgpu_dm_connector(connector);
2183                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2184                     aconnector->mst_port)
2185                         continue;
2186
2187                 mgr = &aconnector->mst_mgr;
2188
2189                 if (suspend) {
2190                         drm_dp_mst_topology_mgr_suspend(mgr);
2191                 } else {
2192                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2193                         if (ret < 0) {
2194                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2195                                 need_hotplug = true;
2196                         }
2197                 }
2198         }
2199         drm_connector_list_iter_end(&iter);
2200
2201         if (need_hotplug)
2202                 drm_kms_helper_hotplug_event(dev);
2203 }
2204
2205 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2206 {
2207         int ret = 0;
2208
2209         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2210          * on the Windows driver dc implementation.
2211          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2212          * should be passed to smu during boot up and resume from s3.
2213          * boot up: dc calculate dcn watermark clock settings within dc_create,
2214          * dcn20_resource_construct
2215          * then call pplib functions below to pass the settings to smu:
2216          * smu_set_watermarks_for_clock_ranges
2217          * smu_set_watermarks_table
2218          * navi10_set_watermarks_table
2219          * smu_write_watermarks_table
2220          *
2221          * For Renoir, clock settings of dcn watermark are also fixed values.
2222          * dc has implemented different flow for window driver:
2223          * dc_hardware_init / dc_set_power_state
2224          * dcn10_init_hw
2225          * notify_wm_ranges
2226          * set_wm_ranges
2227          * -- Linux
2228          * smu_set_watermarks_for_clock_ranges
2229          * renoir_set_watermarks_table
2230          * smu_write_watermarks_table
2231          *
2232          * For Linux,
2233          * dc_hardware_init -> amdgpu_dm_init
2234          * dc_set_power_state --> dm_resume
2235          *
2236          * Therefore, this function applies to Navi10/12/14 but not Renoir.
2238          */
2239         switch (adev->ip_versions[DCE_HWIP][0]) {
2240         case IP_VERSION(2, 0, 2):
2241         case IP_VERSION(2, 0, 0):
2242                 break;
2243         default:
2244                 return 0;
2245         }
2246
2247         ret = amdgpu_dpm_write_watermarks_table(adev);
2248         if (ret) {
2249                 DRM_ERROR("Failed to update WMTABLE!\n");
2250                 return ret;
2251         }
2252
2253         return 0;
2254 }
2255
2256 /**
2257  * dm_hw_init() - Initialize DC device
2258  * @handle: The base driver device containing the amdgpu_dm device.
2259  *
2260  * Initialize the &struct amdgpu_display_manager device. This involves calling
2261  * the initializers of each DM component, then populating the struct with them.
2262  *
2263  * Although the function implies hardware initialization, both hardware and
2264  * software are initialized here. Splitting them out to their relevant init
2265  * hooks is a future TODO item.
2266  *
2267  * Some notable things that are initialized here:
2268  *
2269  * - Display Core, both software and hardware
2270  * - DC modules that we need (freesync and color management)
2271  * - DRM software states
2272  * - Interrupt sources and handlers
2273  * - Vblank support
2274  * - Debug FS entries, if enabled
2275  */
2276 static int dm_hw_init(void *handle)
2277 {
2278         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* Create DAL display manager */
2280         r = amdgpu_dm_init(adev);
        if (r)
                return r;
2281         amdgpu_dm_hpd_init(adev);
2282
2283         return 0;
2284 }
2285
2286 /**
2287  * dm_hw_fini() - Teardown DC device
2288  * @handle: The base driver device containing the amdgpu_dm device.
2289  *
2290  * Teardown components within &struct amdgpu_display_manager that require
2291  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2292  * were loaded. Also flush IRQ workqueues and disable them.
2293  */
2294 static int dm_hw_fini(void *handle)
2295 {
2296         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2297
2298         amdgpu_dm_hpd_fini(adev);
2299
2300         amdgpu_dm_irq_fini(adev);
2301         amdgpu_dm_fini(adev);
2302         return 0;
2303 }
2304
2305
2306 static int dm_enable_vblank(struct drm_crtc *crtc);
2307 static void dm_disable_vblank(struct drm_crtc *crtc);
2308
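/*
 * Around GPU reset: disable, and later re-enable, the pageflip and vblank
 * interrupts of every stream in @state that still has planes attached.
 */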
2309 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2310                                  struct dc_state *state, bool enable)
2311 {
2312         enum dc_irq_source irq_source;
2313         struct amdgpu_crtc *acrtc;
2314         int rc = -EBUSY;
2315         int i = 0;
2316
2317         for (i = 0; i < state->stream_count; i++) {
2318                 acrtc = get_crtc_by_otg_inst(
2319                                 adev, state->stream_status[i].primary_otg_inst);
2320
2321                 if (acrtc && state->stream_status[i].plane_count != 0) {
2322                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2323                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2324                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2325                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2326                         if (rc)
2327                                 DRM_WARN("Failed to %s pflip interrupts\n",
2328                                          enable ? "enable" : "disable");
2329
2330                         if (enable) {
2331                                 rc = dm_enable_vblank(&acrtc->base);
2332                                 if (rc)
2333                                         DRM_WARN("Failed to enable vblank interrupts\n");
2334                         } else {
2335                                 dm_disable_vblank(&acrtc->base);
2336                         }
2337
2338                 }
2339         }
2341 }
2342
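/*
 * Commit a copy of the current state with every plane and stream removed,
 * leaving the hardware driving zero streams (used when suspending for GPU
 * reset).
 */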
2343 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2344 {
2345         struct dc_state *context = NULL;
2346         enum dc_status res = DC_ERROR_UNEXPECTED;
2347         int i;
2348         struct dc_stream_state *del_streams[MAX_PIPES];
2349         int del_streams_count = 0;
2350
2351         memset(del_streams, 0, sizeof(del_streams));
2352
2353         context = dc_create_state(dc);
2354         if (context == NULL)
2355                 goto context_alloc_fail;
2356
2357         dc_resource_state_copy_construct_current(dc, context);
2358
2359         /* First remove from context all streams */
2360         for (i = 0; i < context->stream_count; i++) {
2361                 struct dc_stream_state *stream = context->streams[i];
2362
2363                 del_streams[del_streams_count++] = stream;
2364         }
2365
2366         /* Remove all planes for removed streams and then remove the streams */
2367         for (i = 0; i < del_streams_count; i++) {
2368                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2369                         res = DC_FAIL_DETACH_SURFACES;
2370                         goto fail;
2371                 }
2372
2373                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2374                 if (res != DC_OK)
2375                         goto fail;
2376         }
2377
2378         res = dc_commit_state(dc, context);
2379
2380 fail:
2381         dc_release_state(context);
2382
2383 context_alloc_fail:
2384         return res;
2385 }
2386
2387 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2388 {
2389         int i;
2390
2391         if (dm->hpd_rx_offload_wq) {
2392                 for (i = 0; i < dm->dc->caps.max_links; i++)
2393                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2394         }
2395 }
2396
2397 static int dm_suspend(void *handle)
2398 {
2399         struct amdgpu_device *adev = handle;
2400         struct amdgpu_display_manager *dm = &adev->dm;
2401         int ret = 0;
2402
2403         if (amdgpu_in_reset(adev)) {
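                /*
                 * dc_lock is held across the whole GPU reset; the matching
                 * unlock is in dm_resume()'s reset path.
                 */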
2404                 mutex_lock(&dm->dc_lock);
2405
2406 #if defined(CONFIG_DRM_AMD_DC_DCN)
2407                 dc_allow_idle_optimizations(adev->dm.dc, false);
2408 #endif
2409
2410                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2411
2412                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2413
2414                 amdgpu_dm_commit_zero_streams(dm->dc);
2415
2416                 amdgpu_dm_irq_suspend(adev);
2417
2418                 hpd_rx_irq_work_suspend(dm);
2419
2420                 return ret;
2421         }
2422
2423         WARN_ON(adev->dm.cached_state);
2424         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2425
2426         s3_handle_mst(adev_to_drm(adev), true);
2427
2428         amdgpu_dm_irq_suspend(adev);
2429
2430         hpd_rx_irq_work_suspend(dm);
2431
2432         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2433
2434         return 0;
2435 }
2436
2437 struct amdgpu_dm_connector *
2438 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2439                                              struct drm_crtc *crtc)
2440 {
2441         uint32_t i;
2442         struct drm_connector_state *new_con_state;
2443         struct drm_connector *connector;
2444         struct drm_crtc *crtc_from_state;
2445
2446         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2447                 crtc_from_state = new_con_state->crtc;
2448
2449                 if (crtc_from_state == crtc)
2450                         return to_amdgpu_dm_connector(connector);
2451         }
2452
2453         return NULL;
2454 }
2455
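/*
 * Fabricate a local sink for links where no physical sink was detected but
 * the connector is forced on (connector->force set, e.g. via video= on the
 * kernel command line), so the rest of the stack treats the link as
 * connected.
 */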
2456 static void emulated_link_detect(struct dc_link *link)
2457 {
2458         struct dc_sink_init_data sink_init_data = { 0 };
2459         struct display_sink_capability sink_caps = { 0 };
2460         enum dc_edid_status edid_status;
2461         struct dc_context *dc_ctx = link->ctx;
2462         struct dc_sink *sink = NULL;
2463         struct dc_sink *prev_sink = NULL;
2464
2465         link->type = dc_connection_none;
2466         prev_sink = link->local_sink;
2467
2468         if (prev_sink)
2469                 dc_sink_release(prev_sink);
2470
2471         switch (link->connector_signal) {
2472         case SIGNAL_TYPE_HDMI_TYPE_A: {
2473                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2474                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2475                 break;
2476         }
2477
2478         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2479                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2480                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2481                 break;
2482         }
2483
2484         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2485                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2486                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2487                 break;
2488         }
2489
2490         case SIGNAL_TYPE_LVDS: {
2491                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2492                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2493                 break;
2494         }
2495
2496         case SIGNAL_TYPE_EDP: {
2497                 sink_caps.transaction_type =
2498                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2499                 sink_caps.signal = SIGNAL_TYPE_EDP;
2500                 break;
2501         }
2502
2503         case SIGNAL_TYPE_DISPLAY_PORT: {
2504                 sink_caps.transaction_type =
2505                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2506                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2507                 break;
2508         }
2509
2510         default:
2511                 DC_ERROR("Invalid connector type! signal:%d\n",
2512                         link->connector_signal);
2513                 return;
2514         }
2515
2516         sink_init_data.link = link;
2517         sink_init_data.sink_signal = sink_caps.signal;
2518
2519         sink = dc_sink_create(&sink_init_data);
2520         if (!sink) {
2521                 DC_ERROR("Failed to create sink!\n");
2522                 return;
2523         }
2524
2525         /* dc_sink_create returns a new reference */
2526         link->local_sink = sink;
2527
2528         edid_status = dm_helpers_read_local_edid(
2529                         link->ctx,
2530                         link,
2531                         sink);
2532
2533         if (edid_status != EDID_OK)
2534                 DC_ERROR("Failed to read EDID\n");
2536 }
2537
2538 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2539                                      struct amdgpu_display_manager *dm)
2540 {
2541         struct {
2542                 struct dc_surface_update surface_updates[MAX_SURFACES];
2543                 struct dc_plane_info plane_infos[MAX_SURFACES];
2544                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2545                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2546                 struct dc_stream_update stream_update;
2547         } *bundle;
2548         int k, m;
2549
2550         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2551
2552         if (!bundle) {
2553                 dm_error("Failed to allocate update bundle\n");
2554                 goto cleanup;
2555         }
2556
2557         for (k = 0; k < dc_state->stream_count; k++) {
2558                 bundle->stream_update.stream = dc_state->streams[k];
2559
2560                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2561                         bundle->surface_updates[m].surface =
2562                                 dc_state->stream_status[k].plane_states[m];
2563                         bundle->surface_updates[m].surface->force_full_update =
2564                                 true;
2565                 }
2566                 dc_commit_updates_for_stream(
2567                         dm->dc, bundle->surface_updates,
2568                         dc_state->stream_status[k].plane_count,
2569                         dc_state->streams[k], &bundle->stream_update, dc_state);
2570         }
2571
2572 cleanup:
2573         kfree(bundle);
2576 }
2577
2578 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2579 {
2580         struct dc_stream_state *stream_state;
2581         struct amdgpu_dm_connector *aconnector = link->priv;
2582         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2583         struct dc_stream_update stream_update;
2584         bool dpms_off = true;
2585
2586         memset(&stream_update, 0, sizeof(stream_update));
2587         stream_update.dpms_off = &dpms_off;
2588
2589         mutex_lock(&adev->dm.dc_lock);
2590         stream_state = dc_stream_find_from_link(link);
2591
2592         if (stream_state == NULL) {
2593                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2594                 mutex_unlock(&adev->dm.dc_lock);
2595                 return;
2596         }
2597
2598         stream_update.stream = stream_state;
2599         acrtc_state->force_dpms_off = true;
2600         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2601                                      stream_state, &stream_update,
2602                                      stream_state->ctx->dc->current_state);
2603         mutex_unlock(&adev->dm.dc_lock);
2604 }
2605
2606 static int dm_resume(void *handle)
2607 {
2608         struct amdgpu_device *adev = handle;
2609         struct drm_device *ddev = adev_to_drm(adev);
2610         struct amdgpu_display_manager *dm = &adev->dm;
2611         struct amdgpu_dm_connector *aconnector;
2612         struct drm_connector *connector;
2613         struct drm_connector_list_iter iter;
2614         struct drm_crtc *crtc;
2615         struct drm_crtc_state *new_crtc_state;
2616         struct dm_crtc_state *dm_new_crtc_state;
2617         struct drm_plane *plane;
2618         struct drm_plane_state *new_plane_state;
2619         struct dm_plane_state *dm_new_plane_state;
2620         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2621         enum dc_connection_type new_connection_type = dc_connection_none;
2622         struct dc_state *dc_state;
2623         int i, r, j;
2624
2625         if (amdgpu_in_reset(adev)) {
2626                 dc_state = dm->cached_dc_state;
2627
2628                 /*
2629                  * The dc->current_state is backed up into dm->cached_dc_state
2630                  * before we commit 0 streams.
2631                  *
2632                  * DC will clear link encoder assignments on the real state
2633                  * but the changes won't propagate over to the copy we made
2634                  * before the 0 streams commit.
2635                  *
2636                  * DC expects that link encoder assignments are *not* valid
2637                  * when committing a state, so as a workaround we can copy
2638                  * off of the current state.
2639                  *
2640                  * We lose the previous assignments, but we had already
2641                  * committed 0 streams anyway.
2642                  */
2643                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2644
2645                 if (dc_enable_dmub_notifications(adev->dm.dc))
2646                         amdgpu_dm_outbox_init(adev);
2647
2648                 r = dm_dmub_hw_init(adev);
2649                 if (r)
2650                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2651
2652                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2653                 dc_resume(dm->dc);
2654
2655                 amdgpu_dm_irq_resume_early(adev);
2656
2657                 for (i = 0; i < dc_state->stream_count; i++) {
2658                         dc_state->streams[i]->mode_changed = true;
2659                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2660                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2661                                         = 0xffffffff;
2662                         }
2663                 }
2664
2665                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2666
2667                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2668
2669                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2670
2671                 dc_release_state(dm->cached_dc_state);
2672                 dm->cached_dc_state = NULL;
2673
2674                 amdgpu_dm_irq_resume_late(adev);
2675
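                /* Drops the dc_lock taken in dm_suspend()'s GPU-reset path. */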
2676                 mutex_unlock(&dm->dc_lock);
2677
2678                 return 0;
2679         }
2680         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2681         dc_release_state(dm_state->context);
2682         dm_state->context = dc_create_state(dm->dc);
2683         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2684         dc_resource_state_construct(dm->dc, dm_state->context);
2685
2686         /* Re-enable outbox interrupts for DPIA. */
2687         if (dc_enable_dmub_notifications(adev->dm.dc))
2688                 amdgpu_dm_outbox_init(adev);
2689
2690         /* Before powering on DC we need to re-initialize DMUB. */
2691         dm_dmub_hw_resume(adev);
2692
2693         /* power on hardware */
2694         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2695
2696         /* program HPD filter */
2697         dc_resume(dm->dc);
2698
2699         /*
2700          * early enable HPD Rx IRQ, should be done before set mode as short
2701          * pulse interrupts are used for MST
2702          */
2703         amdgpu_dm_irq_resume_early(adev);
2704
2705         /* On resume we need to rewrite the MSTM control bits to enable MST */
2706         s3_handle_mst(ddev, false);
2707
2708         /* Do detection */
2709         drm_connector_list_iter_begin(ddev, &iter);
2710         drm_for_each_connector_iter(connector, &iter) {
2711                 aconnector = to_amdgpu_dm_connector(connector);
2712
2713                 /*
2714                  * This is the case when traversing through already created
2715                  * MST connectors; they should be skipped.
2716                  */
2717                 if (aconnector->dc_link &&
2718                     aconnector->dc_link->type == dc_connection_mst_branch)
2719                         continue;
2720
2721                 mutex_lock(&aconnector->hpd_lock);
2722                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2723                         DRM_ERROR("KMS: Failed to detect connector\n");
2724
2725                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2726                         emulated_link_detect(aconnector->dc_link);
2727                 else
2728                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2729
2730                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2731                         aconnector->fake_enable = false;
2732
2733                 if (aconnector->dc_sink)
2734                         dc_sink_release(aconnector->dc_sink);
2735                 aconnector->dc_sink = NULL;
2736                 amdgpu_dm_update_connector_after_detect(aconnector);
2737                 mutex_unlock(&aconnector->hpd_lock);
2738         }
2739         drm_connector_list_iter_end(&iter);
2740
2741         /* Force mode set in atomic commit */
2742         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2743                 new_crtc_state->active_changed = true;
2744
2745         /*
2746          * atomic_check is expected to create the dc states. We need to release
2747          * them here, since they were duplicated as part of the suspend
2748          * procedure.
2749          */
2750         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2751                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2752                 if (dm_new_crtc_state->stream) {
2753                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2754                         dc_stream_release(dm_new_crtc_state->stream);
2755                         dm_new_crtc_state->stream = NULL;
2756                 }
2757         }
2758
2759         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2760                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2761                 if (dm_new_plane_state->dc_state) {
2762                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2763                         dc_plane_state_release(dm_new_plane_state->dc_state);
2764                         dm_new_plane_state->dc_state = NULL;
2765                 }
2766         }
2767
2768         drm_atomic_helper_resume(ddev, dm->cached_state);
2769
2770         dm->cached_state = NULL;
2771
2772         amdgpu_dm_irq_resume_late(adev);
2773
2774         amdgpu_dm_smu_write_watermarks_table(adev);
2775
2776         return 0;
2777 }
2778
2779 /**
2780  * DOC: DM Lifecycle
2781  *
2782  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2783  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2784  * the base driver's device list to be initialized and torn down accordingly.
2785  *
2786  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2787  */
2788
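/*
 * As an illustrative sketch only (the exact call site lives in the base
 * driver's per-ASIC setup code, not in this file), the IP block below is
 * typically registered with the base driver via:
 *
 *      amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the hooks in amdgpu_dm_funcs are invoked at the matching
 * points of the device's lifecycle.
 */
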
2789 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2790         .name = "dm",
2791         .early_init = dm_early_init,
2792         .late_init = dm_late_init,
2793         .sw_init = dm_sw_init,
2794         .sw_fini = dm_sw_fini,
2795         .early_fini = amdgpu_dm_early_fini,
2796         .hw_init = dm_hw_init,
2797         .hw_fini = dm_hw_fini,
2798         .suspend = dm_suspend,
2799         .resume = dm_resume,
2800         .is_idle = dm_is_idle,
2801         .wait_for_idle = dm_wait_for_idle,
2802         .check_soft_reset = dm_check_soft_reset,
2803         .soft_reset = dm_soft_reset,
2804         .set_clockgating_state = dm_set_clockgating_state,
2805         .set_powergating_state = dm_set_powergating_state,
2806 };
2807
2808 const struct amdgpu_ip_block_version dm_ip_block =
2809 {
2810         .type = AMD_IP_BLOCK_TYPE_DCE,
2811         .major = 1,
2812         .minor = 0,
2813         .rev = 0,
2814         .funcs = &amdgpu_dm_funcs,
2815 };
2816
2818 /**
2819  * DOC: atomic
2820  *
2821  * *WIP*
2822  */
2823
2824 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2825         .fb_create = amdgpu_display_user_framebuffer_create,
2826         .get_format_info = amd_get_format_info,
2827         .output_poll_changed = drm_fb_helper_output_poll_changed,
2828         .atomic_check = amdgpu_dm_atomic_check,
2829         .atomic_commit = drm_atomic_helper_commit,
2830 };
2831
2832 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2833         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2834 };
2835
2836 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2837 {
2838         u32 max_cll, min_cll, max, min, q, r;
2839         struct amdgpu_dm_backlight_caps *caps;
2840         struct amdgpu_display_manager *dm;
2841         struct drm_connector *conn_base;
2842         struct amdgpu_device *adev;
2843         struct dc_link *link = NULL;
2844         static const u8 pre_computed_values[] = {
2845                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2846                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2847         int i;
2848
2849         if (!aconnector || !aconnector->dc_link)
2850                 return;
2851
2852         link = aconnector->dc_link;
2853         if (link->connector_signal != SIGNAL_TYPE_EDP)
2854                 return;
2855
2856         conn_base = &aconnector->base;
2857         adev = drm_to_adev(conn_base->dev);
2858         dm = &adev->dm;
2859         for (i = 0; i < dm->num_of_edps; i++) {
2860                 if (link == dm->backlight_link[i])
2861                         break;
2862         }
2863         if (i >= dm->num_of_edps)
2864                 return;
2865         caps = &dm->backlight_caps[i];
2866         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2867         caps->aux_support = false;
2868         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2869         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2870
2871         if (caps->ext_caps->bits.oled == 1 /*||
2872             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2873             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2874                 caps->aux_support = true;
2875
2876         if (amdgpu_backlight == 0)
2877                 caps->aux_support = false;
2878         else if (amdgpu_backlight == 1)
2879                 caps->aux_support = true;
2880
2881         /* From the specification (CTA-861-G), for calculating the maximum
2882          * luminance we need to use:
2883          *      Luminance = 50*2**(CV/32)
2884          * Where CV is a one-byte value.
2885          * Calculating this expression directly would need floating-point
2886          * precision; to avoid that complexity, we take advantage of the fact
2887          * that CV is divided by a constant. From Euclid's division algorithm,
2888          * we know that CV can be written as: CV = 32*q + r. Substituting CV
2889          * in the Luminance expression gives 50*(2**q)*(2**(r/32)), so we
2890          * only need to pre-compute the values of 50*2**(r/32). These were
2891          * generated with the following Ruby line:
2892          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2893          * The results of the above expression can be verified against
2894          * pre_computed_values.
2895          */
2896         q = max_cll >> 5;
2897         r = max_cll % 32;
2898         max = (1 << q) * pre_computed_values[r];
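        /*
         * Worked example (illustrative only): for max_cll = 70 we get
         * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
         * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
         * matching round(50 * 2**(70/32)) = 228.
         */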
2899
2900         // min luminance: maxLum * (CV/255)^2 / 100
2901         q = DIV_ROUND_CLOSEST(min_cll, 255);
2902         min = max * DIV_ROUND_CLOSEST((q * q), 100);
2903
2904         caps->aux_max_input_signal = max;
2905         caps->aux_min_input_signal = min;
2906 }
2907
2908 void amdgpu_dm_update_connector_after_detect(
2909                 struct amdgpu_dm_connector *aconnector)
2910 {
2911         struct drm_connector *connector = &aconnector->base;
2912         struct drm_device *dev = connector->dev;
2913         struct dc_sink *sink;
2914
2915         /* MST handled by drm_mst framework */
2916         if (aconnector->mst_mgr.mst_state)
2917                 return;
2918
2919         sink = aconnector->dc_link->local_sink;
2920         if (sink)
2921                 dc_sink_retain(sink);
2922
2923         /*
2924          * An EDID-managed connector gets its first update only in the mode_valid
2925          * hook; its sink is then set to either the fake or the physical sink,
2926          * depending on the link status. Skip if this was already done during boot.
2927          */
2928         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
2929             aconnector->dc_em_sink) {
2930
2931                 /*
2932                  * For headless S3 resume, use the em_sink to fake the stream,
2933                  * because connector->sink is set to NULL on resume.
2934                  */
2935                 mutex_lock(&dev->mode_config.mutex);
2936
2937                 if (sink) {
2938                         if (aconnector->dc_sink) {
2939                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2940                                 /*
2941                                  * The retain/release below bump the sink's refcount:
2942                                  * the link no longer points to it after disconnect, so
2943                                  * the next crtc-to-connector reshuffle by the UMD would
2944                                  * otherwise trigger an unwanted dc_sink release.
2945                                  */
2946                                 dc_sink_release(aconnector->dc_sink);
2947                         }
2948                         aconnector->dc_sink = sink;
2949                         dc_sink_retain(aconnector->dc_sink);
2950                         amdgpu_dm_update_freesync_caps(connector,
2951                                         aconnector->edid);
2952                 } else {
2953                         amdgpu_dm_update_freesync_caps(connector, NULL);
2954                         if (!aconnector->dc_sink) {
2955                                 aconnector->dc_sink = aconnector->dc_em_sink;
2956                                 dc_sink_retain(aconnector->dc_sink);
2957                         }
2958                 }
2959
2960                 mutex_unlock(&dev->mode_config.mutex);
2961
2962                 if (sink)
2963                         dc_sink_release(sink);
2964                 return;
2965         }
2966
2967         /*
2968          * TODO: temporary guard while looking for a proper fix.
2969          * If this sink is an MST sink, we should not do anything.
2970          */
2971         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2972                 dc_sink_release(sink);
2973                 return;
2974         }
2975
2976         if (aconnector->dc_sink == sink) {
2977                 /*
2978                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2979                  * Do nothing!!
2980                  */
2981                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2982                                 aconnector->connector_id);
2983                 if (sink)
2984                         dc_sink_release(sink);
2985                 return;
2986         }
2987
2988         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2989                 aconnector->connector_id, aconnector->dc_sink, sink);
2990
2991         mutex_lock(&dev->mode_config.mutex);
2992
2993         /*
2994          * 1. Update status of the drm connector
2995          * 2. Send an event and let userspace tell us what to do
2996          */
2997         if (sink) {
2998                 /*
2999                  * TODO: check if we still need the S3 mode update workaround.
3000                  * If yes, put it here.
3001                  */
3002                 if (aconnector->dc_sink) {
3003                         amdgpu_dm_update_freesync_caps(connector, NULL);
3004                         dc_sink_release(aconnector->dc_sink);
3005                 }
3006
3007                 aconnector->dc_sink = sink;
3008                 dc_sink_retain(aconnector->dc_sink);
3009                 if (sink->dc_edid.length == 0) {
3010                         aconnector->edid = NULL;
3011                         if (aconnector->dc_link->aux_mode) {
3012                                 drm_dp_cec_unset_edid(
3013                                         &aconnector->dm_dp_aux.aux);
3014                         }
3015                 } else {
3016                         aconnector->edid =
3017                                 (struct edid *)sink->dc_edid.raw_edid;
3018
3019                         if (aconnector->dc_link->aux_mode)
3020                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3021                                                     aconnector->edid);
3022                 }
3023
3024                 drm_connector_update_edid_property(connector, aconnector->edid);
3025                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3026                 update_connector_ext_caps(aconnector);
3027         } else {
3028                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3029                 amdgpu_dm_update_freesync_caps(connector, NULL);
3030                 drm_connector_update_edid_property(connector, NULL);
3031                 aconnector->num_modes = 0;
3032                 dc_sink_release(aconnector->dc_sink);
3033                 aconnector->dc_sink = NULL;
3034                 aconnector->edid = NULL;
3035 #ifdef CONFIG_DRM_AMD_DC_HDCP
3036                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3037                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3038                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3039 #endif
3040         }
3041
3042         mutex_unlock(&dev->mode_config.mutex);
3043
3044         update_subconnector_property(aconnector);
3045
3046         if (sink)
3047                 dc_sink_release(sink);
3048 }
3049
3050 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3051 {
3052         struct drm_connector *connector = &aconnector->base;
3053         struct drm_device *dev = connector->dev;
3054         enum dc_connection_type new_connection_type = dc_connection_none;
3055         struct amdgpu_device *adev = drm_to_adev(dev);
3056         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3057         struct dm_crtc_state *dm_crtc_state = NULL;
3058
3059         if (adev->dm.disable_hpd_irq)
3060                 return;
3061
3062         if (dm_con_state->base.state && dm_con_state->base.crtc)
3063                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3064                                         dm_con_state->base.state,
3065                                         dm_con_state->base.crtc));
3066         /*
3067          * In case of failure or MST there is no need to update the connector
3068          * status or notify the OS, since (in the MST case) MST does this in
3069          * its own context.
3070          */
3070         mutex_lock(&aconnector->hpd_lock);
3071
3072 #ifdef CONFIG_DRM_AMD_DC_HDCP
3073         if (adev->dm.hdcp_workqueue) {
3074                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3075                 dm_con_state->update_hdcp = true;
3076         }
3077 #endif
3078         if (aconnector->fake_enable)
3079                 aconnector->fake_enable = false;
3080
3081         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3082                 DRM_ERROR("KMS: Failed to detect connector\n");
3083
3084         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3085                 emulated_link_detect(aconnector->dc_link);
3086
3087                 drm_modeset_lock_all(dev);
3088                 dm_restore_drm_connector_state(dev, connector);
3089                 drm_modeset_unlock_all(dev);
3090
3091                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3092                         drm_kms_helper_connector_hotplug_event(connector);
3093
3094         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3095                 if (new_connection_type == dc_connection_none &&
3096                     aconnector->dc_link->type == dc_connection_none &&
3097                     dm_crtc_state)
3098                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3099
3100                 amdgpu_dm_update_connector_after_detect(aconnector);
3101
3102                 drm_modeset_lock_all(dev);
3103                 dm_restore_drm_connector_state(dev, connector);
3104                 drm_modeset_unlock_all(dev);
3105
3106                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3107                         drm_kms_helper_connector_hotplug_event(connector);
3108         }
3109         mutex_unlock(&aconnector->hpd_lock);
3111 }
3112
3113 static void handle_hpd_irq(void *param)
3114 {
3115         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3116
3117         handle_hpd_irq_helper(aconnector);
3119 }
3120
3121 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3122 {
3123         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3124         uint8_t dret;
3125         bool new_irq_handled = false;
3126         int dpcd_addr;
3127         int dpcd_bytes_to_read;
3128
3129         const int max_process_count = 30;
3130         int process_count = 0;
3131
3132         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3133
3134         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3135                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3136                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3137                 dpcd_addr = DP_SINK_COUNT;
3138         } else {
3139                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3140                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3141                 dpcd_addr = DP_SINK_COUNT_ESI;
3142         }
3143
3144         dret = drm_dp_dpcd_read(
3145                 &aconnector->dm_dp_aux.aux,
3146                 dpcd_addr,
3147                 esi,
3148                 dpcd_bytes_to_read);
3149
3150         while (dret == dpcd_bytes_to_read &&
3151                 process_count < max_process_count) {
3152                 uint8_t retry;
3153                 dret = 0;
3154
3155                 process_count++;
3156
3157                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3158                 /* handle HPD short pulse irq */
3159                 if (aconnector->mst_mgr.mst_state)
3160                         drm_dp_mst_hpd_irq(
3161                                 &aconnector->mst_mgr,
3162                                 esi,
3163                                 &new_irq_handled);
3164
3165                 if (new_irq_handled) {
3166                         /* ACK at DPCD to notify downstream */
3167                         const int ack_dpcd_bytes_to_write =
3168                                 dpcd_bytes_to_read - 1;
3169
3170                         for (retry = 0; retry < 3; retry++) {
3171                                 uint8_t wret;
3172
3173                                 wret = drm_dp_dpcd_write(
3174                                         &aconnector->dm_dp_aux.aux,
3175                                         dpcd_addr + 1,
3176                                         &esi[1],
3177                                         ack_dpcd_bytes_to_write);
3178                                 if (wret == ack_dpcd_bytes_to_write)
3179                                         break;
3180                         }
3181
3182                         /* check if there is new irq to be handled */
3183                         dret = drm_dp_dpcd_read(
3184                                 &aconnector->dm_dp_aux.aux,
3185                                 dpcd_addr,
3186                                 esi,
3187                                 dpcd_bytes_to_read);
3188
3189                         new_irq_handled = false;
3190                 } else {
3191                         break;
3192                 }
3193         }
3194
3195         if (process_count == max_process_count)
3196                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3197 }
3198
3199 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3200                                                         union hpd_irq_data hpd_irq_data)
3201 {
3202         struct hpd_rx_irq_offload_work *offload_work =
3203                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3204
3205         if (!offload_work) {
3206                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3207                 return;
3208         }
3209
3210         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3211         offload_work->data = hpd_irq_data;
3212         offload_work->offload_wq = offload_wq;
3213
3214         queue_work(offload_wq->wq, &offload_work->work);
3215         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3216 }
3217
3218 static void handle_hpd_rx_irq(void *param)
3219 {
3220         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3221         struct drm_connector *connector = &aconnector->base;
3222         struct drm_device *dev = connector->dev;
3223         struct dc_link *dc_link = aconnector->dc_link;
3224         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3225         bool result = false;
3226         enum dc_connection_type new_connection_type = dc_connection_none;
3227         struct amdgpu_device *adev = drm_to_adev(dev);
3228         union hpd_irq_data hpd_irq_data;
3229         bool link_loss = false;
3230         bool has_left_work = false;
3231         int idx = aconnector->base.index;
3232         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3233
3234         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3235
3236         if (adev->dm.disable_hpd_irq)
3237                 return;
3238
3239         /*
3240          * TODO: Temporarily add a mutex to protect the hpd interrupt from a
3241          * gpio conflict; once the i2c helper is implemented, this mutex
3242          * should be retired.
3243          */
3244         mutex_lock(&aconnector->hpd_lock);
3245
3246         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3247                                                 &link_loss, true, &has_left_work);
3248
3249         if (!has_left_work)
3250                 goto out;
3251
3252         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3253                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3254                 goto out;
3255         }
3256
3257         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3258                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3259                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3260                         dm_handle_mst_sideband_msg(aconnector);
3261                         goto out;
3262                 }
3263
3264                 if (link_loss) {
3265                         bool skip = false;
3266
3267                         spin_lock(&offload_wq->offload_lock);
3268                         skip = offload_wq->is_handling_link_loss;
3269
3270                         if (!skip)
3271                                 offload_wq->is_handling_link_loss = true;
3272
3273                         spin_unlock(&offload_wq->offload_lock);
3274
3275                         if (!skip)
3276                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3277
3278                         goto out;
3279                 }
3280         }
3281
3282 out:
3283         if (result && !is_mst_root_connector) {
3284                 /* Downstream Port status changed. */
3285                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3286                         DRM_ERROR("KMS: Failed to detect connector\n");
3287
3288                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3289                         emulated_link_detect(dc_link);
3290
3291                         if (aconnector->fake_enable)
3292                                 aconnector->fake_enable = false;
3293
3294                         amdgpu_dm_update_connector_after_detect(aconnector);
3295
3297                         drm_modeset_lock_all(dev);
3298                         dm_restore_drm_connector_state(dev, connector);
3299                         drm_modeset_unlock_all(dev);
3300
3301                         drm_kms_helper_connector_hotplug_event(connector);
3302                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3303
3304                         if (aconnector->fake_enable)
3305                                 aconnector->fake_enable = false;
3306
3307                         amdgpu_dm_update_connector_after_detect(aconnector);
3308
3310                         drm_modeset_lock_all(dev);
3311                         dm_restore_drm_connector_state(dev, connector);
3312                         drm_modeset_unlock_all(dev);
3313
3314                         drm_kms_helper_connector_hotplug_event(connector);
3315                 }
3316         }
3317 #ifdef CONFIG_DRM_AMD_DC_HDCP
3318         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3319                 if (adev->dm.hdcp_workqueue)
3320                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3321         }
3322 #endif
3323
3324         if (dc_link->type != dc_connection_mst_branch)
3325                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3326
3327         mutex_unlock(&aconnector->hpd_lock);
3328 }
3329
3330 static void register_hpd_handlers(struct amdgpu_device *adev)
3331 {
3332         struct drm_device *dev = adev_to_drm(adev);
3333         struct drm_connector *connector;
3334         struct amdgpu_dm_connector *aconnector;
3335         const struct dc_link *dc_link;
3336         struct dc_interrupt_params int_params = {0};
3337
3338         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3339         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3340
3341         list_for_each_entry(connector,
3342                         &dev->mode_config.connector_list, head) {
3343
3344                 aconnector = to_amdgpu_dm_connector(connector);
3345                 dc_link = aconnector->dc_link;
3346
3347                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3348                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3349                         int_params.irq_source = dc_link->irq_source_hpd;
3350
3351                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3352                                         handle_hpd_irq,
3353                                         (void *) aconnector);
3354                 }
3355
3356                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3357
3358                         /* Also register for DP short pulse (hpd_rx). */
3359                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3360                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3361
3362                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3363                                         handle_hpd_rx_irq,
3364                                         (void *) aconnector);
3365
3366                         if (adev->dm.hpd_rx_offload_wq)
3367                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3368                                         aconnector;
3369                 }
3370         }
3371 }
3372
3373 #if defined(CONFIG_DRM_AMD_DC_SI)
3374 /* Register IRQ sources and initialize IRQ callbacks */
3375 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3376 {
3377         struct dc *dc = adev->dm.dc;
3378         struct common_irq_params *c_irq_params;
3379         struct dc_interrupt_params int_params = {0};
3380         int r;
3381         int i;
3382         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3383
3384         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3385         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3386
3387         /*
3388          * Actions of amdgpu_irq_add_id():
3389          * 1. Register a set() function with base driver.
3390          *    Base driver will call set() function to enable/disable an
3391          *    interrupt in DC hardware.
3392          * 2. Register amdgpu_dm_irq_handler().
3393          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3394          *    coming from DC hardware.
3395          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3396          *    for acknowledging and handling. */
3397
3398         /* Use VBLANK interrupt */
3399         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3400                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3401                 if (r) {
3402                         DRM_ERROR("Failed to add crtc irq id!\n");
3403                         return r;
3404                 }
3405
3406                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3407                 int_params.irq_source =
3408                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3409
3410                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3411
3412                 c_irq_params->adev = adev;
3413                 c_irq_params->irq_src = int_params.irq_source;
3414
3415                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3416                                 dm_crtc_high_irq, c_irq_params);
3417         }
3418
3419         /* Use GRPH_PFLIP interrupt */
3420         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3421                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3422                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3423                 if (r) {
3424                         DRM_ERROR("Failed to add page flip irq id!\n");
3425                         return r;
3426                 }
3427
3428                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3429                 int_params.irq_source =
3430                         dc_interrupt_to_irq_source(dc, i, 0);
3431
3432                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3433
3434                 c_irq_params->adev = adev;
3435                 c_irq_params->irq_src = int_params.irq_source;
3436
3437                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3438                                 dm_pflip_high_irq, c_irq_params);
3440         }
3441
3442         /* HPD */
3443         r = amdgpu_irq_add_id(adev, client_id,
3444                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3445         if (r) {
3446                 DRM_ERROR("Failed to add hpd irq id!\n");
3447                 return r;
3448         }
3449
3450         register_hpd_handlers(adev);
3451
3452         return 0;
3453 }
3454 #endif
3455
3456 /* Register IRQ sources and initialize IRQ callbacks */
3457 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3458 {
3459         struct dc *dc = adev->dm.dc;
3460         struct common_irq_params *c_irq_params;
3461         struct dc_interrupt_params int_params = {0};
3462         int r;
3463         int i;
3464         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3465
3466         if (adev->family >= AMDGPU_FAMILY_AI)
3467                 client_id = SOC15_IH_CLIENTID_DCE;
3468
3469         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3470         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3471
3472         /*
3473          * Actions of amdgpu_irq_add_id():
3474          * 1. Register a set() function with base driver.
3475          *    Base driver will call set() function to enable/disable an
3476          *    interrupt in DC hardware.
3477          * 2. Register amdgpu_dm_irq_handler().
3478          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3479          *    coming from DC hardware.
3480          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3481          *    for acknowledging and handling. */
3482
3483         /* Use VBLANK interrupt */
3484         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3485                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3486                 if (r) {
3487                         DRM_ERROR("Failed to add crtc irq id!\n");
3488                         return r;
3489                 }
3490
3491                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3492                 int_params.irq_source =
3493                         dc_interrupt_to_irq_source(dc, i, 0);
3494
3495                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3496
3497                 c_irq_params->adev = adev;
3498                 c_irq_params->irq_src = int_params.irq_source;
3499
3500                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3501                                 dm_crtc_high_irq, c_irq_params);
3502         }
3503
3504         /* Use VUPDATE interrupt */
3505         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3506                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3507                 if (r) {
3508                         DRM_ERROR("Failed to add vupdate irq id!\n");
3509                         return r;
3510                 }
3511
3512                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3513                 int_params.irq_source =
3514                         dc_interrupt_to_irq_source(dc, i, 0);
3515
3516                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3517
3518                 c_irq_params->adev = adev;
3519                 c_irq_params->irq_src = int_params.irq_source;
3520
3521                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3522                                 dm_vupdate_high_irq, c_irq_params);
3523         }
3524
3525         /* Use GRPH_PFLIP interrupt */
3526         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3527                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3528                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3529                 if (r) {
3530                         DRM_ERROR("Failed to add page flip irq id!\n");
3531                         return r;
3532                 }
3533
3534                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3535                 int_params.irq_source =
3536                         dc_interrupt_to_irq_source(dc, i, 0);
3537
3538                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3539
3540                 c_irq_params->adev = adev;
3541                 c_irq_params->irq_src = int_params.irq_source;
3542
3543                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3544                                 dm_pflip_high_irq, c_irq_params);
3546         }
3547
3548         /* HPD */
3549         r = amdgpu_irq_add_id(adev, client_id,
3550                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3551         if (r) {
3552                 DRM_ERROR("Failed to add hpd irq id!\n");
3553                 return r;
3554         }
3555
3556         register_hpd_handlers(adev);
3557
3558         return 0;
3559 }
3560
3561 #if defined(CONFIG_DRM_AMD_DC_DCN)
3562 /* Register IRQ sources and initialize IRQ callbacks */
3563 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3564 {
3565         struct dc *dc = adev->dm.dc;
3566         struct common_irq_params *c_irq_params;
3567         struct dc_interrupt_params int_params = {0};
3568         int r;
3569         int i;
3570 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3571         static const unsigned int vrtl_int_srcid[] = {
3572                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3573                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3574                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3575                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3576                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3577                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3578         };
3579 #endif
3580
3581         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3582         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3583
3584         /*
3585          * Actions of amdgpu_irq_add_id():
3586          * 1. Register a set() function with base driver.
3587          *    Base driver will call set() function to enable/disable an
3588          *    interrupt in DC hardware.
3589          * 2. Register amdgpu_dm_irq_handler().
3590          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3591          *    coming from DC hardware.
3592          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3593          *    for acknowledging and handling.
3594          */
3595
3596         /* Use VSTARTUP interrupt */
3597         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3598                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3599                         i++) {
3600                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3601
3602                 if (r) {
3603                         DRM_ERROR("Failed to add crtc irq id!\n");
3604                         return r;
3605                 }
3606
3607                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608                 int_params.irq_source =
3609                         dc_interrupt_to_irq_source(dc, i, 0);
3610
3611                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3612
3613                 c_irq_params->adev = adev;
3614                 c_irq_params->irq_src = int_params.irq_source;
3615
3616                 amdgpu_dm_irq_register_interrupt(
3617                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3618         }
3619
3620         /* Use otg vertical line interrupt */
3621 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3622         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3623                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3624                                 vrtl_int_srcid[i], &adev->vline0_irq);
3625
3626                 if (r) {
3627                         DRM_ERROR("Failed to add vline0 irq id!\n");
3628                         return r;
3629                 }
3630
3631                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3632                 int_params.irq_source =
3633                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3634
3635                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3636                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3637                         break;
3638                 }
3639
3640                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3641                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3642
3643                 c_irq_params->adev = adev;
3644                 c_irq_params->irq_src = int_params.irq_source;
3645
3646                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3647                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3648         }
3649 #endif
3650
3651         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3652          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3653          * to trigger at end of each vblank, regardless of state of the lock,
3654          * matching DCE behaviour.
3655          */
3656         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3657              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3658              i++) {
3659                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3660
3661                 if (r) {
3662                         DRM_ERROR("Failed to add vupdate irq id!\n");
3663                         return r;
3664                 }
3665
3666                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3667                 int_params.irq_source =
3668                         dc_interrupt_to_irq_source(dc, i, 0);
3669
3670                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3671
3672                 c_irq_params->adev = adev;
3673                 c_irq_params->irq_src = int_params.irq_source;
3674
3675                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3676                                 dm_vupdate_high_irq, c_irq_params);
3677         }
3678
3679         /* Use GRPH_PFLIP interrupt */
3680         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3681                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3682                         i++) {
3683                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3684                 if (r) {
3685                         DRM_ERROR("Failed to add page flip irq id!\n");
3686                         return r;
3687                 }
3688
3689                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3690                 int_params.irq_source =
3691                         dc_interrupt_to_irq_source(dc, i, 0);
3692
3693                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3694
3695                 c_irq_params->adev = adev;
3696                 c_irq_params->irq_src = int_params.irq_source;
3697
3698                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3699                                 dm_pflip_high_irq, c_irq_params);
3701         }
3702
3703         /* HPD */
3704         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3705                         &adev->hpd_irq);
3706         if (r) {
3707                 DRM_ERROR("Failed to add hpd irq id!\n");
3708                 return r;
3709         }
3710
3711         register_hpd_handlers(adev);
3712
3713         return 0;
3714 }
3715 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3716 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3717 {
3718         struct dc *dc = adev->dm.dc;
3719         struct common_irq_params *c_irq_params;
3720         struct dc_interrupt_params int_params = {0};
3721         int r, i;
3722
3723         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3724         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3725
3726         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3727                         &adev->dmub_outbox_irq);
3728         if (r) {
3729                 DRM_ERROR("Failed to add outbox irq id!\n");
3730                 return r;
3731         }
3732
3733         if (dc->ctx->dmub_srv) {
3734                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3735                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3736                 int_params.irq_source =
3737                         dc_interrupt_to_irq_source(dc, i, 0);
3738
3739                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3740
3741                 c_irq_params->adev = adev;
3742                 c_irq_params->irq_src = int_params.irq_source;
3743
3744                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3745                                 dm_dmub_outbox1_low_irq, c_irq_params);
3746         }
3747
3748         return 0;
3749 }
3750 #endif
3751
3752 /*
3753  * Acquires the lock for the atomic state object and returns
3754  * the new atomic state.
3755  *
3756  * This should only be called during atomic check.
3757  */
3758 int dm_atomic_get_state(struct drm_atomic_state *state,
3759                         struct dm_atomic_state **dm_state)
3760 {
3761         struct drm_device *dev = state->dev;
3762         struct amdgpu_device *adev = drm_to_adev(dev);
3763         struct amdgpu_display_manager *dm = &adev->dm;
3764         struct drm_private_state *priv_state;
3765
3766         if (*dm_state)
3767                 return 0;
3768
3769         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3770         if (IS_ERR(priv_state))
3771                 return PTR_ERR(priv_state);
3772
3773         *dm_state = to_dm_atomic_state(priv_state);
3774
3775         return 0;
3776 }
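
/*
 * Illustrative usage sketch (hypothetical caller) from an atomic_check
 * path, acquiring the DM private state only when it is needed:
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *      int ret;
 *
 *      ret = dm_atomic_get_state(state, &dm_state);
 *      if (ret)
 *              return ret;
 *
 * dm_state->context may then be inspected or modified under the
 * acquired lock.
 */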
3777
3778 static struct dm_atomic_state *
3779 dm_atomic_get_new_state(struct drm_atomic_state *state)
3780 {
3781         struct drm_device *dev = state->dev;
3782         struct amdgpu_device *adev = drm_to_adev(dev);
3783         struct amdgpu_display_manager *dm = &adev->dm;
3784         struct drm_private_obj *obj;
3785         struct drm_private_state *new_obj_state;
3786         int i;
3787
3788         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3789                 if (obj->funcs == dm->atomic_obj.funcs)
3790                         return to_dm_atomic_state(new_obj_state);
3791         }
3792
3793         return NULL;
3794 }
3795
3796 static struct drm_private_state *
3797 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3798 {
3799         struct dm_atomic_state *old_state, *new_state;
3800
3801         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3802         if (!new_state)
3803                 return NULL;
3804
3805         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3806
3807         old_state = to_dm_atomic_state(obj->state);
3808
3809         if (old_state && old_state->context)
3810                 new_state->context = dc_copy_state(old_state->context);
3811
3812         if (!new_state->context) {
3813                 kfree(new_state);
3814                 return NULL;
3815         }
3816
3817         return &new_state->base;
3818 }
3819
3820 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3821                                     struct drm_private_state *state)
3822 {
3823         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3824
3825         if (dm_state && dm_state->context)
3826                 dc_release_state(dm_state->context);
3827
3828         kfree(dm_state);
3829 }
3830
3831 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3832         .atomic_duplicate_state = dm_atomic_duplicate_state,
3833         .atomic_destroy_state = dm_atomic_destroy_state,
3834 };
3835
3836 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3837 {
3838         struct dm_atomic_state *state;
3839         int r;
3840
3841         adev->mode_info.mode_config_initialized = true;
3842
3843         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3844         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3845
3846         adev_to_drm(adev)->mode_config.max_width = 16384;
3847         adev_to_drm(adev)->mode_config.max_height = 16384;
3848
3849         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3850         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3851         /* indicates support for immediate flip */
3852         adev_to_drm(adev)->mode_config.async_page_flip = true;
3853
3854         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3855
3856         state = kzalloc(sizeof(*state), GFP_KERNEL);
3857         if (!state)
3858                 return -ENOMEM;
3859
3860         state->context = dc_create_state(adev->dm.dc);
3861         if (!state->context) {
3862                 kfree(state);
3863                 return -ENOMEM;
3864         }
3865
3866         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3867
3868         drm_atomic_private_obj_init(adev_to_drm(adev),
3869                                     &adev->dm.atomic_obj,
3870                                     &state->base,
3871                                     &dm_atomic_state_funcs);
3872
3873         r = amdgpu_display_modeset_create_props(adev);
3874         if (r) {
3875                 dc_release_state(state->context);
3876                 kfree(state);
3877                 return r;
3878         }
3879
3880         r = amdgpu_dm_audio_init(adev);
3881         if (r) {
3882                 dc_release_state(state->context);
3883                 kfree(state);
3884                 return r;
3885         }
3886
3887         return 0;
3888 }
3889
3890 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3891 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3892 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3893
3894 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3895         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3896
3897 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3898                                             int bl_idx)
3899 {
3900 #if defined(CONFIG_ACPI)
3901         struct amdgpu_dm_backlight_caps caps;
3902
3903         memset(&caps, 0, sizeof(caps));
3904
3905         if (dm->backlight_caps[bl_idx].caps_valid)
3906                 return;
3907
3908         amdgpu_acpi_get_backlight_caps(&caps);
3909         if (caps.caps_valid) {
3910                 dm->backlight_caps[bl_idx].caps_valid = true;
3911                 if (caps.aux_support)
3912                         return;
3913                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3914                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3915         } else {
3916                 dm->backlight_caps[bl_idx].min_input_signal =
3917                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3918                 dm->backlight_caps[bl_idx].max_input_signal =
3919                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3920         }
3921 #else
3922         if (dm->backlight_caps[bl_idx].aux_support)
3923                 return;
3924
3925         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3926         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3927 #endif
3928 }
3929
3930 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3931                                 unsigned *min, unsigned *max)
3932 {
3933         if (!caps)
3934                 return 0;
3935
3936         if (caps->aux_support) {
3937                 // Firmware limits are in nits, DC API wants millinits.
3938                 *max = 1000 * caps->aux_max_input_signal;
3939                 *min = 1000 * caps->aux_min_input_signal;
3940         } else {
3941                 // Firmware limits are 8-bit, PWM control is 16-bit.
3942                 *max = 0x101 * caps->max_input_signal;
3943                 *min = 0x101 * caps->min_input_signal;
3944         }
3945         return 1;
3946 }
3947
3948 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3949                                         uint32_t brightness)
3950 {
3951         unsigned min, max;
3952
3953         if (!get_brightness_range(caps, &min, &max))
3954                 return brightness;
3955
3956         // Rescale 0..255 to min..max
3957         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3958                                        AMDGPU_MAX_BL_LEVEL);
3959 }
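
/*
 * Worked example (illustrative only): with the PWM defaults
 * min_input_signal = 12 and max_input_signal = 255, the range is
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535; a user
 * brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */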
3960
3961 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3962                                       uint32_t brightness)
3963 {
3964         unsigned min, max;
3965
3966         if (!get_brightness_range(caps, &min, &max))
3967                 return brightness;
3968
3969         if (brightness < min)
3970                 return 0;
3971         // Rescale min..max to 0..255
3972         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3973                                  max - min);
3974 }
3975
3976 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3977                                          int bl_idx,
3978                                          u32 user_brightness)
3979 {
3980         struct amdgpu_dm_backlight_caps caps;
3981         struct dc_link *link;
3982         u32 brightness;
3983         bool rc;
3984
3985         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3986         caps = dm->backlight_caps[bl_idx];
3987
3988         dm->brightness[bl_idx] = user_brightness;
3989         /* update scratch register */
3990         if (bl_idx == 0)
3991                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3992         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3993         link = (struct dc_link *)dm->backlight_link[bl_idx];
3994
3995         /* Change brightness based on AUX property */
3996         if (caps.aux_support) {
3997                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3998                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3999                 if (!rc)
4000                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4001         } else {
4002                 rc = dc_link_set_backlight_level(link, brightness, 0);
4003                 if (!rc)
4004                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4005         }
4006
4007         if (rc)
4008                 dm->actual_brightness[bl_idx] = user_brightness;
4009 }
4010
4011 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4012 {
4013         struct amdgpu_display_manager *dm = bl_get_data(bd);
4014         int i;
4015
4016         for (i = 0; i < dm->num_of_edps; i++) {
4017                 if (bd == dm->backlight_dev[i])
4018                         break;
4019         }
4020         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4021                 i = 0;
4022         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4023
4024         return 0;
4025 }
4026
4027 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4028                                          int bl_idx)
4029 {
4030         struct amdgpu_dm_backlight_caps caps;
4031         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4032
4033         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4034         caps = dm->backlight_caps[bl_idx];
4035
4036         if (caps.aux_support) {
4037                 u32 avg, peak;
4038                 bool rc;
4039
4040                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4041                 if (!rc)
4042                         return dm->brightness[bl_idx];
4043                 return convert_brightness_to_user(&caps, avg);
4044         } else {
4045                 int ret = dc_link_get_backlight_level(link);
4046
4047                 if (ret == DC_ERROR_UNEXPECTED)
4048                         return dm->brightness[bl_idx];
4049                 return convert_brightness_to_user(&caps, ret);
4050         }
4051 }
4052
4053 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4054 {
4055         struct amdgpu_display_manager *dm = bl_get_data(bd);
4056         int i;
4057
4058         for (i = 0; i < dm->num_of_edps; i++) {
4059                 if (bd == dm->backlight_dev[i])
4060                         break;
4061         }
4062         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4063                 i = 0;
4064         return amdgpu_dm_backlight_get_level(dm, i);
4065 }
4066
4067 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4068         .options = BL_CORE_SUSPENDRESUME,
4069         .get_brightness = amdgpu_dm_backlight_get_brightness,
4070         .update_status  = amdgpu_dm_backlight_update_status,
4071 };
4072
4073 static void
4074 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4075 {
4076         char bl_name[16];
4077         struct backlight_properties props = { 0 };
4078
4079         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4080         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4081
4082         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4083         props.brightness = AMDGPU_MAX_BL_LEVEL;
4084         props.type = BACKLIGHT_RAW;
4085
4086         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4087                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4088
4089         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4090                                                                        adev_to_drm(dm->adev)->dev,
4091                                                                        dm,
4092                                                                        &amdgpu_dm_backlight_ops,
4093                                                                        &props);
4094
4095         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4096                 DRM_ERROR("DM: Backlight registration failed!\n");
4097         else
4098                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4099 }
4100 #endif
4101
4102 static int initialize_plane(struct amdgpu_display_manager *dm,
4103                             struct amdgpu_mode_info *mode_info, int plane_id,
4104                             enum drm_plane_type plane_type,
4105                             const struct dc_plane_cap *plane_cap)
4106 {
4107         struct drm_plane *plane;
4108         unsigned long possible_crtcs;
4109         int ret = 0;
4110
4111         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4112         if (!plane) {
4113                 DRM_ERROR("KMS: Failed to allocate plane\n");
4114                 return -ENOMEM;
4115         }
4116         plane->type = plane_type;
4117
4118         /*
4119          * HACK: IGT tests expect that the primary plane for a CRTC
4120          * can only have one possible CRTC. Only expose support for
4121          * any CRTC if the plane is not going to be used as a primary
4122          * plane for a CRTC - like overlay or underlay planes.
4123          */
4124         possible_crtcs = 1 << plane_id;
4125         if (plane_id >= dm->dc->caps.max_streams)
4126                 possible_crtcs = 0xff;
4127
4128         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4129
4130         if (ret) {
4131                 DRM_ERROR("KMS: Failed to initialize plane\n");
4132                 kfree(plane);
4133                 return ret;
4134         }
4135
4136         if (mode_info)
4137                 mode_info->planes[plane_id] = plane;
4138
4139         return ret;
4140 }
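
/*
 * Worked example for the possible_crtcs masks above (illustrative numbers):
 * with dc->caps.max_streams == 4, primary plane 2 gets possible_crtcs ==
 * 1 << 2 == 0x4 and is therefore bound to CRTC 2 only, while an overlay
 * plane with plane_id >= 4 gets 0xff and may be placed on any CRTC.
 */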
4141
4143 static void register_backlight_device(struct amdgpu_display_manager *dm,
4144                                       struct dc_link *link)
4145 {
4146 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4147         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4148
4149         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4150             link->type != dc_connection_none) {
4151                 /*
4152                  * Even if registration fails, we should continue with
4153                  * DM initialization, because not having backlight control
4154                  * is better than a black screen.
4155                  */
4156                 if (!dm->backlight_dev[dm->num_of_edps])
4157                         amdgpu_dm_register_backlight_device(dm);
4158
4159                 if (dm->backlight_dev[dm->num_of_edps]) {
4160                         dm->backlight_link[dm->num_of_edps] = link;
4161                         dm->num_of_edps++;
4162                 }
4163         }
4164 #endif
4165 }
4166
4167
4168 /*
4169  * In this architecture, the association
4170  * connector -> encoder -> crtc
4171  * is not really required. The crtc and connector will hold the
4172  * display_index as an abstraction to use with the DAL component.
4173  *
4174  * Returns 0 on success
4175  */
4176 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4177 {
4178         struct amdgpu_display_manager *dm = &adev->dm;
4179         int32_t i;
4180         struct amdgpu_dm_connector *aconnector = NULL;
4181         struct amdgpu_encoder *aencoder = NULL;
4182         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4183         uint32_t link_cnt;
4184         int32_t primary_planes;
4185         enum dc_connection_type new_connection_type = dc_connection_none;
4186         const struct dc_plane_cap *plane;
4187         bool psr_feature_enabled = false;
4188
4189         dm->display_indexes_num = dm->dc->caps.max_streams;
4190         /* Update the number of CRTCs actually in use */
4191         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4192
4193         link_cnt = dm->dc->caps.max_links;
4194         if (amdgpu_dm_mode_config_init(dm->adev)) {
4195                 DRM_ERROR("DM: Failed to initialize mode config\n");
4196                 return -EINVAL;
4197         }
4198
4199         /* There is one primary plane per CRTC */
4200         primary_planes = dm->dc->caps.max_streams;
4201         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4202
4203         /*
4204          * Initialize primary planes, implicit planes for legacy IOCTLS.
4205          * Order is reversed to match iteration order in atomic check.
4206          */
4207         for (i = (primary_planes - 1); i >= 0; i--) {
4208                 plane = &dm->dc->caps.planes[i];
4209
4210                 if (initialize_plane(dm, mode_info, i,
4211                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4212                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4213                         goto fail;
4214                 }
4215         }
4216
4217         /*
4218          * Initialize overlay planes, index starting after primary planes.
4219          * These planes have a higher DRM index than the primary planes since
4220          * they should be considered as having a higher z-order.
4221          * Order is reversed to match iteration order in atomic check.
4222          *
4223          * Only support DCN for now, and only expose one so we don't encourage
4224          * userspace to use up all the pipes.
4225          */
4226         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4227                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4228
4229                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4230                         continue;
4231
4232                 if (!plane->blends_with_above || !plane->blends_with_below)
4233                         continue;
4234
4235                 if (!plane->pixel_format_support.argb8888)
4236                         continue;
4237
4238                 if (initialize_plane(dm, NULL, primary_planes + i,
4239                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4240                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4241                         goto fail;
4242                 }
4243
4244                 /* Only create one overlay plane. */
4245                 break;
4246         }
4247
4248         for (i = 0; i < dm->dc->caps.max_streams; i++)
4249                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4250                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4251                         goto fail;
4252                 }
4253
4254 #if defined(CONFIG_DRM_AMD_DC_DCN)
4255         /* Use Outbox interrupt */
4256         switch (adev->ip_versions[DCE_HWIP][0]) {
4257         case IP_VERSION(3, 0, 0):
4258         case IP_VERSION(3, 1, 2):
4259         case IP_VERSION(3, 1, 3):
4260         case IP_VERSION(3, 1, 5):
4261         case IP_VERSION(3, 1, 6):
4262         case IP_VERSION(2, 1, 0):
4263                 if (register_outbox_irq_handlers(dm->adev)) {
4264                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4265                         goto fail;
4266                 }
4267                 break;
4268         default:
4269                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4270                               adev->ip_versions[DCE_HWIP][0]);
4271         }
4272
4273         /* Determine whether to enable PSR support by default. */
4274         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4275                 switch (adev->ip_versions[DCE_HWIP][0]) {
4276                 case IP_VERSION(3, 1, 2):
4277                 case IP_VERSION(3, 1, 3):
4278                 case IP_VERSION(3, 1, 5):
4279                 case IP_VERSION(3, 1, 6):
4280                         psr_feature_enabled = true;
4281                         break;
4282                 default:
4283                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4284                         break;
4285                 }
4286         }
4287 #endif
4288
4289         /* Disable vblank IRQs aggressively for power-saving. */
4290         adev_to_drm(adev)->vblank_disable_immediate = true;
4291
4292         /* Loop over all connectors on the board */
4293         for (i = 0; i < link_cnt; i++) {
4294                 struct dc_link *link = NULL;
4295
4296                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4297                         DRM_ERROR(
4298                                 "KMS: Cannot support more than %d display indexes\n",
4299                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4300                         continue;
4301                 }
4302
4303                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4304                 if (!aconnector)
4305                         goto fail;
4306
4307                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4308                 if (!aencoder)
4309                         goto fail;
4310
4311                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4312                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4313                         goto fail;
4314                 }
4315
4316                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4317                         DRM_ERROR("KMS: Failed to initialize connector\n");
4318                         goto fail;
4319                 }
4320
4321                 link = dc_get_link_at_index(dm->dc, i);
4322
4323                 if (!dc_link_detect_sink(link, &new_connection_type))
4324                         DRM_ERROR("KMS: Failed to detect connector\n");
4325
4326                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4327                         emulated_link_detect(link);
4328                         amdgpu_dm_update_connector_after_detect(aconnector);
4329
4330                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4331                         amdgpu_dm_update_connector_after_detect(aconnector);
4332                         register_backlight_device(dm, link);
4333                         if (dm->num_of_edps)
4334                                 update_connector_ext_caps(aconnector);
4335                         if (psr_feature_enabled)
4336                                 amdgpu_dm_set_psr_caps(link);
4337
4338                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4339                          * PSR is also supported.
4340                          */
4341                         if (link->psr_settings.psr_feature_enabled)
4342                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4343                 }
4344
4346         }
4347
4348         /* Software is initialized. Now we can register interrupt handlers. */
4349         switch (adev->asic_type) {
4350 #if defined(CONFIG_DRM_AMD_DC_SI)
4351         case CHIP_TAHITI:
4352         case CHIP_PITCAIRN:
4353         case CHIP_VERDE:
4354         case CHIP_OLAND:
4355                 if (dce60_register_irq_handlers(dm->adev)) {
4356                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4357                         goto fail;
4358                 }
4359                 break;
4360 #endif
4361         case CHIP_BONAIRE:
4362         case CHIP_HAWAII:
4363         case CHIP_KAVERI:
4364         case CHIP_KABINI:
4365         case CHIP_MULLINS:
4366         case CHIP_TONGA:
4367         case CHIP_FIJI:
4368         case CHIP_CARRIZO:
4369         case CHIP_STONEY:
4370         case CHIP_POLARIS11:
4371         case CHIP_POLARIS10:
4372         case CHIP_POLARIS12:
4373         case CHIP_VEGAM:
4374         case CHIP_VEGA10:
4375         case CHIP_VEGA12:
4376         case CHIP_VEGA20:
4377                 if (dce110_register_irq_handlers(dm->adev)) {
4378                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4379                         goto fail;
4380                 }
4381                 break;
4382         default:
4383 #if defined(CONFIG_DRM_AMD_DC_DCN)
4384                 switch (adev->ip_versions[DCE_HWIP][0]) {
4385                 case IP_VERSION(1, 0, 0):
4386                 case IP_VERSION(1, 0, 1):
4387                 case IP_VERSION(2, 0, 2):
4388                 case IP_VERSION(2, 0, 3):
4389                 case IP_VERSION(2, 0, 0):
4390                 case IP_VERSION(2, 1, 0):
4391                 case IP_VERSION(3, 0, 0):
4392                 case IP_VERSION(3, 0, 2):
4393                 case IP_VERSION(3, 0, 3):
4394                 case IP_VERSION(3, 0, 1):
4395                 case IP_VERSION(3, 1, 2):
4396                 case IP_VERSION(3, 1, 3):
4397                 case IP_VERSION(3, 1, 5):
4398                 case IP_VERSION(3, 1, 6):
4399                         if (dcn10_register_irq_handlers(dm->adev)) {
4400                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4401                                 goto fail;
4402                         }
4403                         break;
4404                 default:
4405                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4406                                         adev->ip_versions[DCE_HWIP][0]);
4407                         goto fail;
4408                 }
4409 #endif
4410                 break;
4411         }
4412
4413         return 0;
4414 fail:
4415         kfree(aencoder);
4416         kfree(aconnector);
4417
4418         return -EINVAL;
4419 }
4420
4421 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4422 {
4423         drm_atomic_private_obj_fini(&dm->atomic_obj);
4425 }
4426
4427 /******************************************************************************
4428  * amdgpu_display_funcs functions
4429  *****************************************************************************/
4430
4431 /*
4432  * dm_bandwidth_update - program display watermarks
4433  *
4434  * @adev: amdgpu_device pointer
4435  *
4436  * Calculate and program the display watermarks and line buffer allocation.
4437  */
4438 static void dm_bandwidth_update(struct amdgpu_device *adev)
4439 {
4440         /* TODO: implement later */
4441 }
4442
4443 static const struct amdgpu_display_funcs dm_display_funcs = {
4444         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4445         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4446         .backlight_set_level = NULL, /* never called for DC */
4447         .backlight_get_level = NULL, /* never called for DC */
4448         .hpd_sense = NULL, /* called unconditionally */
4449         .hpd_set_polarity = NULL, /* called unconditionally */
4450         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4451         .page_flip_get_scanoutpos =
4452                 dm_crtc_get_scanoutpos, /* called unconditionally */
4453         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4454         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4455 };
4456
4457 #if defined(CONFIG_DEBUG_KERNEL_DC)
4458
4459 static ssize_t s3_debug_store(struct device *device,
4460                               struct device_attribute *attr,
4461                               const char *buf,
4462                               size_t count)
4463 {
4464         int ret;
4465         int s3_state;
4466         struct drm_device *drm_dev = dev_get_drvdata(device);
4467         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4468
4469         ret = kstrtoint(buf, 0, &s3_state);
4470
4471         if (ret == 0) {
4472                 if (s3_state) {
4473                         dm_resume(adev);
4474                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4475                 } else
4476                         dm_suspend(adev);
4477         }
4478
4479         return ret == 0 ? count : 0;
4480 }
4481
4482 DEVICE_ATTR_WO(s3_debug);
4483
4484 #endif
4485
4486 static int dm_early_init(void *handle)
4487 {
4488         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4489
4490         switch (adev->asic_type) {
4491 #if defined(CONFIG_DRM_AMD_DC_SI)
4492         case CHIP_TAHITI:
4493         case CHIP_PITCAIRN:
4494         case CHIP_VERDE:
4495                 adev->mode_info.num_crtc = 6;
4496                 adev->mode_info.num_hpd = 6;
4497                 adev->mode_info.num_dig = 6;
4498                 break;
4499         case CHIP_OLAND:
4500                 adev->mode_info.num_crtc = 2;
4501                 adev->mode_info.num_hpd = 2;
4502                 adev->mode_info.num_dig = 2;
4503                 break;
4504 #endif
4505         case CHIP_BONAIRE:
4506         case CHIP_HAWAII:
4507                 adev->mode_info.num_crtc = 6;
4508                 adev->mode_info.num_hpd = 6;
4509                 adev->mode_info.num_dig = 6;
4510                 break;
4511         case CHIP_KAVERI:
4512                 adev->mode_info.num_crtc = 4;
4513                 adev->mode_info.num_hpd = 6;
4514                 adev->mode_info.num_dig = 7;
4515                 break;
4516         case CHIP_KABINI:
4517         case CHIP_MULLINS:
4518                 adev->mode_info.num_crtc = 2;
4519                 adev->mode_info.num_hpd = 6;
4520                 adev->mode_info.num_dig = 6;
4521                 break;
4522         case CHIP_FIJI:
4523         case CHIP_TONGA:
4524                 adev->mode_info.num_crtc = 6;
4525                 adev->mode_info.num_hpd = 6;
4526                 adev->mode_info.num_dig = 7;
4527                 break;
4528         case CHIP_CARRIZO:
4529                 adev->mode_info.num_crtc = 3;
4530                 adev->mode_info.num_hpd = 6;
4531                 adev->mode_info.num_dig = 9;
4532                 break;
4533         case CHIP_STONEY:
4534                 adev->mode_info.num_crtc = 2;
4535                 adev->mode_info.num_hpd = 6;
4536                 adev->mode_info.num_dig = 9;
4537                 break;
4538         case CHIP_POLARIS11:
4539         case CHIP_POLARIS12:
4540                 adev->mode_info.num_crtc = 5;
4541                 adev->mode_info.num_hpd = 5;
4542                 adev->mode_info.num_dig = 5;
4543                 break;
4544         case CHIP_POLARIS10:
4545         case CHIP_VEGAM:
4546                 adev->mode_info.num_crtc = 6;
4547                 adev->mode_info.num_hpd = 6;
4548                 adev->mode_info.num_dig = 6;
4549                 break;
4550         case CHIP_VEGA10:
4551         case CHIP_VEGA12:
4552         case CHIP_VEGA20:
4553                 adev->mode_info.num_crtc = 6;
4554                 adev->mode_info.num_hpd = 6;
4555                 adev->mode_info.num_dig = 6;
4556                 break;
4557         default:
4558 #if defined(CONFIG_DRM_AMD_DC_DCN)
4559                 switch (adev->ip_versions[DCE_HWIP][0]) {
4560                 case IP_VERSION(2, 0, 2):
4561                 case IP_VERSION(3, 0, 0):
4562                         adev->mode_info.num_crtc = 6;
4563                         adev->mode_info.num_hpd = 6;
4564                         adev->mode_info.num_dig = 6;
4565                         break;
4566                 case IP_VERSION(2, 0, 0):
4567                 case IP_VERSION(3, 0, 2):
4568                         adev->mode_info.num_crtc = 5;
4569                         adev->mode_info.num_hpd = 5;
4570                         adev->mode_info.num_dig = 5;
4571                         break;
4572                 case IP_VERSION(2, 0, 3):
4573                 case IP_VERSION(3, 0, 3):
4574                         adev->mode_info.num_crtc = 2;
4575                         adev->mode_info.num_hpd = 2;
4576                         adev->mode_info.num_dig = 2;
4577                         break;
4578                 case IP_VERSION(1, 0, 0):
4579                 case IP_VERSION(1, 0, 1):
4580                 case IP_VERSION(3, 0, 1):
4581                 case IP_VERSION(2, 1, 0):
4582                 case IP_VERSION(3, 1, 2):
4583                 case IP_VERSION(3, 1, 3):
4584                 case IP_VERSION(3, 1, 5):
4585                 case IP_VERSION(3, 1, 6):
4586                         adev->mode_info.num_crtc = 4;
4587                         adev->mode_info.num_hpd = 4;
4588                         adev->mode_info.num_dig = 4;
4589                         break;
4590                 default:
4591                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4592                                         adev->ip_versions[DCE_HWIP][0]);
4593                         return -EINVAL;
4594                 }
4595 #endif
4596                 break;
4597         }
4598
4599         amdgpu_dm_set_irq_funcs(adev);
4600
4601         if (adev->mode_info.funcs == NULL)
4602                 adev->mode_info.funcs = &dm_display_funcs;
4603
4604         /*
4605          * Note: Do NOT change adev->audio_endpt_rreg and
4606          * adev->audio_endpt_wreg because they are initialised in
4607          * amdgpu_device_init()
4608          */
4609 #if defined(CONFIG_DEBUG_KERNEL_DC)
4610         device_create_file(
4611                 adev_to_drm(adev)->dev,
4612                 &dev_attr_s3_debug);
4613 #endif
4614
4615         return 0;
4616 }
4617
4618 static bool modeset_required(struct drm_crtc_state *crtc_state,
4619                              struct dc_stream_state *new_stream,
4620                              struct dc_stream_state *old_stream)
4621 {
4622         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4623 }
4624
4625 static bool modereset_required(struct drm_crtc_state *crtc_state)
4626 {
4627         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4628 }
4629
4630 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4631 {
4632         drm_encoder_cleanup(encoder);
4633         kfree(encoder);
4634 }
4635
4636 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4637         .destroy = amdgpu_dm_encoder_destroy,
4638 };
4639
4640
4641 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4642                                          struct drm_framebuffer *fb,
4643                                          int *min_downscale, int *max_upscale)
4644 {
4645         struct amdgpu_device *adev = drm_to_adev(dev);
4646         struct dc *dc = adev->dm.dc;
4647         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4648         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4649
4650         switch (fb->format->format) {
4651         case DRM_FORMAT_P010:
4652         case DRM_FORMAT_NV12:
4653         case DRM_FORMAT_NV21:
4654                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4655                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4656                 break;
4657
4658         case DRM_FORMAT_XRGB16161616F:
4659         case DRM_FORMAT_ARGB16161616F:
4660         case DRM_FORMAT_XBGR16161616F:
4661         case DRM_FORMAT_ABGR16161616F:
4662                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4663                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4664                 break;
4665
4666         default:
4667                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4668                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4669                 break;
4670         }
4671
4672         /*
4673          * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4674          * scaling factor of 1.0 == 1000 units.
4675          */
4676         if (*max_upscale == 1)
4677                 *max_upscale = 1000;
4678
4679         if (*min_downscale == 1)
4680                 *min_downscale = 1000;
4681 }
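
/*
 * Illustrative reading of the units above: max_upscale == 16000 allows up
 * to 16.0x upscaling, min_downscale == 250 allows down to 0.25x, and a
 * factor reported as 1 by the caps is normalized to 1000, i.e. exactly
 * 1.0 (no scaling).
 */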
4682
4684 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4685                                 const struct drm_plane_state *state,
4686                                 struct dc_scaling_info *scaling_info)
4687 {
4688         int scale_w, scale_h, min_downscale, max_upscale;
4689
4690         memset(scaling_info, 0, sizeof(*scaling_info));
4691
4692         /* Source is 16.16 fixed point; we ignore the fractional part for now. */
4693         scaling_info->src_rect.x = state->src_x >> 16;
4694         scaling_info->src_rect.y = state->src_y >> 16;
4695
4696         /*
4697          * For reasons we don't (yet) fully understand a non-zero
4698          * src_y coordinate into an NV12 buffer can cause a
4699          * system hang on DCN1x.
4700          * To avoid hangs (and maybe be overly cautious)
4701          * let's reject both non-zero src_x and src_y.
4702          *
4703          * We currently know of only one use-case to reproduce a
4704          * scenario with non-zero src_x and src_y for NV12, which
4705          * is to gesture the YouTube Android app into full screen
4706          * on ChromeOS.
4707          */
4708         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4709             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4710             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4711             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4712                 return -EINVAL;
4713
4714         scaling_info->src_rect.width = state->src_w >> 16;
4715         if (scaling_info->src_rect.width == 0)
4716                 return -EINVAL;
4717
4718         scaling_info->src_rect.height = state->src_h >> 16;
4719         if (scaling_info->src_rect.height == 0)
4720                 return -EINVAL;
4721
4722         scaling_info->dst_rect.x = state->crtc_x;
4723         scaling_info->dst_rect.y = state->crtc_y;
4724
4725         if (state->crtc_w == 0)
4726                 return -EINVAL;
4727
4728         scaling_info->dst_rect.width = state->crtc_w;
4729
4730         if (state->crtc_h == 0)
4731                 return -EINVAL;
4732
4733         scaling_info->dst_rect.height = state->crtc_h;
4734
4735         /* DRM doesn't specify clipping on destination output. */
4736         scaling_info->clip_rect = scaling_info->dst_rect;
4737
4738         /* Validate scaling per-format with DC plane caps */
4739         if (state->plane && state->plane->dev && state->fb) {
4740                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4741                                              &min_downscale, &max_upscale);
4742         } else {
4743                 min_downscale = 250;
4744                 max_upscale = 16000;
4745         }
4746
4747         scale_w = scaling_info->dst_rect.width * 1000 /
4748                   scaling_info->src_rect.width;
4749
4750         if (scale_w < min_downscale || scale_w > max_upscale)
4751                 return -EINVAL;
4752
4753         scale_h = scaling_info->dst_rect.height * 1000 /
4754                   scaling_info->src_rect.height;
4755
4756         if (scale_h < min_downscale || scale_h > max_upscale)
4757                 return -EINVAL;
4758
4759         /*
4760          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4761          * assume reasonable defaults based on the format.
4762          */
4763
4764         return 0;
4765 }
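
/*
 * Worked example for the checks above (illustrative numbers): with
 * src_w == 1920 << 16 (16.16 fixed point) and crtc_w == 960,
 *
 *      scale_w = 960 * 1000 / 1920 = 500       (i.e. 0.5x)
 *
 * which is accepted as long as the format's min_downscale <= 500.
 */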
4766
4767 static void
4768 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4769                                  uint64_t tiling_flags)
4770 {
4771         /* Fill GFX8 params */
4772         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4773                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4774
4775                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4776                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4777                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4778                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4779                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4780
4781                 /* XXX fix me for VI */
4782                 tiling_info->gfx8.num_banks = num_banks;
4783                 tiling_info->gfx8.array_mode =
4784                                 DC_ARRAY_2D_TILED_THIN1;
4785                 tiling_info->gfx8.tile_split = tile_split;
4786                 tiling_info->gfx8.bank_width = bankw;
4787                 tiling_info->gfx8.bank_height = bankh;
4788                 tiling_info->gfx8.tile_aspect = mtaspect;
4789                 tiling_info->gfx8.tile_mode =
4790                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4791         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4792                         == DC_ARRAY_1D_TILED_THIN1) {
4793                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4794         }
4795
4796         tiling_info->gfx8.pipe_config =
4797                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4798 }
4799
4800 static void
4801 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4802                                   union dc_tiling_info *tiling_info)
4803 {
4804         tiling_info->gfx9.num_pipes =
4805                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4806         tiling_info->gfx9.num_banks =
4807                 adev->gfx.config.gb_addr_config_fields.num_banks;
4808         tiling_info->gfx9.pipe_interleave =
4809                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4810         tiling_info->gfx9.num_shader_engines =
4811                 adev->gfx.config.gb_addr_config_fields.num_se;
4812         tiling_info->gfx9.max_compressed_frags =
4813                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4814         tiling_info->gfx9.num_rb_per_se =
4815                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4816         tiling_info->gfx9.shaderEnable = 1;
4817         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4818                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4819 }
4820
4821 static int
4822 validate_dcc(struct amdgpu_device *adev,
4823              const enum surface_pixel_format format,
4824              const enum dc_rotation_angle rotation,
4825              const union dc_tiling_info *tiling_info,
4826              const struct dc_plane_dcc_param *dcc,
4827              const struct dc_plane_address *address,
4828              const struct plane_size *plane_size)
4829 {
4830         struct dc *dc = adev->dm.dc;
4831         struct dc_dcc_surface_param input;
4832         struct dc_surface_dcc_cap output;
4833
4834         memset(&input, 0, sizeof(input));
4835         memset(&output, 0, sizeof(output));
4836
4837         if (!dcc->enable)
4838                 return 0;
4839
4840         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4841             !dc->cap_funcs.get_dcc_compression_cap)
4842                 return -EINVAL;
4843
4844         input.format = format;
4845         input.surface_size.width = plane_size->surface_size.width;
4846         input.surface_size.height = plane_size->surface_size.height;
4847         input.swizzle_mode = tiling_info->gfx9.swizzle;
4848
4849         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4850                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4851         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4852                 input.scan = SCAN_DIRECTION_VERTICAL;
4853
4854         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4855                 return -EINVAL;
4856
4857         if (!output.capable)
4858                 return -EINVAL;
4859
4860         if (dcc->independent_64b_blks == 0 &&
4861             output.grph.rgb.independent_64b_blks != 0)
4862                 return -EINVAL;
4863
4864         return 0;
4865 }
4866
4867 static bool
4868 modifier_has_dcc(uint64_t modifier)
4869 {
4870         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4871 }
4872
4873 static unsigned
4874 modifier_gfx9_swizzle_mode(uint64_t modifier)
4875 {
4876         if (modifier == DRM_FORMAT_MOD_LINEAR)
4877                 return 0;
4878
4879         return AMD_FMT_MOD_GET(TILE, modifier);
4880 }
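
/*
 * Usage sketch for the two helpers above (illustrative): a plain GFX9
 * S-swizzle modifier carries no DCC bit, so
 *
 *      uint64_t mod = AMD_FMT_MOD |
 *              AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
 *              AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9);
 *
 * gives modifier_has_dcc(mod) == false and
 * modifier_gfx9_swizzle_mode(mod) == AMD_FMT_MOD_TILE_GFX9_64K_S.
 */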
4881
4882 static const struct drm_format_info *
4883 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4884 {
4885         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4886 }
4887
4888 static void
4889 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4890                                     union dc_tiling_info *tiling_info,
4891                                     uint64_t modifier)
4892 {
4893         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4894         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4895         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4896         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4897
4898         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4899
4900         if (!IS_AMD_FMT_MOD(modifier))
4901                 return;
4902
4903         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4904         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4905
4906         if (adev->family >= AMDGPU_FAMILY_NV) {
4907                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4908         } else {
4909                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4910
4911                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4912         }
4913 }
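
/*
 * Worked example for the decoding above (illustrative): a modifier with
 * PIPE_XOR_BITS == 5 yields pipes_log2 == min(4, 5) == 4, so
 * num_pipes == 1 << 4 == 16 and num_shader_engines == 1 << (5 - 4) == 2.
 */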
4914
4915 enum dm_micro_swizzle {
4916         MICRO_SWIZZLE_Z = 0,
4917         MICRO_SWIZZLE_S = 1,
4918         MICRO_SWIZZLE_D = 2,
4919         MICRO_SWIZZLE_R = 3
4920 };
4921
4922 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4923                                           uint32_t format,
4924                                           uint64_t modifier)
4925 {
4926         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4927         const struct drm_format_info *info = drm_format_info(format);
4928         int i;
4929
4930         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4931
4932         if (!info)
4933                 return false;
4934
4935         /*
4936          * We always have to allow these modifiers:
4937          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4938          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4939          */
4940         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4941             modifier == DRM_FORMAT_MOD_INVALID) {
4942                 return true;
4943         }
4944
4945         /* Check that the modifier is on the list of the plane's supported modifiers. */
4946         for (i = 0; i < plane->modifier_count; i++) {
4947                 if (modifier == plane->modifiers[i])
4948                         break;
4949         }
4950         if (i == plane->modifier_count)
4951                 return false;
4952
4953         /*
4954          * For D swizzle the canonical modifier depends on the bpp, so check
4955          * it here.
4956          */
4957         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4958             adev->family >= AMDGPU_FAMILY_NV) {
4959                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4960                         return false;
4961         }
4962
4963         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4964             info->cpp[0] < 8)
4965                 return false;
4966
4967         if (modifier_has_dcc(modifier)) {
4968                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4969                 if (info->cpp[0] != 4)
4970                         return false;
4971                 /* We support multi-planar formats, but not when combined with
4972                  * additional DCC metadata planes. */
4973                 if (info->num_planes > 1)
4974                         return false;
4975         }
4976
4977         return true;
4978 }
4979
4980 static void
4981 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4982 {
4983         if (!*mods)
4984                 return;
4985
4986         if (*cap - *size < 1) {
4987                 uint64_t new_cap = *cap * 2;
4988                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4989
4990                 if (!new_mods) {
4991                         kfree(*mods);
4992                         *mods = NULL;
4993                         return;
4994                 }
4995
4996                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4997                 kfree(*mods);
4998                 *mods = new_mods;
4999                 *cap = new_cap;
5000         }
5001
5002         (*mods)[*size] = mod;
5003         *size += 1;
5004 }
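
/*
 * Note: add_modifier() grows the array geometrically (the capacity doubles
 * once it is full), so appending N modifiers costs amortized O(1) per
 * entry. On allocation failure the whole list is freed and *mods is set
 * to NULL, which get_plane_modifiers() turns into -ENOMEM.
 */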
5005
5006 static void
5007 add_gfx9_modifiers(const struct amdgpu_device *adev,
5008                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
5009 {
5010         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5011         int pipe_xor_bits = min(8, pipes +
5012                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5013         int bank_xor_bits = min(8 - pipe_xor_bits,
5014                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5015         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5016                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5017
5019         if (adev->family == AMDGPU_FAMILY_RV) {
5020                 /* Raven2 and later */
5021                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5022
5023                 /*
5024                  * No _D DCC swizzles yet because we only allow 32bpp, which
5025                  * doesn't support _D on DCN
5026                  */
5027
5028                 if (has_constant_encode) {
5029                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5030                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5031                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5032                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5033                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5034                                     AMD_FMT_MOD_SET(DCC, 1) |
5035                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5036                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5037                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5038                 }
5039
5040                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5041                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5042                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5043                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5044                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5045                             AMD_FMT_MOD_SET(DCC, 1) |
5046                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5047                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5048                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5049
5050                 if (has_constant_encode) {
5051                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5053                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5054                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5055                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5056                                     AMD_FMT_MOD_SET(DCC, 1) |
5057                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5058                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5059                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5061                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5062                                     AMD_FMT_MOD_SET(RB, rb) |
5063                                     AMD_FMT_MOD_SET(PIPE, pipes));
5064                 }
5065
5066                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5067                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5068                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5069                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5070                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5071                             AMD_FMT_MOD_SET(DCC, 1) |
5072                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5073                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5074                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5075                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5076                             AMD_FMT_MOD_SET(RB, rb) |
5077                             AMD_FMT_MOD_SET(PIPE, pipes));
5078         }
5079
5080         /*
5081          * Only supported for 64bpp on Raven, will be filtered on format in
5082          * dm_plane_format_mod_supported.
5083          */
5084         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5085                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5086                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5087                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5088                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5089
5090         if (adev->family == AMDGPU_FAMILY_RV) {
5091                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5092                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5093                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5094                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5095                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5096         }
5097
5098         /*
5099          * Only supported for 64bpp on Raven, will be filtered on format in
5100          * dm_plane_format_mod_supported.
5101          */
5102         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5104                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5105
5106         if (adev->family == AMDGPU_FAMILY_RV) {
5107                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5108                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5109                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5110         }
5111 }
5112
5113 static void
5114 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5115                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5116 {
5117         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5118
5119         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5120                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5121                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5122                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5123                     AMD_FMT_MOD_SET(DCC, 1) |
5124                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5125                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5126                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5127
5128         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5129                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5130                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5131                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5132                     AMD_FMT_MOD_SET(DCC, 1) |
5133                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5134                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5135                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5136                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5137
5138         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5139                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5140                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5141                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5142
5143         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5144                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5145                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5146                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5147
5148
5149         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5150         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5151                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5152                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5153
5154         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5155                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5156                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5157 }
5158
5159 static void
5160 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5161                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5162 {
5163         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5164         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5165
5166         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5167                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5168                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5169                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5170                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5171                     AMD_FMT_MOD_SET(DCC, 1) |
5172                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5173                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5174                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5175                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5176
5177         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5178                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5179                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5180                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5181                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5182                     AMD_FMT_MOD_SET(DCC, 1) |
5183                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5184                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5185                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5186
5187         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5188                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5189                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5190                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5191                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5192                     AMD_FMT_MOD_SET(DCC, 1) |
5193                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5194                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5195                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5196                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5197                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5198
5199         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5200                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5201                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5202                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5203                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5204                     AMD_FMT_MOD_SET(DCC, 1) |
5205                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5206                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5207                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5208                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5209
5210         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5211                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5212                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5213                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5214                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5215
5216         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5217                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5218                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5219                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5220                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5221
5222         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5223         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5224                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5225                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5226
5227         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5228                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5229                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5230 }
5231
5232 static int
5233 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5234 {
5235         uint64_t size = 0, capacity = 128;
5236         *mods = NULL;
5237
5238         /* We have not hooked up any pre-GFX9 modifiers. */
5239         if (adev->family < AMDGPU_FAMILY_AI)
5240                 return 0;
5241
5242         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5243
5244         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5245                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5246                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5247                 return *mods ? 0 : -ENOMEM;
5248         }
5249
5250         switch (adev->family) {
5251         case AMDGPU_FAMILY_AI:
5252         case AMDGPU_FAMILY_RV:
5253                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5254                 break;
5255         case AMDGPU_FAMILY_NV:
5256         case AMDGPU_FAMILY_VGH:
5257         case AMDGPU_FAMILY_YC:
5258         case AMDGPU_FAMILY_GC_10_3_6:
5259         case AMDGPU_FAMILY_GC_10_3_7:
5260                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5261                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5262                 else
5263                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5264                 break;
5265         }
5266
5267         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5268
5269         /* INVALID marks the end of the list. */
5270         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5271
5272         if (!*mods)
5273                 return -ENOMEM;
5274
5275         return 0;
5276 }
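
/*
 * Sketch of how a consumer can walk the returned list:
 * DRM_FORMAT_MOD_INVALID is the sentinel appended above (hypothetical
 * helper, for illustration only):
 */
#if 0
static unsigned int example_count_modifiers(const uint64_t *mods)
{
        unsigned int n = 0;

        while (mods[n] != DRM_FORMAT_MOD_INVALID)
                n++;

        return n; /* number of entries before the terminator */
}
#endif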
5277
5278 static int
5279 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5280                                           const struct amdgpu_framebuffer *afb,
5281                                           const enum surface_pixel_format format,
5282                                           const enum dc_rotation_angle rotation,
5283                                           const struct plane_size *plane_size,
5284                                           union dc_tiling_info *tiling_info,
5285                                           struct dc_plane_dcc_param *dcc,
5286                                           struct dc_plane_address *address,
5287                                           const bool force_disable_dcc)
5288 {
5289         const uint64_t modifier = afb->base.modifier;
5290         int ret = 0;
5291
5292         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5293         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5294
5295         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5296                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5297                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5298                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5299
5300                 dcc->enable = 1;
5301                 dcc->meta_pitch = afb->base.pitches[1];
5302                 dcc->independent_64b_blks = independent_64b_blks;
5303                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5304                         if (independent_64b_blks && independent_128b_blks)
5305                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5306                         else if (independent_128b_blks)
5307                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5308                         else if (independent_64b_blks && !independent_128b_blks)
5309                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5310                         else
5311                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5312                 } else {
5313                         if (independent_64b_blks)
5314                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5315                         else
5316                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5317                 }
5318
5319                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5320                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5321         }
5322
5323         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5324         if (ret)
5325                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5326
5327         return ret;
5328 }
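
/*
 * The dcc_ind_blk selection above, as a truth table for the
 * AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS branch:
 *
 *      independent_64b  independent_128b  dcc_ind_blk
 *      1                1                 hubp_ind_block_64b_no_128bcl
 *      0                1                 hubp_ind_block_128b
 *      1                0                 hubp_ind_block_64b
 *      0                0                 hubp_ind_block_unconstrained
 *
 * Older tile versions only distinguish 64B-independent vs. unconstrained.
 */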
5329
5330 static int
5331 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5332                              const struct amdgpu_framebuffer *afb,
5333                              const enum surface_pixel_format format,
5334                              const enum dc_rotation_angle rotation,
5335                              const uint64_t tiling_flags,
5336                              union dc_tiling_info *tiling_info,
5337                              struct plane_size *plane_size,
5338                              struct dc_plane_dcc_param *dcc,
5339                              struct dc_plane_address *address,
5340                              bool tmz_surface,
5341                              bool force_disable_dcc)
5342 {
5343         const struct drm_framebuffer *fb = &afb->base;
5344         int ret;
5345
5346         memset(tiling_info, 0, sizeof(*tiling_info));
5347         memset(plane_size, 0, sizeof(*plane_size));
5348         memset(dcc, 0, sizeof(*dcc));
5349         memset(address, 0, sizeof(*address));
5350
5351         address->tmz_surface = tmz_surface;
5352
5353         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5354                 uint64_t addr = afb->address + fb->offsets[0];
5355
5356                 plane_size->surface_size.x = 0;
5357                 plane_size->surface_size.y = 0;
5358                 plane_size->surface_size.width = fb->width;
5359                 plane_size->surface_size.height = fb->height;
5360                 plane_size->surface_pitch =
5361                         fb->pitches[0] / fb->format->cpp[0];
5362
5363                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5364                 address->grph.addr.low_part = lower_32_bits(addr);
5365                 address->grph.addr.high_part = upper_32_bits(addr);
5366         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5367                 uint64_t luma_addr = afb->address + fb->offsets[0];
5368                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5369
5370                 plane_size->surface_size.x = 0;
5371                 plane_size->surface_size.y = 0;
5372                 plane_size->surface_size.width = fb->width;
5373                 plane_size->surface_size.height = fb->height;
5374                 plane_size->surface_pitch =
5375                         fb->pitches[0] / fb->format->cpp[0];
5376
5377                 plane_size->chroma_size.x = 0;
5378                 plane_size->chroma_size.y = 0;
5379                 /* TODO: set these based on surface format */
5380                 plane_size->chroma_size.width = fb->width / 2;
5381                 plane_size->chroma_size.height = fb->height / 2;
5382
5383                 plane_size->chroma_pitch =
5384                         fb->pitches[1] / fb->format->cpp[1];
5385
5386                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5387                 address->video_progressive.luma_addr.low_part =
5388                         lower_32_bits(luma_addr);
5389                 address->video_progressive.luma_addr.high_part =
5390                         upper_32_bits(luma_addr);
5391                 address->video_progressive.chroma_addr.low_part =
5392                         lower_32_bits(chroma_addr);
5393                 address->video_progressive.chroma_addr.high_part =
5394                         upper_32_bits(chroma_addr);
5395         }
5396
5397         if (adev->family >= AMDGPU_FAMILY_AI) {
5398                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5399                                                                 rotation, plane_size,
5400                                                                 tiling_info, dcc,
5401                                                                 address,
5402                                                                 force_disable_dcc);
5403                 if (ret)
5404                         return ret;
5405         } else {
5406                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5407         }
5408
5409         return 0;
5410 }
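
/*
 * Illustrative example (hypothetical values, not from any real
 * framebuffer): a 1920x1080 XRGB8888 buffer with pitches[0] = 7680 and
 * cpp[0] = 4 yields surface_pitch = 7680 / 4 = 1920, i.e. the pitch DC
 * expects is measured in pixels, not bytes.
 */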
5411
5412 static void
5413 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5414                                bool *per_pixel_alpha, bool *global_alpha,
5415                                int *global_alpha_value)
5416 {
5417         *per_pixel_alpha = false;
5418         *global_alpha = false;
5419         *global_alpha_value = 0xff;
5420
5421         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5422                 return;
5423
5424         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5425                 static const uint32_t alpha_formats[] = {
5426                         DRM_FORMAT_ARGB8888,
5427                         DRM_FORMAT_RGBA8888,
5428                         DRM_FORMAT_ABGR8888,
5429                 };
5430                 uint32_t format = plane_state->fb->format->format;
5431                 unsigned int i;
5432
5433                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5434                         if (format == alpha_formats[i]) {
5435                                 *per_pixel_alpha = true;
5436                                 break;
5437                         }
5438                 }
5439         }
5440
5441         if (plane_state->alpha < 0xffff) {
5442                 *global_alpha = true;
5443                 *global_alpha_value = plane_state->alpha >> 8;
5444         }
5445 }
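
/*
 * Illustrative example (hypothetical plane state): an overlay plane with
 * DRM_FORMAT_ARGB8888, pixel_blend_mode = DRM_MODE_BLEND_PREMULTI and
 * plane alpha 0x8080 yields per_pixel_alpha = true, global_alpha = true
 * and global_alpha_value = 0x80 (the 16-bit DRM alpha scaled to 8 bits).
 */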
5446
5447 static int
5448 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5449                             const enum surface_pixel_format format,
5450                             enum dc_color_space *color_space)
5451 {
5452         bool full_range;
5453
5454         *color_space = COLOR_SPACE_SRGB;
5455
5456         /* DRM color properties only affect non-RGB formats. */
5457         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5458                 return 0;
5459
5460         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5461
5462         switch (plane_state->color_encoding) {
5463         case DRM_COLOR_YCBCR_BT601:
5464                 if (full_range)
5465                         *color_space = COLOR_SPACE_YCBCR601;
5466                 else
5467                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5468                 break;
5469
5470         case DRM_COLOR_YCBCR_BT709:
5471                 if (full_range)
5472                         *color_space = COLOR_SPACE_YCBCR709;
5473                 else
5474                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5475                 break;
5476
5477         case DRM_COLOR_YCBCR_BT2020:
5478                 if (full_range)
5479                         *color_space = COLOR_SPACE_2020_YCBCR;
5480                 else
5481                         return -EINVAL;
5482                 break;
5483
5484         default:
5485                 return -EINVAL;
5486         }
5487
5488         return 0;
5489 }
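
/*
 * Illustrative mapping (hypothetical input): an NV12 plane with
 * DRM_COLOR_YCBCR_BT709 and DRM_COLOR_YCBCR_LIMITED_RANGE resolves to
 * COLOR_SPACE_YCBCR709_LIMITED, while BT2020 with limited range is
 * rejected with -EINVAL since only full-range BT2020 is handled here.
 */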
5490
5491 static int
5492 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5493                             const struct drm_plane_state *plane_state,
5494                             const uint64_t tiling_flags,
5495                             struct dc_plane_info *plane_info,
5496                             struct dc_plane_address *address,
5497                             bool tmz_surface,
5498                             bool force_disable_dcc)
5499 {
5500         const struct drm_framebuffer *fb = plane_state->fb;
5501         const struct amdgpu_framebuffer *afb =
5502                 to_amdgpu_framebuffer(plane_state->fb);
5503         int ret;
5504
5505         memset(plane_info, 0, sizeof(*plane_info));
5506
5507         switch (fb->format->format) {
5508         case DRM_FORMAT_C8:
5509                 plane_info->format =
5510                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5511                 break;
5512         case DRM_FORMAT_RGB565:
5513                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5514                 break;
5515         case DRM_FORMAT_XRGB8888:
5516         case DRM_FORMAT_ARGB8888:
5517                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5518                 break;
5519         case DRM_FORMAT_XRGB2101010:
5520         case DRM_FORMAT_ARGB2101010:
5521                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5522                 break;
5523         case DRM_FORMAT_XBGR2101010:
5524         case DRM_FORMAT_ABGR2101010:
5525                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5526                 break;
5527         case DRM_FORMAT_XBGR8888:
5528         case DRM_FORMAT_ABGR8888:
5529                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5530                 break;
5531         case DRM_FORMAT_NV21:
5532                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5533                 break;
5534         case DRM_FORMAT_NV12:
5535                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5536                 break;
5537         case DRM_FORMAT_P010:
5538                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5539                 break;
5540         case DRM_FORMAT_XRGB16161616F:
5541         case DRM_FORMAT_ARGB16161616F:
5542                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5543                 break;
5544         case DRM_FORMAT_XBGR16161616F:
5545         case DRM_FORMAT_ABGR16161616F:
5546                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5547                 break;
5548         case DRM_FORMAT_XRGB16161616:
5549         case DRM_FORMAT_ARGB16161616:
5550                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5551                 break;
5552         case DRM_FORMAT_XBGR16161616:
5553         case DRM_FORMAT_ABGR16161616:
5554                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5555                 break;
5556         default:
5557                 DRM_ERROR(
5558                         "Unsupported screen format %p4cc\n",
5559                         &fb->format->format);
5560                 return -EINVAL;
5561         }
5562
5563         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5564         case DRM_MODE_ROTATE_0:
5565                 plane_info->rotation = ROTATION_ANGLE_0;
5566                 break;
5567         case DRM_MODE_ROTATE_90:
5568                 plane_info->rotation = ROTATION_ANGLE_90;
5569                 break;
5570         case DRM_MODE_ROTATE_180:
5571                 plane_info->rotation = ROTATION_ANGLE_180;
5572                 break;
5573         case DRM_MODE_ROTATE_270:
5574                 plane_info->rotation = ROTATION_ANGLE_270;
5575                 break;
5576         default:
5577                 plane_info->rotation = ROTATION_ANGLE_0;
5578                 break;
5579         }
5580
5581         plane_info->visible = true;
5582         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5583
5584         plane_info->layer_index = 0;
5585
5586         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5587                                           &plane_info->color_space);
5588         if (ret)
5589                 return ret;
5590
5591         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5592                                            plane_info->rotation, tiling_flags,
5593                                            &plane_info->tiling_info,
5594                                            &plane_info->plane_size,
5595                                            &plane_info->dcc, address, tmz_surface,
5596                                            force_disable_dcc);
5597         if (ret)
5598                 return ret;
5599
5600         fill_blending_from_plane_state(
5601                 plane_state, &plane_info->per_pixel_alpha,
5602                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5603
5604         return 0;
5605 }
5606
5607 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5608                                     struct dc_plane_state *dc_plane_state,
5609                                     struct drm_plane_state *plane_state,
5610                                     struct drm_crtc_state *crtc_state)
5611 {
5612         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5613         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5614         struct dc_scaling_info scaling_info;
5615         struct dc_plane_info plane_info;
5616         int ret;
5617         bool force_disable_dcc = false;
5618
5619         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5620         if (ret)
5621                 return ret;
5622
5623         dc_plane_state->src_rect = scaling_info.src_rect;
5624         dc_plane_state->dst_rect = scaling_info.dst_rect;
5625         dc_plane_state->clip_rect = scaling_info.clip_rect;
5626         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5627
5628         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5629         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5630                                           afb->tiling_flags,
5631                                           &plane_info,
5632                                           &dc_plane_state->address,
5633                                           afb->tmz_surface,
5634                                           force_disable_dcc);
5635         if (ret)
5636                 return ret;
5637
5638         dc_plane_state->format = plane_info.format;
5639         dc_plane_state->color_space = plane_info.color_space;
5641         dc_plane_state->plane_size = plane_info.plane_size;
5642         dc_plane_state->rotation = plane_info.rotation;
5643         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5644         dc_plane_state->stereo_format = plane_info.stereo_format;
5645         dc_plane_state->tiling_info = plane_info.tiling_info;
5646         dc_plane_state->visible = plane_info.visible;
5647         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5648         dc_plane_state->global_alpha = plane_info.global_alpha;
5649         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5650         dc_plane_state->dcc = plane_info.dcc;
5651         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5652         dc_plane_state->flip_int_enabled = true;
5653
5654         /*
5655          * Always set input transfer function, since plane state is refreshed
5656          * every time.
5657          */
5658         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5659         if (ret)
5660                 return ret;
5661
5662         return 0;
5663 }
5664
5665 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5666                                            const struct dm_connector_state *dm_state,
5667                                            struct dc_stream_state *stream)
5668 {
5669         enum amdgpu_rmx_type rmx_type;
5670
5671         struct rect src = { 0 }; /* viewport in composition space */
5672         struct rect dst = { 0 }; /* stream addressable area */
5673
5674         /* no mode. nothing to be done */
5675         if (!mode)
5676                 return;
5677
5678         /* Full screen scaling by default */
5679         src.width = mode->hdisplay;
5680         src.height = mode->vdisplay;
5681         dst.width = stream->timing.h_addressable;
5682         dst.height = stream->timing.v_addressable;
5683
5684         if (dm_state) {
5685                 rmx_type = dm_state->scaling;
5686                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5687                         if (src.width * dst.height <
5688                                         src.height * dst.width) {
5689                                 /* height needs less upscaling/more downscaling */
5690                                 dst.width = src.width *
5691                                                 dst.height / src.height;
5692                         } else {
5693                                 /* width needs less upscaling/more downscaling */
5694                                 dst.height = src.height *
5695                                                 dst.width / src.width;
5696                         }
5697                 } else if (rmx_type == RMX_CENTER) {
5698                         dst = src;
5699                 }
5700
5701                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5702                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5703
5704                 if (dm_state->underscan_enable) {
5705                         dst.x += dm_state->underscan_hborder / 2;
5706                         dst.y += dm_state->underscan_vborder / 2;
5707                         dst.width -= dm_state->underscan_hborder;
5708                         dst.height -= dm_state->underscan_vborder;
5709                 }
5710         }
5711
5712         stream->src = src;
5713         stream->dst = dst;
5714
5715         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5716                       dst.x, dst.y, dst.width, dst.height);
5717
5718 }
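
/*
 * Worked example (hypothetical numbers) for the RMX_ASPECT math above:
 * a 1280x720 source on a 1920x1200 timing gives
 * src.width * dst.height (1536000) > src.height * dst.width (1382400),
 * so dst.height = 720 * 1920 / 1280 = 1080, and the centering step sets
 * dst.y = (1200 - 1080) / 2 = 60, preserving the 16:9 aspect ratio.
 */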
5719
5720 static enum dc_color_depth
5721 convert_color_depth_from_display_info(const struct drm_connector *connector,
5722                                       bool is_y420, int requested_bpc)
5723 {
5724         uint8_t bpc;
5725
5726         if (is_y420) {
5727                 bpc = 8;
5728
5729                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5730                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5731                         bpc = 16;
5732                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5733                         bpc = 12;
5734                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5735                         bpc = 10;
5736         } else {
5737                 bpc = (uint8_t)connector->display_info.bpc;
5738                 /* Assume 8 bpc by default if no bpc is specified. */
5739                 bpc = bpc ? bpc : 8;
5740         }
5741
5742         if (requested_bpc > 0) {
5743                 /*
5744                  * Cap display bpc based on the user requested value.
5745                  *
5746                  * The value for state->max_bpc may not be correctly updated
5747                  * depending on when the connector gets added to the state
5748                  * or if this was called outside of atomic check, so it
5749                  * can't be used directly.
5750                  */
5751                 bpc = min_t(u8, bpc, requested_bpc);
5752
5753                 /* Round down to the nearest even number. */
5754                 bpc = bpc - (bpc & 1);
5755         }
5756
5757         switch (bpc) {
5758         case 0:
5759                 /*
5760                  * Temporary workaround: DRM doesn't parse color depth for
5761                  * EDID revisions before 1.4.
5762                  * TODO: Fix EDID parsing
5763                  */
5764                 return COLOR_DEPTH_888;
5765         case 6:
5766                 return COLOR_DEPTH_666;
5767         case 8:
5768                 return COLOR_DEPTH_888;
5769         case 10:
5770                 return COLOR_DEPTH_101010;
5771         case 12:
5772                 return COLOR_DEPTH_121212;
5773         case 14:
5774                 return COLOR_DEPTH_141414;
5775         case 16:
5776                 return COLOR_DEPTH_161616;
5777         default:
5778                 return COLOR_DEPTH_UNDEFINED;
5779         }
5780 }
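
/*
 * Illustrative example (hypothetical sink): a panel reporting 12 bpc with
 * a user-requested max_bpc of 10 yields min(12, 10) = 10, already even,
 * so COLOR_DEPTH_101010 is returned; a request of 11 would also round
 * down to 10.
 */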
5781
5782 static enum dc_aspect_ratio
5783 get_aspect_ratio(const struct drm_display_mode *mode_in)
5784 {
5785         /* 1-1 mapping, since both enums follow the HDMI spec. */
5786         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5787 }
5788
5789 static enum dc_color_space
5790 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5791 {
5792         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5793
5794         switch (dc_crtc_timing->pixel_encoding) {
5795         case PIXEL_ENCODING_YCBCR422:
5796         case PIXEL_ENCODING_YCBCR444:
5797         case PIXEL_ENCODING_YCBCR420:
5798         {
5799                 /*
5800                  * According to the HDMI spec, 27.03 MHz is the separation
5801                  * point between HDTV and SDTV; use YCbCr709 above it and
5802                  * YCbCr601 below it.
5803                  */
5804                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5805                         if (dc_crtc_timing->flags.Y_ONLY)
5806                                 color_space =
5807                                         COLOR_SPACE_YCBCR709_LIMITED;
5808                         else
5809                                 color_space = COLOR_SPACE_YCBCR709;
5810                 } else {
5811                         if (dc_crtc_timing->flags.Y_ONLY)
5812                                 color_space =
5813                                         COLOR_SPACE_YCBCR601_LIMITED;
5814                         else
5815                                 color_space = COLOR_SPACE_YCBCR601;
5816                 }
5817
5818         }
5819         break;
5820         case PIXEL_ENCODING_RGB:
5821                 color_space = COLOR_SPACE_SRGB;
5822                 break;
5823
5824         default:
5825                 WARN_ON(1);
5826                 break;
5827         }
5828
5829         return color_space;
5830 }
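
/*
 * Illustrative example: a YCbCr 4:2:2 timing at 74.25 MHz has
 * pix_clk_100hz = 742500 > 270300, so COLOR_SPACE_YCBCR709 is chosen;
 * a 13.5 MHz SDTV timing (pix_clk_100hz = 135000) falls back to
 * COLOR_SPACE_YCBCR601.
 */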
5831
5832 static bool adjust_colour_depth_from_display_info(
5833         struct dc_crtc_timing *timing_out,
5834         const struct drm_display_info *info)
5835 {
5836         enum dc_color_depth depth = timing_out->display_color_depth;
5837         int normalized_clk;
5838         do {
5839                 normalized_clk = timing_out->pix_clk_100hz / 10;
5840                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5841                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5842                         normalized_clk /= 2;
5843                 /* Adjust the pixel clock per the HDMI spec based on colour depth. */
5844                 switch (depth) {
5845                 case COLOR_DEPTH_888:
5846                         break;
5847                 case COLOR_DEPTH_101010:
5848                         normalized_clk = (normalized_clk * 30) / 24;
5849                         break;
5850                 case COLOR_DEPTH_121212:
5851                         normalized_clk = (normalized_clk * 36) / 24;
5852                         break;
5853                 case COLOR_DEPTH_161616:
5854                         normalized_clk = (normalized_clk * 48) / 24;
5855                         break;
5856                 default:
5857                         /* The above depths are the only ones valid for HDMI. */
5858                         return false;
5859                 }
5860                 if (normalized_clk <= info->max_tmds_clock) {
5861                         timing_out->display_color_depth = depth;
5862                         return true;
5863                 }
5864         } while (--depth > COLOR_DEPTH_666);
5865         return false;
5866 }
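
/*
 * Worked example (hypothetical sink caps): a 600 MHz 4:4:4 timing at
 * COLOR_DEPTH_121212 normalizes to 600 * 36 / 24 = 900 MHz. If
 * info->max_tmds_clock is 600 MHz, the loop steps down to 10 bpc
 * (750 MHz, still too high) and then to 8 bpc (600 MHz), which fits.
 */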
5867
5868 static void fill_stream_properties_from_drm_display_mode(
5869         struct dc_stream_state *stream,
5870         const struct drm_display_mode *mode_in,
5871         const struct drm_connector *connector,
5872         const struct drm_connector_state *connector_state,
5873         const struct dc_stream_state *old_stream,
5874         int requested_bpc)
5875 {
5876         struct dc_crtc_timing *timing_out = &stream->timing;
5877         const struct drm_display_info *info = &connector->display_info;
5878         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5879         struct hdmi_vendor_infoframe hv_frame;
5880         struct hdmi_avi_infoframe avi_frame;
5881
5882         memset(&hv_frame, 0, sizeof(hv_frame));
5883         memset(&avi_frame, 0, sizeof(avi_frame));
5884
5885         timing_out->h_border_left = 0;
5886         timing_out->h_border_right = 0;
5887         timing_out->v_border_top = 0;
5888         timing_out->v_border_bottom = 0;
5889         /* TODO: un-hardcode */
5890         if (drm_mode_is_420_only(info, mode_in)
5891                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5892                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5893         else if (drm_mode_is_420_also(info, mode_in)
5894                         && aconnector->force_yuv420_output)
5895                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5896         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5897                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5898                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5899         else
5900                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5901
5902         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5903         timing_out->display_color_depth = convert_color_depth_from_display_info(
5904                 connector,
5905                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5906                 requested_bpc);
5907         timing_out->scan_type = SCANNING_TYPE_NODATA;
5908         timing_out->hdmi_vic = 0;
5909
5910         if (old_stream) {
5911                 timing_out->vic = old_stream->timing.vic;
5912                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5913                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5914         } else {
5915                 timing_out->vic = drm_match_cea_mode(mode_in);
5916                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5917                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5918                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5919                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5920         }
5921
5922         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5923                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5924                 timing_out->vic = avi_frame.video_code;
5925                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5926                 timing_out->hdmi_vic = hv_frame.vic;
5927         }
5928
5929         if (is_freesync_video_mode(mode_in, aconnector)) {
5930                 timing_out->h_addressable = mode_in->hdisplay;
5931                 timing_out->h_total = mode_in->htotal;
5932                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5933                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5934                 timing_out->v_total = mode_in->vtotal;
5935                 timing_out->v_addressable = mode_in->vdisplay;
5936                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5937                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5938                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5939         } else {
5940                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5941                 timing_out->h_total = mode_in->crtc_htotal;
5942                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5943                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5944                 timing_out->v_total = mode_in->crtc_vtotal;
5945                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5946                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5947                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5948                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5949         }
5950
5951         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5952
5953         stream->output_color_space = get_output_color_space(timing_out);
5954
5955         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5956         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5957         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5958                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5959                     drm_mode_is_420_also(info, mode_in) &&
5960                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5961                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5962                         adjust_colour_depth_from_display_info(timing_out, info);
5963                 }
5964         }
5965 }
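
/*
 * Unit note with an illustrative figure: DRM mode clocks are in kHz
 * while dc_crtc_timing.pix_clk_100hz is in units of 100 Hz, hence the
 * "* 10" above; e.g. a 148500 kHz (148.5 MHz) mode becomes
 * pix_clk_100hz = 1485000.
 */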
5966
5967 static void fill_audio_info(struct audio_info *audio_info,
5968                             const struct drm_connector *drm_connector,
5969                             const struct dc_sink *dc_sink)
5970 {
5971         int i = 0;
5972         int cea_revision = 0;
5973         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5974
5975         audio_info->manufacture_id = edid_caps->manufacturer_id;
5976         audio_info->product_id = edid_caps->product_id;
5977
5978         cea_revision = drm_connector->display_info.cea_rev;
5979
5980         strscpy(audio_info->display_name,
5981                 edid_caps->display_name,
5982                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5983
5984         if (cea_revision >= 3) {
5985                 audio_info->mode_count = edid_caps->audio_mode_count;
5986
5987                 for (i = 0; i < audio_info->mode_count; ++i) {
5988                         audio_info->modes[i].format_code =
5989                                         (enum audio_format_code)
5990                                         (edid_caps->audio_modes[i].format_code);
5991                         audio_info->modes[i].channel_count =
5992                                         edid_caps->audio_modes[i].channel_count;
5993                         audio_info->modes[i].sample_rates.all =
5994                                         edid_caps->audio_modes[i].sample_rate;
5995                         audio_info->modes[i].sample_size =
5996                                         edid_caps->audio_modes[i].sample_size;
5997                 }
5998         }
5999
6000         audio_info->flags.all = edid_caps->speaker_flags;
6001
6002         /* TODO: We only check for progressive mode; check for interlaced mode too */
6003         if (drm_connector->latency_present[0]) {
6004                 audio_info->video_latency = drm_connector->video_latency[0];
6005                 audio_info->audio_latency = drm_connector->audio_latency[0];
6006         }
6007
6008         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6009
6010 }
6011
6012 static void
6013 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6014                                       struct drm_display_mode *dst_mode)
6015 {
6016         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6017         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6018         dst_mode->crtc_clock = src_mode->crtc_clock;
6019         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6020         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6021         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6022         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6023         dst_mode->crtc_htotal = src_mode->crtc_htotal;
6024         dst_mode->crtc_hskew = src_mode->crtc_hskew;
6025         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6026         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6027         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6028         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6029         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6030 }
6031
6032 static void
6033 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6034                                         const struct drm_display_mode *native_mode,
6035                                         bool scale_enabled)
6036 {
6037         if (scale_enabled) {
6038                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6039         } else if (native_mode->clock == drm_mode->clock &&
6040                         native_mode->htotal == drm_mode->htotal &&
6041                         native_mode->vtotal == drm_mode->vtotal) {
6042                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6043         } else {
6044                 /* neither scaling nor an amdgpu-inserted mode; nothing to patch */
6045         }
6046 }
6047
6048 static struct dc_sink *
6049 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6050 {
6051         struct dc_sink_init_data sink_init_data = { 0 };
6052         struct dc_sink *sink = NULL;

6053         sink_init_data.link = aconnector->dc_link;
6054         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6055
6056         sink = dc_sink_create(&sink_init_data);
6057         if (!sink) {
6058                 DRM_ERROR("Failed to create sink!\n");
6059                 return NULL;
6060         }
6061         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6062
6063         return sink;
6064 }
6065
6066 static void set_multisync_trigger_params(
6067                 struct dc_stream_state *stream)
6068 {
6069         struct dc_stream_state *master = NULL;
6070
6071         if (stream->triggered_crtc_reset.enabled) {
6072                 master = stream->triggered_crtc_reset.event_source;
6073                 stream->triggered_crtc_reset.event =
6074                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6075                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6076                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6077         }
6078 }
6079
6080 static void set_master_stream(struct dc_stream_state *stream_set[],
6081                               int stream_count)
6082 {
6083         int j, highest_rfr = 0, master_stream = 0;
6084
6085         for (j = 0;  j < stream_count; j++) {
6086                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6087                         int refresh_rate = 0;
6088
6089                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6090                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6091                         if (refresh_rate > highest_rfr) {
6092                                 highest_rfr = refresh_rate;
6093                                 master_stream = j;
6094                         }
6095                 }
6096         }
6097         for (j = 0;  j < stream_count; j++) {
6098                 if (stream_set[j])
6099                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6100         }
6101 }
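
/*
 * Worked example for the refresh-rate computation above: a 1080p stream
 * with pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125 gives
 * (1485000 * 100) / (2200 * 1125) = 60 Hz, so it would win mastership
 * over any slower multisync-enabled stream.
 */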
6102
6103 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6104 {
6105         int i = 0;
6106         struct dc_stream_state *stream;
6107
6108         if (context->stream_count < 2)
6109                 return;
6110         for (i = 0; i < context->stream_count; i++) {
6111                 if (!context->streams[i])
6112                         continue;
6113                 /*
6114                  * TODO: add a function to read AMD VSDB bits and set
6115                  * crtc_sync_master.multi_sync_enabled flag
6116                  * For now it's set to false
6117                  */
6118         }
6119
6120         set_master_stream(context->streams, context->stream_count);
6121
6122         for (i = 0; i < context->stream_count; i++) {
6123                 stream = context->streams[i];
6124
6125                 if (!stream)
6126                         continue;
6127
6128                 set_multisync_trigger_params(stream);
6129         }
6130 }
6131
6132 #if defined(CONFIG_DRM_AMD_DC_DCN)
6133 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6134                             struct dc_sink *sink, struct dc_stream_state *stream,
6135                             struct dsc_dec_dpcd_caps *dsc_caps)
6136 {
6137         stream->timing.flags.DSC = 0;
6138         dsc_caps->is_dsc_supported = false;
6139
6140         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6141                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6142                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6143                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6144                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6145                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6146                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6147                                 dsc_caps);
6148         }
6149 }
6150
6151 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6152                                     struct dc_sink *sink, struct dc_stream_state *stream,
6153                                     struct dsc_dec_dpcd_caps *dsc_caps,
6154                                     uint32_t max_dsc_target_bpp_limit_override)
6155 {
6156         const struct dc_link_settings *verified_link_cap = NULL;
6157         uint32_t link_bw_in_kbps;
6158         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6159         struct dc *dc = sink->ctx->dc;
6160         struct dc_dsc_bw_range bw_range = {0};
6161         struct dc_dsc_config dsc_cfg = {0};
6162
6163         verified_link_cap = dc_link_get_link_cap(stream->link);
6164         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6165         edp_min_bpp_x16 = 8 * 16;
6166         edp_max_bpp_x16 = 8 * 16;
6167
6168         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6169                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6170
6171         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6172                 edp_min_bpp_x16 = edp_max_bpp_x16;
6173
6174         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6175                                 dc->debug.dsc_min_slice_height_override,
6176                                 edp_min_bpp_x16, edp_max_bpp_x16,
6177                                 dsc_caps,
6178                                 &stream->timing,
6179                                 &bw_range)) {
6180
6181                 if (bw_range.max_kbps < link_bw_in_kbps) {
6182                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6183                                         dsc_caps,
6184                                         dc->debug.dsc_min_slice_height_override,
6185                                         max_dsc_target_bpp_limit_override,
6186                                         0,
6187                                         &stream->timing,
6188                                         &dsc_cfg)) {
6189                                 stream->timing.dsc_cfg = dsc_cfg;
6190                                 stream->timing.flags.DSC = 1;
6191                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6192                         }
6193                         return;
6194                 }
6195         }
6196
6197         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6198                                 dsc_caps,
6199                                 dc->debug.dsc_min_slice_height_override,
6200                                 max_dsc_target_bpp_limit_override,
6201                                 link_bw_in_kbps,
6202                                 &stream->timing,
6203                                 &dsc_cfg)) {
6204                 stream->timing.dsc_cfg = dsc_cfg;
6205                 stream->timing.flags.DSC = 1;
6206         }
6207 }
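
/*
 * Illustrative clamp (hypothetical caps): with the 8 bpp default,
 * edp_min_bpp_x16 = edp_max_bpp_x16 = 128; if the sink reports
 * edp_max_bits_per_pixel = 96 (6 bpp in x16 units), the max is clamped
 * to 96 and the min is pulled down to match before the bandwidth-range
 * check runs.
 */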
6208
6209 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6210                                         struct dc_sink *sink, struct dc_stream_state *stream,
6211                                         struct dsc_dec_dpcd_caps *dsc_caps)
6212 {
6213         struct drm_connector *drm_connector = &aconnector->base;
6214         uint32_t link_bandwidth_kbps;
6215         uint32_t max_dsc_target_bpp_limit_override = 0;
6216         struct dc *dc = sink->ctx->dc;
6217         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6218         uint32_t dsc_max_supported_bw_in_kbps;
6219
6220         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6221                                                         dc_link_get_link_cap(aconnector->dc_link));
6222
6223         if (stream->link && stream->link->local_sink)
6224                 max_dsc_target_bpp_limit_override =
6225                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6226
6227         /* Set DSC policy according to dsc_clock_en */
6228         dc_dsc_policy_set_enable_dsc_when_not_needed(
6229                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6230
6231         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6232             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6233
6234                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6235
6236         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6237                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6238                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6239                                                 dsc_caps,
6240                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6241                                                 max_dsc_target_bpp_limit_override,
6242                                                 link_bandwidth_kbps,
6243                                                 &stream->timing,
6244                                                 &stream->timing.dsc_cfg)) {
6245                                 stream->timing.flags.DSC = 1;
6246                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6247                                                                  __func__, drm_connector->name);
6248                         }
6249                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6250                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6251                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6252                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6253
6254                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6255                                         max_supported_bw_in_kbps > 0 &&
6256                                         dsc_max_supported_bw_in_kbps > 0)
6257                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6258                                                 dsc_caps,
6259                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6260                                                 max_dsc_target_bpp_limit_override,
6261                                                 dsc_max_supported_bw_in_kbps,
6262                                                 &stream->timing,
6263                                                 &stream->timing.dsc_cfg)) {
6264                                         stream->timing.flags.DSC = 1;
6265                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6266                                                                          __func__, drm_connector->name);
6267                                 }
6268                 }
6269         }
6270
6271         /* Overwrite the stream flag if DSC is enabled through debugfs */
6272         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6273                 stream->timing.flags.DSC = 1;
6274
6275         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6276                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6277
6278         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6279                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6280
6281         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6282                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6283 }
6284 #endif /* CONFIG_DRM_AMD_DC_DCN */
6285
6286 /**
6287  * DOC: FreeSync Video
6288  *
6289  * When a userspace application wants to play a video, the content follows a
6290  * standard format definition that usually specifies the FPS for that format.
6291  * The below list illustrates some video format and the expected FPS,
6292  * respectively:
6293  *
6294  * - TV/NTSC (23.976 FPS)
6295  * - Cinema (24 FPS)
6296  * - TV/PAL (25 FPS)
6297  * - TV/NTSC (29.97 FPS)
6298  * - TV/NTSC (30 FPS)
6299  * - Cinema HFR (48 FPS)
6300  * - TV/PAL (50 FPS)
6301  * - Commonly used (60 FPS)
6302  * - Multiples of 24 (48,72,96,120 FPS)
6303  *
6304  * The list of standard video formats is not huge and can be added to the
6305  * connector modeset list beforehand. With that, userspace can leverage
6306  * FreeSync to extend the front porch in order to attain the target refresh
6307  * rate. Such a switch happens seamlessly, without screen blanking or
6308  * reprogramming of the output in any other way. If userspace requests a
6309  * modesetting change compatible with FreeSync modes that differ only in
6310  * refresh rate, DC will skip the full update and avoid any blink during
6311  * the transition. For example, a video player can switch the mode from
6312  * 60 Hz to 30 Hz for TV/NTSC content when it goes full screen without
6313  * causing any display blink.
6315  */
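
/*
 * Illustrative stretch (hypothetical timing): starting from a 60 Hz base
 * mode with vtotal = 1125, a 48 FPS variant keeps the pixel clock and
 * horizontal timing and grows the vertical front porch so that
 * vtotal ~= 1125 * 60 / 48 ~= 1406. The helpers below find that base
 * mode and recognize such variants.
 */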
6316 static struct drm_display_mode *
6317 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6318                           bool use_probed_modes)
6319 {
6320         struct drm_display_mode *m, *m_pref = NULL;
6321         u16 current_refresh, highest_refresh;
6322         struct list_head *list_head = use_probed_modes ?
6323                                                     &aconnector->base.probed_modes :
6324                                                     &aconnector->base.modes;
6325
6326         if (aconnector->freesync_vid_base.clock != 0)
6327                 return &aconnector->freesync_vid_base;
6328
6329         /* Find the preferred mode */
6330         list_for_each_entry(m, list_head, head) {
6331                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6332                         m_pref = m;
6333                         break;
6334                 }
6335         }
6336
6337         if (!m_pref) {
6338                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6339                 m_pref = list_first_entry_or_null(
6340                         &aconnector->base.modes, struct drm_display_mode, head);
6341                 if (!m_pref) {
6342                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6343                         return NULL;
6344                 }
6345         }
6346
6347         highest_refresh = drm_mode_vrefresh(m_pref);
6348
6349         /*
6350          * Find the mode with highest refresh rate with same resolution.
6351          * For some monitors, preferred mode is not the mode with highest
6352          * supported refresh rate.
6353          */
6354         list_for_each_entry(m, list_head, head) {
6355                 current_refresh = drm_mode_vrefresh(m);
6356
6357                 if (m->hdisplay == m_pref->hdisplay &&
6358                     m->vdisplay == m_pref->vdisplay &&
6359                     highest_refresh < current_refresh) {
6360                         highest_refresh = current_refresh;
6361                         m_pref = m;
6362                 }
6363         }
6364
6365         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6366         return m_pref;
6367 }
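
/*
 * Illustrative example (hypothetical mode list): with 1920x1080@60 marked
 * preferred and a 1920x1080@144 entry also present, the loop above picks
 * the 144 Hz mode as the freesync video base and caches it in
 * aconnector->freesync_vid_base for subsequent calls.
 */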
6368
6369 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6370                                    struct amdgpu_dm_connector *aconnector)
6371 {
6372         struct drm_display_mode *high_mode;
6373         int timing_diff;
6374
6375         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6376         if (!high_mode || !mode)
6377                 return false;
6378
6379         timing_diff = high_mode->vtotal - mode->vtotal;
6380
6381         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6382             high_mode->hdisplay != mode->hdisplay ||
6383             high_mode->vdisplay != mode->vdisplay ||
6384             high_mode->hsync_start != mode->hsync_start ||
6385             high_mode->hsync_end != mode->hsync_end ||
6386             high_mode->htotal != mode->htotal ||
6387             high_mode->hskew != mode->hskew ||
6388             high_mode->vscan != mode->vscan ||
6389             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6390             high_mode->vsync_end - mode->vsync_end != timing_diff)
6391                 return false;
6392         else
6393                 return true;
6394 }
6395
6396 static struct dc_stream_state *
6397 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6398                        const struct drm_display_mode *drm_mode,
6399                        const struct dm_connector_state *dm_state,
6400                        const struct dc_stream_state *old_stream,
6401                        int requested_bpc)
6402 {
6403         struct drm_display_mode *preferred_mode = NULL;
6404         struct drm_connector *drm_connector;
6405         const struct drm_connector_state *con_state =
6406                 dm_state ? &dm_state->base : NULL;
6407         struct dc_stream_state *stream = NULL;
6408         struct drm_display_mode mode = *drm_mode;
6409         struct drm_display_mode saved_mode;
6410         struct drm_display_mode *freesync_mode = NULL;
6411         bool native_mode_found = false;
6412         bool recalculate_timing = false;
6413         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6414         int mode_refresh;
6415         int preferred_refresh = 0;
6416 #if defined(CONFIG_DRM_AMD_DC_DCN)
6417         struct dsc_dec_dpcd_caps dsc_caps;
6418 #endif
6419         struct dc_sink *sink = NULL;
6420
6421         memset(&saved_mode, 0, sizeof(saved_mode));
6422
6423         if (aconnector == NULL) {
6424                 DRM_ERROR("aconnector is NULL!\n");
6425                 return stream;
6426         }
6427
6428         drm_connector = &aconnector->base;
6429
6430         if (!aconnector->dc_sink) {
6431                 sink = create_fake_sink(aconnector);
6432                 if (!sink)
6433                         return stream;
6434         } else {
6435                 sink = aconnector->dc_sink;
6436                 dc_sink_retain(sink);
6437         }
6438
6439         stream = dc_create_stream_for_sink(sink);
6440
6441         if (stream == NULL) {
6442                 DRM_ERROR("Failed to create stream for sink!\n");
6443                 goto finish;
6444         }
6445
6446         stream->dm_stream_context = aconnector;
6447
6448         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6449                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6450
6451         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6452                 /* Search for preferred mode */
6453                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6454                         native_mode_found = true;
6455                         break;
6456                 }
6457         }
6458         if (!native_mode_found)
6459                 preferred_mode = list_first_entry_or_null(
6460                                 &aconnector->base.modes,
6461                                 struct drm_display_mode,
6462                                 head);
6463
6464         mode_refresh = drm_mode_vrefresh(&mode);
6465
6466         if (preferred_mode == NULL) {
6467                 /*
6468                  * This may not be an error: the use case is when we have no
6469                  * usermode calls to reset and set the mode upon hotplug. In that
6470                  * case we set the mode ourselves to restore the previous one,
6471                  * and the mode list may not be filled in yet.
6472                  */
6473                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6474         } else {
6475                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6476                 if (recalculate_timing) {
6477                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6478                         drm_mode_copy(&saved_mode, &mode);
6479                         drm_mode_copy(&mode, freesync_mode);
6480                 } else {
6481                         decide_crtc_timing_for_drm_display_mode(
6482                                 &mode, preferred_mode, scale);
6483
6484                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6485                 }
6486         }
6487
6488         if (recalculate_timing)
6489                 drm_mode_set_crtcinfo(&saved_mode, 0);
6490         else if (!dm_state)
6491                 drm_mode_set_crtcinfo(&mode, 0);
6492
6493         /*
6494          * If scaling is enabled and the refresh rate didn't change,
6495          * copy the vic and polarities from the old timings.
6496          */
6497         if (!scale || mode_refresh != preferred_refresh)
6498                 fill_stream_properties_from_drm_display_mode(
6499                         stream, &mode, &aconnector->base, con_state, NULL,
6500                         requested_bpc);
6501         else
6502                 fill_stream_properties_from_drm_display_mode(
6503                         stream, &mode, &aconnector->base, con_state, old_stream,
6504                         requested_bpc);
6505
6506 #if defined(CONFIG_DRM_AMD_DC_DCN)
6507         /* SST DSC determination policy */
6508         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6509         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6510                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6511 #endif
6512
6513         update_stream_scaling_settings(&mode, dm_state, stream);
6514
6515         fill_audio_info(
6516                 &stream->audio_info,
6517                 drm_connector,
6518                 sink);
6519
6520         update_stream_signal(stream, sink);
6521
6522         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6523                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6524
6525         if (stream->link->psr_settings.psr_feature_enabled) {
6526                 /*
6527                  * Decide whether the stream supports VSC SDP colorimetry
6528                  * before building the VSC info packet.
6529                  */
6530                 stream->use_vsc_sdp_for_colorimetry = false;
6531                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6532                         stream->use_vsc_sdp_for_colorimetry =
6533                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6534                 } else {
6535                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6536                                 stream->use_vsc_sdp_for_colorimetry = true;
6537                 }
6538                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6539                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6540
6541         }
6542 finish:
6543         dc_sink_release(sink);
6544
6545         return stream;
6546 }
6547
6548 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6549 {
6550         drm_crtc_cleanup(crtc);
6551         kfree(crtc);
6552 }
6553
6554 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6555                                   struct drm_crtc_state *state)
6556 {
6557         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6558
6559         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6560         if (cur->stream)
6561                 dc_stream_release(cur->stream);
6562
6564         __drm_atomic_helper_crtc_destroy_state(state);
6565
6567         kfree(state);
6568 }
6569
6570 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6571 {
6572         struct dm_crtc_state *state;
6573
6574         if (crtc->state)
6575                 dm_crtc_destroy_state(crtc, crtc->state);
6576
6577         state = kzalloc(sizeof(*state), GFP_KERNEL);
6578         if (WARN_ON(!state))
6579                 return;
6580
6581         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6582 }
6583
6584 static struct drm_crtc_state *
6585 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6586 {
6587         struct dm_crtc_state *state, *cur;
6588
6589         if (WARN_ON(!crtc->state))
6590                 return NULL;
6591
6592         cur = to_dm_crtc_state(crtc->state);
6593
6594         state = kzalloc(sizeof(*state), GFP_KERNEL);
6595         if (!state)
6596                 return NULL;
6597
6598         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6599
6600         if (cur->stream) {
6601                 state->stream = cur->stream;
6602                 dc_stream_retain(state->stream);
6603         }
6604
6605         state->active_planes = cur->active_planes;
6606         state->vrr_infopacket = cur->vrr_infopacket;
6607         state->abm_level = cur->abm_level;
6608         state->vrr_supported = cur->vrr_supported;
6609         state->freesync_config = cur->freesync_config;
6610         state->cm_has_degamma = cur->cm_has_degamma;
6611         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6612         state->force_dpms_off = cur->force_dpms_off;
6613         /* TODO Duplicate dc_stream after the stream object is flattened */
6614
6615         return &state->base;
6616 }
6617
6618 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6619 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6620 {
6621         crtc_debugfs_init(crtc);
6622
6623         return 0;
6624 }
6625 #endif
6626
6627 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6628 {
6629         enum dc_irq_source irq_source;
6630         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6631         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6632         int rc;
6633
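        /*
         * The per-OTG VUPDATE sources are assumed to be laid out
         * contiguously in the dc_irq_source enum, which is what makes
         * the "base + otg_inst" arithmetic below valid.
         */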
6634         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6635
6636         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6637
6638         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6639                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6640         return rc;
6641 }
6642
6643 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6644 {
6645         enum dc_irq_source irq_source;
6646         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6647         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6648         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6649 #if defined(CONFIG_DRM_AMD_DC_DCN)
6650         struct amdgpu_display_manager *dm = &adev->dm;
6651         struct vblank_control_work *work;
6652 #endif
6653         int rc = 0;
6654
6655         if (enable) {
6656                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6657                 if (amdgpu_dm_vrr_active(acrtc_state))
6658                         rc = dm_set_vupdate_irq(crtc, true);
6659         } else {
6660                 /* vblank irq off -> vupdate irq off */
6661                 rc = dm_set_vupdate_irq(crtc, false);
6662         }
6663
6664         if (rc)
6665                 return rc;
6666
6667         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6668
6669         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6670                 return -EBUSY;
6671
6672         if (amdgpu_in_reset(adev))
6673                 return 0;
6674
6675 #if defined(CONFIG_DRM_AMD_DC_DCN)
6676         if (dm->vblank_control_workqueue) {
6677                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6678                 if (!work)
6679                         return -ENOMEM;
6680
6681                 INIT_WORK(&work->work, vblank_control_worker);
6682                 work->dm = dm;
6683                 work->acrtc = acrtc;
6684                 work->enable = enable;
6685
6686                 if (acrtc_state->stream) {
6687                         dc_stream_retain(acrtc_state->stream);
6688                         work->stream = acrtc_state->stream;
6689                 }
6690
6691                 queue_work(dm->vblank_control_workqueue, &work->work);
6692         }
6693 #endif
6694
6695         return 0;
6696 }
6697
6698 static int dm_enable_vblank(struct drm_crtc *crtc)
6699 {
6700         return dm_set_vblank(crtc, true);
6701 }
6702
6703 static void dm_disable_vblank(struct drm_crtc *crtc)
6704 {
6705         dm_set_vblank(crtc, false);
6706 }
6707
6708 /* Only the options currently available to the driver are implemented */
6709 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6710         .reset = dm_crtc_reset_state,
6711         .destroy = amdgpu_dm_crtc_destroy,
6712         .set_config = drm_atomic_helper_set_config,
6713         .page_flip = drm_atomic_helper_page_flip,
6714         .atomic_duplicate_state = dm_crtc_duplicate_state,
6715         .atomic_destroy_state = dm_crtc_destroy_state,
6716         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6717         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6718         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6719         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6720         .enable_vblank = dm_enable_vblank,
6721         .disable_vblank = dm_disable_vblank,
6722         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6723 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6724         .late_register = amdgpu_dm_crtc_late_register,
6725 #endif
6726 };
6727
6728 static enum drm_connector_status
6729 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6730 {
6731         bool connected;
6732         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6733
6734         /*
6735          * Notes:
6736          * 1. This interface is NOT called in context of HPD irq.
6737          * 2. This interface *is called* in context of user-mode ioctl, which
6738          * makes it a bad place for *any* MST-related activity.
6739          */
6740
6741         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6742             !aconnector->fake_enable)
6743                 connected = (aconnector->dc_sink != NULL);
6744         else
6745                 connected = (aconnector->base.force == DRM_FORCE_ON);
6746
6747         update_subconnector_property(aconnector);
6748
6749         return (connected ? connector_status_connected :
6750                         connector_status_disconnected);
6751 }
6752
6753 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6754                                             struct drm_connector_state *connector_state,
6755                                             struct drm_property *property,
6756                                             uint64_t val)
6757 {
6758         struct drm_device *dev = connector->dev;
6759         struct amdgpu_device *adev = drm_to_adev(dev);
6760         struct dm_connector_state *dm_old_state =
6761                 to_dm_connector_state(connector->state);
6762         struct dm_connector_state *dm_new_state =
6763                 to_dm_connector_state(connector_state);
6764
6765         int ret = -EINVAL;
6766
6767         if (property == dev->mode_config.scaling_mode_property) {
6768                 enum amdgpu_rmx_type rmx_type;
6769
6770                 switch (val) {
6771                 case DRM_MODE_SCALE_CENTER:
6772                         rmx_type = RMX_CENTER;
6773                         break;
6774                 case DRM_MODE_SCALE_ASPECT:
6775                         rmx_type = RMX_ASPECT;
6776                         break;
6777                 case DRM_MODE_SCALE_FULLSCREEN:
6778                         rmx_type = RMX_FULL;
6779                         break;
6780                 case DRM_MODE_SCALE_NONE:
6781                 default:
6782                         rmx_type = RMX_OFF;
6783                         break;
6784                 }
6785
6786                 if (dm_old_state->scaling == rmx_type)
6787                         return 0;
6788
6789                 dm_new_state->scaling = rmx_type;
6790                 ret = 0;
6791         } else if (property == adev->mode_info.underscan_hborder_property) {
6792                 dm_new_state->underscan_hborder = val;
6793                 ret = 0;
6794         } else if (property == adev->mode_info.underscan_vborder_property) {
6795                 dm_new_state->underscan_vborder = val;
6796                 ret = 0;
6797         } else if (property == adev->mode_info.underscan_property) {
6798                 dm_new_state->underscan_enable = val;
6799                 ret = 0;
6800         } else if (property == adev->mode_info.abm_level_property) {
6801                 dm_new_state->abm_level = val;
6802                 ret = 0;
6803         }
6804
6805         return ret;
6806 }
6807
6808 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6809                                             const struct drm_connector_state *state,
6810                                             struct drm_property *property,
6811                                             uint64_t *val)
6812 {
6813         struct drm_device *dev = connector->dev;
6814         struct amdgpu_device *adev = drm_to_adev(dev);
6815         struct dm_connector_state *dm_state =
6816                 to_dm_connector_state(state);
6817         int ret = -EINVAL;
6818
6819         if (property == dev->mode_config.scaling_mode_property) {
6820                 switch (dm_state->scaling) {
6821                 case RMX_CENTER:
6822                         *val = DRM_MODE_SCALE_CENTER;
6823                         break;
6824                 case RMX_ASPECT:
6825                         *val = DRM_MODE_SCALE_ASPECT;
6826                         break;
6827                 case RMX_FULL:
6828                         *val = DRM_MODE_SCALE_FULLSCREEN;
6829                         break;
6830                 case RMX_OFF:
6831                 default:
6832                         *val = DRM_MODE_SCALE_NONE;
6833                         break;
6834                 }
6835                 ret = 0;
6836         } else if (property == adev->mode_info.underscan_hborder_property) {
6837                 *val = dm_state->underscan_hborder;
6838                 ret = 0;
6839         } else if (property == adev->mode_info.underscan_vborder_property) {
6840                 *val = dm_state->underscan_vborder;
6841                 ret = 0;
6842         } else if (property == adev->mode_info.underscan_property) {
6843                 *val = dm_state->underscan_enable;
6844                 ret = 0;
6845         } else if (property == adev->mode_info.abm_level_property) {
6846                 *val = dm_state->abm_level;
6847                 ret = 0;
6848         }
6849
6850         return ret;
6851 }
6852
6853 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6854 {
6855         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6856
6857         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6858 }
6859
6860 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6861 {
6862         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6863         const struct dc_link *link = aconnector->dc_link;
6864         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6865         struct amdgpu_display_manager *dm = &adev->dm;
6866         int i;
6867
6868         /*
6869          * Call only if mst_mgr was initialized earlier, since it's not done
6870          * for all connector types.
6871          */
6872         if (aconnector->mst_mgr.dev)
6873                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6874
6875 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6876         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6877         for (i = 0; i < dm->num_of_edps; i++) {
6878                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6879                         backlight_device_unregister(dm->backlight_dev[i]);
6880                         dm->backlight_dev[i] = NULL;
6881                 }
6882         }
6883 #endif
6884
6885         if (aconnector->dc_em_sink)
6886                 dc_sink_release(aconnector->dc_em_sink);
6887         aconnector->dc_em_sink = NULL;
6888         if (aconnector->dc_sink)
6889                 dc_sink_release(aconnector->dc_sink);
6890         aconnector->dc_sink = NULL;
6891
6892         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6893         drm_connector_unregister(connector);
6894         drm_connector_cleanup(connector);
6895         if (aconnector->i2c) {
6896                 i2c_del_adapter(&aconnector->i2c->base);
6897                 kfree(aconnector->i2c);
6898         }
6899         kfree(aconnector->dm_dp_aux.aux.name);
6900
6901         kfree(connector);
6902 }
6903
6904 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6905 {
6906         struct dm_connector_state *state =
6907                 to_dm_connector_state(connector->state);
6908
6909         if (connector->state)
6910                 __drm_atomic_helper_connector_destroy_state(connector->state);
6911
6912         kfree(state);
6913
6914         state = kzalloc(sizeof(*state), GFP_KERNEL);
6915
6916         if (state) {
6917                 state->scaling = RMX_OFF;
6918                 state->underscan_enable = false;
6919                 state->underscan_hborder = 0;
6920                 state->underscan_vborder = 0;
6921                 state->base.max_requested_bpc = 8;
6922                 state->vcpi_slots = 0;
6923                 state->pbn = 0;
6924                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6925                         state->abm_level = amdgpu_dm_abm_level;
6926
6927                 __drm_atomic_helper_connector_reset(connector, &state->base);
6928         }
6929 }
6930
6931 struct drm_connector_state *
6932 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6933 {
6934         struct dm_connector_state *state =
6935                 to_dm_connector_state(connector->state);
6936
6937         struct dm_connector_state *new_state =
6938                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6939
6940         if (!new_state)
6941                 return NULL;
6942
6943         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6944
6945         new_state->freesync_capable = state->freesync_capable;
6946         new_state->abm_level = state->abm_level;
6947         new_state->scaling = state->scaling;
6948         new_state->underscan_enable = state->underscan_enable;
6949         new_state->underscan_hborder = state->underscan_hborder;
6950         new_state->underscan_vborder = state->underscan_vborder;
6951         new_state->vcpi_slots = state->vcpi_slots;
6952         new_state->pbn = state->pbn;
6953         return &new_state->base;
6954 }
6955
6956 static int
6957 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6958 {
6959         struct amdgpu_dm_connector *amdgpu_dm_connector =
6960                 to_amdgpu_dm_connector(connector);
6961         int r;
6962
6963         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6964             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6965                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6966                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6967                 if (r)
6968                         return r;
6969         }
6970
6971 #if defined(CONFIG_DEBUG_FS)
6972         connector_debugfs_init(amdgpu_dm_connector);
6973 #endif
6974
6975         return 0;
6976 }
6977
6978 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6979         .reset = amdgpu_dm_connector_funcs_reset,
6980         .detect = amdgpu_dm_connector_detect,
6981         .fill_modes = drm_helper_probe_single_connector_modes,
6982         .destroy = amdgpu_dm_connector_destroy,
6983         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6984         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6985         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6986         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6987         .late_register = amdgpu_dm_connector_late_register,
6988         .early_unregister = amdgpu_dm_connector_unregister
6989 };
6990
6991 static int get_modes(struct drm_connector *connector)
6992 {
6993         return amdgpu_dm_connector_get_modes(connector);
6994 }
6995
6996 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6997 {
6998         struct dc_sink_init_data init_params = {
6999                         .link = aconnector->dc_link,
7000                         .sink_signal = SIGNAL_TYPE_VIRTUAL
7001         };
7002         struct edid *edid;
7003
7004         if (!aconnector->base.edid_blob_ptr) {
7005                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7006                                 aconnector->base.name);
7007
7008                 aconnector->base.force = DRM_FORCE_OFF;
7009                 aconnector->base.override_edid = false;
7010                 return;
7011         }
7012
7013         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7014
7015         aconnector->edid = edid;
7016
7017         aconnector->dc_em_sink = dc_link_add_remote_sink(
7018                 aconnector->dc_link,
7019                 (uint8_t *)edid,
7020                 (edid->extensions + 1) * EDID_LENGTH,
7021                 &init_params);
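        /*
         * Each EDID block is EDID_LENGTH (128) bytes; edid->extensions
         * counts only the extension blocks, hence the "+ 1" above for
         * the base block.
         */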
7022
7023         if (aconnector->base.force == DRM_FORCE_ON) {
7024                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7025                 aconnector->dc_link->local_sink :
7026                 aconnector->dc_em_sink;
7027                 dc_sink_retain(aconnector->dc_sink);
7028         }
7029 }
7030
7031 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7032 {
7033         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7034
7035         /*
7036          * In case of a headless boot with force-on for a DP managed connector,
7037          * these settings have to be != 0 to get an initial modeset.
7038          */
7039         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7040                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7041                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7042         }
7043
7044
7045         aconnector->base.override_edid = true;
7046         create_eml_sink(aconnector);
7047 }
7048
7049 struct dc_stream_state *
7050 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7051                                 const struct drm_display_mode *drm_mode,
7052                                 const struct dm_connector_state *dm_state,
7053                                 const struct dc_stream_state *old_stream)
7054 {
7055         struct drm_connector *connector = &aconnector->base;
7056         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7057         struct dc_stream_state *stream;
7058         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7059         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7060         enum dc_status dc_result = DC_OK;
7061
7062         do {
7063                 stream = create_stream_for_sink(aconnector, drm_mode,
7064                                                 dm_state, old_stream,
7065                                                 requested_bpc);
7066                 if (stream == NULL) {
7067                         DRM_ERROR("Failed to create stream for sink!\n");
7068                         break;
7069                 }
7070
7071                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7072
7073                 if (dc_result != DC_OK) {
7074                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7075                                       drm_mode->hdisplay,
7076                                       drm_mode->vdisplay,
7077                                       drm_mode->clock,
7078                                       dc_result,
7079                                       dc_status_to_str(dc_result));
7080
7081                         dc_stream_release(stream);
7082                         stream = NULL;
7083                         requested_bpc -= 2; /* lower bpc to retry validation */
7084                 }
7085
7086         } while (stream == NULL && requested_bpc >= 6);
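        /*
         * Example: with max_requested_bpc == 10, the loop above attempts
         * validation at 10, 8 and finally 6 bpc before giving up.
         */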
7087
7088         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7089                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7090
7091                 aconnector->force_yuv420_output = true;
7092                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7093                                                 dm_state, old_stream);
7094                 aconnector->force_yuv420_output = false;
7095         }
7096
7097         return stream;
7098 }
7099
7100 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7101                                    struct drm_display_mode *mode)
7102 {
7103         int result = MODE_ERROR;
7104         struct dc_sink *dc_sink;
7105         /* TODO: Unhardcode stream count */
7106         struct dc_stream_state *stream;
7107         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7108
7109         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7110                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7111                 return result;
7112
7113         /*
7114          * Only run this the first time mode_valid is called to initialize
7115          * EDID mgmt
7116          */
7117         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7118                 !aconnector->dc_em_sink)
7119                 handle_edid_mgmt(aconnector);
7120
7121         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7122
7123         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7124                                 aconnector->base.force != DRM_FORCE_ON) {
7125                 DRM_ERROR("dc_sink is NULL!\n");
7126                 goto fail;
7127         }
7128
7129         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7130         if (stream) {
7131                 dc_stream_release(stream);
7132                 result = MODE_OK;
7133         }
7134
7135 fail:
7136         /* TODO: error handling */
7137         return result;
7138 }
7139
7140 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7141                                 struct dc_info_packet *out)
7142 {
7143         struct hdmi_drm_infoframe frame;
7144         unsigned char buf[30]; /* 26 + 4 */
7145         ssize_t len;
7146         int ret, i;
7147
7148         memset(out, 0, sizeof(*out));
7149
7150         if (!state->hdr_output_metadata)
7151                 return 0;
7152
7153         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7154         if (ret)
7155                 return ret;
7156
7157         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7158         if (len < 0)
7159                 return (int)len;
7160
7161         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7162         if (len != 30)
7163                 return -EINVAL;
7164
7165         /* Prepare the infopacket for DC. */
7166         switch (state->connector->connector_type) {
7167         case DRM_MODE_CONNECTOR_HDMIA:
7168                 out->hb0 = 0x87; /* type */
7169                 out->hb1 = 0x01; /* version */
7170                 out->hb2 = 0x1A; /* length */
7171                 out->sb[0] = buf[3]; /* checksum */
7172                 i = 1;
7173                 break;
7174
7175         case DRM_MODE_CONNECTOR_DisplayPort:
7176         case DRM_MODE_CONNECTOR_eDP:
7177                 out->hb0 = 0x00; /* sdp id, zero */
7178                 out->hb1 = 0x87; /* type */
7179                 out->hb2 = 0x1D; /* payload len - 1 */
7180                 out->hb3 = (0x13 << 2); /* sdp version */
7181                 out->sb[0] = 0x01; /* version */
7182                 out->sb[1] = 0x1A; /* length */
7183                 i = 2;
7184                 break;
7185
7186         default:
7187                 return -EINVAL;
7188         }
7189
7190         memcpy(&out->sb[i], &buf[4], 26);
7191         out->valid = true;
7192
7193         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7194                        sizeof(out->sb), false);
7195
7196         return 0;
7197 }
7198
7199 static int
7200 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7201                                  struct drm_atomic_state *state)
7202 {
7203         struct drm_connector_state *new_con_state =
7204                 drm_atomic_get_new_connector_state(state, conn);
7205         struct drm_connector_state *old_con_state =
7206                 drm_atomic_get_old_connector_state(state, conn);
7207         struct drm_crtc *crtc = new_con_state->crtc;
7208         struct drm_crtc_state *new_crtc_state;
7209         int ret;
7210
7211         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7212
7213         if (!crtc)
7214                 return 0;
7215
7216         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7217                 struct dc_info_packet hdr_infopacket;
7218
7219                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7220                 if (ret)
7221                         return ret;
7222
7223                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7224                 if (IS_ERR(new_crtc_state))
7225                         return PTR_ERR(new_crtc_state);
7226
7227                 /*
7228                  * DC considers the stream backends changed if the
7229                  * static metadata changes. Forcing the modeset also
7230                  * gives a simple way for userspace to switch from
7231                  * 8bpc to 10bpc when setting the metadata to enter
7232                  * or exit HDR.
7233                  *
7234                  * Changing the static metadata after it's been
7235                  * set is permissible, however. So only force a
7236                  * modeset if we're entering or exiting HDR.
7237                  */
7238                 new_crtc_state->mode_changed =
7239                         !old_con_state->hdr_output_metadata ||
7240                         !new_con_state->hdr_output_metadata;
7241         }
7242
7243         return 0;
7244 }
7245
7246 static const struct drm_connector_helper_funcs
7247 amdgpu_dm_connector_helper_funcs = {
7248         /*
7249          * If hotplugging a second, bigger display in FB console mode, the bigger
7250          * resolution modes will be filtered out by drm_mode_validate_size() and
7251          * will be missing after the user starts lightdm. So we need to rebuild the
7252          * mode list in the get_modes callback, not just return the mode count.
7253          */
7254         .get_modes = get_modes,
7255         .mode_valid = amdgpu_dm_connector_mode_valid,
7256         .atomic_check = amdgpu_dm_connector_atomic_check,
7257 };
7258
7259 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7260 {
7261 }
7262
7263 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7264 {
7265         struct drm_atomic_state *state = new_crtc_state->state;
7266         struct drm_plane *plane;
7267         int num_active = 0;
7268
7269         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7270                 struct drm_plane_state *new_plane_state;
7271
7272                 /* Cursor planes are "fake". */
7273                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7274                         continue;
7275
7276                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7277
7278                 if (!new_plane_state) {
7279                         /*
7280                          * The plane is enabled on the CRTC and hasn't changed
7281                          * state. This means that it previously passed
7282                          * validation and is therefore enabled.
7283                          */
7284                         num_active += 1;
7285                         continue;
7286                 }
7287
7288                 /* We need a framebuffer to be considered enabled. */
7289                 num_active += (new_plane_state->fb != NULL);
7290         }
7291
7292         return num_active;
7293 }
7294
7295 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7296                                          struct drm_crtc_state *new_crtc_state)
7297 {
7298         struct dm_crtc_state *dm_new_crtc_state =
7299                 to_dm_crtc_state(new_crtc_state);
7300
7301         dm_new_crtc_state->active_planes = 0;
7302
7303         if (!dm_new_crtc_state->stream)
7304                 return;
7305
7306         dm_new_crtc_state->active_planes =
7307                 count_crtc_active_planes(new_crtc_state);
7308 }
7309
7310 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7311                                        struct drm_atomic_state *state)
7312 {
7313         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7314                                                                           crtc);
7315         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7316         struct dc *dc = adev->dm.dc;
7317         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7318         int ret = -EINVAL;
7319
7320         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7321
7322         dm_update_crtc_active_planes(crtc, crtc_state);
7323
7324         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7325                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7326                 return ret;
7327         }
7328
7329         /*
7330          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7331          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7332          * planes are disabled, which is not supported by the hardware. And there is legacy
7333          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7334          */
7335         if (crtc_state->enable &&
7336             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7337                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7338                 return -EINVAL;
7339         }
7340
7341         /* In some use cases, like reset, no stream is attached */
7342         if (!dm_crtc_state->stream)
7343                 return 0;
7344
7345         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7346                 return 0;
7347
7348         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7349         return ret;
7350 }
7351
7352 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7353                                       const struct drm_display_mode *mode,
7354                                       struct drm_display_mode *adjusted_mode)
7355 {
7356         return true;
7357 }
7358
7359 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7360         .disable = dm_crtc_helper_disable,
7361         .atomic_check = dm_crtc_helper_atomic_check,
7362         .mode_fixup = dm_crtc_helper_mode_fixup,
7363         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7364 };
7365
7366 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7367 {
7368
7369 }
7370
7371 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7372 {
7373         switch (display_color_depth) {
7374         case COLOR_DEPTH_666:
7375                 return 6;
7376         case COLOR_DEPTH_888:
7377                 return 8;
7378         case COLOR_DEPTH_101010:
7379                 return 10;
7380         case COLOR_DEPTH_121212:
7381                 return 12;
7382         case COLOR_DEPTH_141414:
7383                 return 14;
7384         case COLOR_DEPTH_161616:
7385                 return 16;
7386         default:
7387                 break;
7388         }
7389         return 0;
7390 }
7391
7392 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7393                                           struct drm_crtc_state *crtc_state,
7394                                           struct drm_connector_state *conn_state)
7395 {
7396         struct drm_atomic_state *state = crtc_state->state;
7397         struct drm_connector *connector = conn_state->connector;
7398         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7399         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7400         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7401         struct drm_dp_mst_topology_mgr *mst_mgr;
7402         struct drm_dp_mst_port *mst_port;
7403         enum dc_color_depth color_depth;
7404         int clock, bpp = 0;
7405         bool is_y420 = false;
7406
7407         if (!aconnector->port || !aconnector->dc_sink)
7408                 return 0;
7409
7410         mst_port = aconnector->port;
7411         mst_mgr = &aconnector->mst_port->mst_mgr;
7412
7413         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7414                 return 0;
7415
7416         if (!state->duplicated) {
7417                 int max_bpc = conn_state->max_requested_bpc;
7418                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7419                                 aconnector->force_yuv420_output;
7420                 color_depth = convert_color_depth_from_display_info(connector,
7421                                                                     is_y420,
7422                                                                     max_bpc);
7423                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7424                 clock = adjusted_mode->clock;
7425                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7426         }
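        /*
         * Rough illustration (numbers are not from this driver): the PBN
         * value is the stream bandwidth in units of 54/64 MBps including
         * a 0.6% margin, so a 1080p60 stream (148500 kHz pixel clock) at
         * 24 bpp comes out to roughly 530 PBN.
         */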
7427         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7428                                                                            mst_mgr,
7429                                                                            mst_port,
7430                                                                            dm_new_connector_state->pbn,
7431                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7432         if (dm_new_connector_state->vcpi_slots < 0) {
7433                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7434                 return dm_new_connector_state->vcpi_slots;
7435         }
7436         return 0;
7437 }
7438
7439 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7440         .disable = dm_encoder_helper_disable,
7441         .atomic_check = dm_encoder_helper_atomic_check
7442 };
7443
7444 #if defined(CONFIG_DRM_AMD_DC_DCN)
7445 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7446                                             struct dc_state *dc_state,
7447                                             struct dsc_mst_fairness_vars *vars)
7448 {
7449         struct dc_stream_state *stream = NULL;
7450         struct drm_connector *connector;
7451         struct drm_connector_state *new_con_state;
7452         struct amdgpu_dm_connector *aconnector;
7453         struct dm_connector_state *dm_conn_state;
7454         int i, j;
7455         int vcpi, pbn_div, pbn, slot_num = 0;
7456
7457         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7458
7459                 aconnector = to_amdgpu_dm_connector(connector);
7460
7461                 if (!aconnector->port)
7462                         continue;
7463
7464                 if (!new_con_state || !new_con_state->crtc)
7465                         continue;
7466
7467                 dm_conn_state = to_dm_connector_state(new_con_state);
7468
7469                 for (j = 0; j < dc_state->stream_count; j++) {
7470                         stream = dc_state->streams[j];
7471                         if (!stream)
7472                                 continue;
7473
7474                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7475                                 break;
7476
7477                         stream = NULL;
7478                 }
7479
7480                 if (!stream)
7481                         continue;
7482
7483                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7484                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7485                 for (j = 0; j < dc_state->stream_count; j++) {
7486                         if (vars[j].aconnector == aconnector) {
7487                                 pbn = vars[j].pbn;
7488                                 break;
7489                         }
7490                 }
7491
7492                 if (j == dc_state->stream_count)
7493                         continue;
7494
7495                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
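                /*
                 * Illustrative numbers only: pbn_div is the link's PBN
                 * capacity per MTP time slot, so e.g. pbn = 532 on a
                 * 4-lane HBR2 link (pbn_div = 40) needs
                 * DIV_ROUND_UP(532, 40) = 14 of the 64 slots.
                 */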
7496
7497                 if (stream->timing.flags.DSC != 1) {
7498                         dm_conn_state->pbn = pbn;
7499                         dm_conn_state->vcpi_slots = slot_num;
7500
7501                         drm_dp_mst_atomic_enable_dsc(state,
7502                                                      aconnector->port,
7503                                                      dm_conn_state->pbn,
7504                                                      0,
7505                                                      false);
7506                         continue;
7507                 }
7508
7509                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7510                                                     aconnector->port,
7511                                                     pbn, pbn_div,
7512                                                     true);
7513                 if (vcpi < 0)
7514                         return vcpi;
7515
7516                 dm_conn_state->pbn = pbn;
7517                 dm_conn_state->vcpi_slots = vcpi;
7518         }
7519         return 0;
7520 }
7521 #endif
7522
7523 static void dm_drm_plane_reset(struct drm_plane *plane)
7524 {
7525         struct dm_plane_state *amdgpu_state = NULL;
7526
7527         if (plane->state)
7528                 plane->funcs->atomic_destroy_state(plane, plane->state);
7529
7530         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7531         WARN_ON(amdgpu_state == NULL);
7532
7533         if (amdgpu_state)
7534                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7535 }
7536
7537 static struct drm_plane_state *
7538 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7539 {
7540         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7541
7542         old_dm_plane_state = to_dm_plane_state(plane->state);
7543         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7544         if (!dm_plane_state)
7545                 return NULL;
7546
7547         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7548
7549         if (old_dm_plane_state->dc_state) {
7550                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7551                 dc_plane_state_retain(dm_plane_state->dc_state);
7552         }
7553
7554         return &dm_plane_state->base;
7555 }
7556
7557 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7558                                 struct drm_plane_state *state)
7559 {
7560         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7561
7562         if (dm_plane_state->dc_state)
7563                 dc_plane_state_release(dm_plane_state->dc_state);
7564
7565         drm_atomic_helper_plane_destroy_state(plane, state);
7566 }
7567
7568 static const struct drm_plane_funcs dm_plane_funcs = {
7569         .update_plane   = drm_atomic_helper_update_plane,
7570         .disable_plane  = drm_atomic_helper_disable_plane,
7571         .destroy        = drm_primary_helper_destroy,
7572         .reset = dm_drm_plane_reset,
7573         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7574         .atomic_destroy_state = dm_drm_plane_destroy_state,
7575         .format_mod_supported = dm_plane_format_mod_supported,
7576 };
7577
7578 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7579                                       struct drm_plane_state *new_state)
7580 {
7581         struct amdgpu_framebuffer *afb;
7582         struct drm_gem_object *obj;
7583         struct amdgpu_device *adev;
7584         struct amdgpu_bo *rbo;
7585         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7586         struct list_head list;
7587         struct ttm_validate_buffer tv;
7588         struct ww_acquire_ctx ticket;
7589         uint32_t domain;
7590         int r;
7591
7592         if (!new_state->fb) {
7593                 DRM_DEBUG_KMS("No FB bound\n");
7594                 return 0;
7595         }
7596
7597         afb = to_amdgpu_framebuffer(new_state->fb);
7598         obj = new_state->fb->obj[0];
7599         rbo = gem_to_amdgpu_bo(obj);
7600         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7601         INIT_LIST_HEAD(&list);
7602
7603         tv.bo = &rbo->tbo;
7604         tv.num_shared = 1;
7605         list_add(&tv.head, &list);
7606
7607         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7608         if (r) {
7609                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7610                 return r;
7611         }
7612
7613         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7614                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7615         else
7616                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7617
7618         r = amdgpu_bo_pin(rbo, domain);
7619         if (unlikely(r != 0)) {
7620                 if (r != -ERESTARTSYS)
7621                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7622                 ttm_eu_backoff_reservation(&ticket, &list);
7623                 return r;
7624         }
7625
7626         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7627         if (unlikely(r != 0)) {
7628                 amdgpu_bo_unpin(rbo);
7629                 ttm_eu_backoff_reservation(&ticket, &list);
7630                 DRM_ERROR("%p bind failed\n", rbo);
7631                 return r;
7632         }
7633
7634         ttm_eu_backoff_reservation(&ticket, &list);
7635
7636         afb->address = amdgpu_bo_gpu_offset(rbo);
7637
7638         amdgpu_bo_ref(rbo);
7639
7640         /*
7641          * We don't do surface updates on planes that have been newly created,
7642          * but we also don't have the afb->address during atomic check.
7643          *
7644          * Fill in buffer attributes depending on the address here, but only on
7645          * newly created planes since they're not being used by DC yet and this
7646          * won't modify global state.
7647          */
7648         dm_plane_state_old = to_dm_plane_state(plane->state);
7649         dm_plane_state_new = to_dm_plane_state(new_state);
7650
7651         if (dm_plane_state_new->dc_state &&
7652             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7653                 struct dc_plane_state *plane_state =
7654                         dm_plane_state_new->dc_state;
7655                 bool force_disable_dcc = !plane_state->dcc.enable;
7656
7657                 fill_plane_buffer_attributes(
7658                         adev, afb, plane_state->format, plane_state->rotation,
7659                         afb->tiling_flags,
7660                         &plane_state->tiling_info, &plane_state->plane_size,
7661                         &plane_state->dcc, &plane_state->address,
7662                         afb->tmz_surface, force_disable_dcc);
7663         }
7664
7665         return 0;
7666 }
7667
7668 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7669                                        struct drm_plane_state *old_state)
7670 {
7671         struct amdgpu_bo *rbo;
7672         int r;
7673
7674         if (!old_state->fb)
7675                 return;
7676
7677         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7678         r = amdgpu_bo_reserve(rbo, false);
7679         if (unlikely(r)) {
7680                 DRM_ERROR("failed to reserve rbo before unpin\n");
7681                 return;
7682         }
7683
7684         amdgpu_bo_unpin(rbo);
7685         amdgpu_bo_unreserve(rbo);
7686         amdgpu_bo_unref(&rbo);
7687 }
7688
7689 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7690                                        struct drm_crtc_state *new_crtc_state)
7691 {
7692         struct drm_framebuffer *fb = state->fb;
7693         int min_downscale, max_upscale;
7694         int min_scale = 0;
7695         int max_scale = INT_MAX;
7696
7697         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7698         if (fb && state->crtc) {
7699                 /* Validate viewport to cover the case when only the position changes */
7700                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7701                         int viewport_width = state->crtc_w;
7702                         int viewport_height = state->crtc_h;
7703
7704                         if (state->crtc_x < 0)
7705                                 viewport_width += state->crtc_x;
7706                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7707                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7708
7709                         if (state->crtc_y < 0)
7710                                 viewport_height += state->crtc_y;
7711                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7712                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7713
7714                         if (viewport_width < 0 || viewport_height < 0) {
7715                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7716                                 return -EINVAL;
7717                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7718                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7719                                 return -EINVAL;
7720                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7721                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7722                                 return -EINVAL;
7723                         }
7724
7725                 }
7726
7727                 /* Get min/max allowed scaling factors from plane caps. */
7728                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7729                                              &min_downscale, &max_upscale);
7730                 /*
7731                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7732                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7733                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7734                  */
7735                 min_scale = (1000 << 16) / max_upscale;
7736                 max_scale = (1000 << 16) / min_downscale;
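                /*
                 * Worked example with hypothetical caps: max_upscale = 16000
                 * (16x in DC's 1.0 == 1000 convention) gives min_scale =
                 * (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16 fixed point;
                 * min_downscale = 250 (0.25x) gives max_scale = 262144, i.e. 4.0.
                 */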
7737         }
7738
7739         return drm_atomic_helper_check_plane_state(
7740                 state, new_crtc_state, min_scale, max_scale, true, true);
7741 }
7742
7743 static int dm_plane_atomic_check(struct drm_plane *plane,
7744                                  struct drm_atomic_state *state)
7745 {
7746         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7747                                                                                  plane);
7748         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7749         struct dc *dc = adev->dm.dc;
7750         struct dm_plane_state *dm_plane_state;
7751         struct dc_scaling_info scaling_info;
7752         struct drm_crtc_state *new_crtc_state;
7753         int ret;
7754
7755         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7756
7757         dm_plane_state = to_dm_plane_state(new_plane_state);
7758
7759         if (!dm_plane_state->dc_state)
7760                 return 0;
7761
7762         new_crtc_state =
7763                 drm_atomic_get_new_crtc_state(state,
7764                                               new_plane_state->crtc);
7765         if (!new_crtc_state)
7766                 return -EINVAL;
7767
7768         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7769         if (ret)
7770                 return ret;
7771
7772         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7773         if (ret)
7774                 return ret;
7775
7776         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7777                 return 0;
7778
7779         return -EINVAL;
7780 }
7781
7782 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7783                                        struct drm_atomic_state *state)
7784 {
7785         /* Only support async updates on cursor planes. */
7786         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7787                 return -EINVAL;
7788
7789         return 0;
7790 }
7791
7792 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7793                                          struct drm_atomic_state *state)
7794 {
7795         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7796                                                                            plane);
7797         struct drm_plane_state *old_state =
7798                 drm_atomic_get_old_plane_state(state, plane);
7799
7800         trace_amdgpu_dm_atomic_update_cursor(new_state);
7801
7802         swap(plane->state->fb, new_state->fb);
7803
7804         plane->state->src_x = new_state->src_x;
7805         plane->state->src_y = new_state->src_y;
7806         plane->state->src_w = new_state->src_w;
7807         plane->state->src_h = new_state->src_h;
7808         plane->state->crtc_x = new_state->crtc_x;
7809         plane->state->crtc_y = new_state->crtc_y;
7810         plane->state->crtc_w = new_state->crtc_w;
7811         plane->state->crtc_h = new_state->crtc_h;
7812
7813         handle_cursor_update(plane, old_state);
7814 }
7815
7816 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7817         .prepare_fb = dm_plane_helper_prepare_fb,
7818         .cleanup_fb = dm_plane_helper_cleanup_fb,
7819         .atomic_check = dm_plane_atomic_check,
7820         .atomic_async_check = dm_plane_atomic_async_check,
7821         .atomic_async_update = dm_plane_atomic_async_update
7822 };
7823
7824 /*
7825  * TODO: these are currently initialized to RGB formats only.
7826  * For future use cases we should either initialize them dynamically based on
7827  * plane capabilities, or initialize this array to all formats, so the internal
7828  * DRM check succeeds, and let DC implement the proper check.
7829  */
7830 static const uint32_t rgb_formats[] = {
7831         DRM_FORMAT_XRGB8888,
7832         DRM_FORMAT_ARGB8888,
7833         DRM_FORMAT_RGBA8888,
7834         DRM_FORMAT_XRGB2101010,
7835         DRM_FORMAT_XBGR2101010,
7836         DRM_FORMAT_ARGB2101010,
7837         DRM_FORMAT_ABGR2101010,
7838         DRM_FORMAT_XRGB16161616,
7839         DRM_FORMAT_XBGR16161616,
7840         DRM_FORMAT_ARGB16161616,
7841         DRM_FORMAT_ABGR16161616,
7842         DRM_FORMAT_XBGR8888,
7843         DRM_FORMAT_ABGR8888,
7844         DRM_FORMAT_RGB565,
7845 };
7846
7847 static const uint32_t overlay_formats[] = {
7848         DRM_FORMAT_XRGB8888,
7849         DRM_FORMAT_ARGB8888,
7850         DRM_FORMAT_RGBA8888,
7851         DRM_FORMAT_XBGR8888,
7852         DRM_FORMAT_ABGR8888,
7853         DRM_FORMAT_RGB565
7854 };
7855
7856 static const u32 cursor_formats[] = {
7857         DRM_FORMAT_ARGB8888
7858 };
7859
7860 static int get_plane_formats(const struct drm_plane *plane,
7861                              const struct dc_plane_cap *plane_cap,
7862                              uint32_t *formats, int max_formats)
7863 {
7864         int i, num_formats = 0;
7865
7866         /*
7867          * TODO: Query support for each group of formats directly from
7868          * DC plane caps. This will require adding more formats to the
7869          * caps list.
7870          */
7871
7872         switch (plane->type) {
7873         case DRM_PLANE_TYPE_PRIMARY:
7874                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7875                         if (num_formats >= max_formats)
7876                                 break;
7877
7878                         formats[num_formats++] = rgb_formats[i];
7879                 }
7880
7881                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7882                         formats[num_formats++] = DRM_FORMAT_NV12;
7883                 if (plane_cap && plane_cap->pixel_format_support.p010)
7884                         formats[num_formats++] = DRM_FORMAT_P010;
7885                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7886                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7887                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7888                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7889                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7890                 }
7891                 break;
7892
7893         case DRM_PLANE_TYPE_OVERLAY:
7894                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7895                         if (num_formats >= max_formats)
7896                                 break;
7897
7898                         formats[num_formats++] = overlay_formats[i];
7899                 }
7900                 break;
7901
7902         case DRM_PLANE_TYPE_CURSOR:
7903                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7904                         if (num_formats >= max_formats)
7905                                 break;
7906
7907                         formats[num_formats++] = cursor_formats[i];
7908                 }
7909                 break;
7910         }
7911
7912         return num_formats;
7913 }
7914
7915 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7916                                 struct drm_plane *plane,
7917                                 unsigned long possible_crtcs,
7918                                 const struct dc_plane_cap *plane_cap)
7919 {
7920         uint32_t formats[32];
7921         int num_formats;
7922         int res = -EPERM;
7923         unsigned int supported_rotations;
7924         uint64_t *modifiers = NULL;
7925
7926         num_formats = get_plane_formats(plane, plane_cap, formats,
7927                                         ARRAY_SIZE(formats));
7928
7929         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7930         if (res)
7931                 return res;
7932
7933         if (modifiers == NULL)
7934                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7935
7936         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7937                                        &dm_plane_funcs, formats, num_formats,
7938                                        modifiers, plane->type, NULL);
7939         kfree(modifiers);
7940         if (res)
7941                 return res;
7942
7943         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7944             plane_cap && plane_cap->per_pixel_alpha) {
7945                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7946                                           BIT(DRM_MODE_BLEND_PREMULTI);
7947
7948                 drm_plane_create_alpha_property(plane);
7949                 drm_plane_create_blend_mode_property(plane, blend_caps);
7950         }
7951
7952         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7953             plane_cap &&
7954             (plane_cap->pixel_format_support.nv12 ||
7955              plane_cap->pixel_format_support.p010)) {
7956                 /* This only affects YUV formats. */
7957                 drm_plane_create_color_properties(
7958                         plane,
7959                         BIT(DRM_COLOR_YCBCR_BT601) |
7960                         BIT(DRM_COLOR_YCBCR_BT709) |
7961                         BIT(DRM_COLOR_YCBCR_BT2020),
7962                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7963                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7964                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7965         }
7966
7967         supported_rotations =
7968                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7969                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7970
7971         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7972             plane->type != DRM_PLANE_TYPE_CURSOR)
7973                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7974                                                    supported_rotations);
7975
7976         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7977
7978         /* Create (reset) the plane state */
7979         if (plane->funcs->reset)
7980                 plane->funcs->reset(plane);
7981
7982         return 0;
7983 }
7984
7985 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7986                                struct drm_plane *plane,
7987                                uint32_t crtc_index)
7988 {
7989         struct amdgpu_crtc *acrtc = NULL;
7990         struct drm_plane *cursor_plane;
7991
7992         int res = -ENOMEM;
7993
7994         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7995         if (!cursor_plane)
7996                 goto fail;
7997
7998         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7999         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
8000
8001         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8002         if (!acrtc) {
                     res = -ENOMEM;
                     goto fail;
             }
8004
8005         res = drm_crtc_init_with_planes(
8006                         dm->ddev,
8007                         &acrtc->base,
8008                         plane,
8009                         cursor_plane,
8010                         &amdgpu_dm_crtc_funcs, NULL);
8011
8012         if (res)
8013                 goto fail;
8014
8015         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8016
8017         /* Create (reset) the CRTC state */
8018         if (acrtc->base.funcs->reset)
8019                 acrtc->base.funcs->reset(&acrtc->base);
8020
8021         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8022         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8023
8024         acrtc->crtc_id = crtc_index;
8025         acrtc->base.enabled = false;
8026         acrtc->otg_inst = -1;
8027
8028         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8029         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8030                                    true, MAX_COLOR_LUT_ENTRIES);
8031         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8032
8033         return 0;
8034
8035 fail:
8036         kfree(acrtc);
8037         kfree(cursor_plane);
8038         return res;
8039 }
8040
8042 static int to_drm_connector_type(enum signal_type st)
8043 {
8044         switch (st) {
8045         case SIGNAL_TYPE_HDMI_TYPE_A:
8046                 return DRM_MODE_CONNECTOR_HDMIA;
8047         case SIGNAL_TYPE_EDP:
8048                 return DRM_MODE_CONNECTOR_eDP;
8049         case SIGNAL_TYPE_LVDS:
8050                 return DRM_MODE_CONNECTOR_LVDS;
8051         case SIGNAL_TYPE_RGB:
8052                 return DRM_MODE_CONNECTOR_VGA;
8053         case SIGNAL_TYPE_DISPLAY_PORT:
8054         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8055                 return DRM_MODE_CONNECTOR_DisplayPort;
8056         case SIGNAL_TYPE_DVI_DUAL_LINK:
8057         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8058                 return DRM_MODE_CONNECTOR_DVID;
8059         case SIGNAL_TYPE_VIRTUAL:
8060                 return DRM_MODE_CONNECTOR_VIRTUAL;
8061
8062         default:
8063                 return DRM_MODE_CONNECTOR_Unknown;
8064         }
8065 }
8066
8067 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8068 {
8069         struct drm_encoder *encoder;
8070
8071         /* There is only one encoder per connector */
8072         drm_connector_for_each_possible_encoder(connector, encoder)
8073                 return encoder;
8074
8075         return NULL;
8076 }
8077
8078 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8079 {
8080         struct drm_encoder *encoder;
8081         struct amdgpu_encoder *amdgpu_encoder;
8082
8083         encoder = amdgpu_dm_connector_to_encoder(connector);
8084
8085         if (encoder == NULL)
8086                 return;
8087
8088         amdgpu_encoder = to_amdgpu_encoder(encoder);
8089
8090         amdgpu_encoder->native_mode.clock = 0;
8091
8092         if (!list_empty(&connector->probed_modes)) {
8093                 struct drm_display_mode *preferred_mode = NULL;
8094
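                     /*
                      * The probed list is expected to be sorted by the
                      * caller (see amdgpu_dm_connector_ddc_get_modes()),
                      * so only the first entry needs to be examined;
                      * hence the unconditional break below.
                      */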
8095                 list_for_each_entry(preferred_mode,
8096                                     &connector->probed_modes,
8097                                     head) {
8098                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8099                                 amdgpu_encoder->native_mode = *preferred_mode;
8100
8101                         break;
8102                 }
8103
8104         }
8105 }
8106
8107 static struct drm_display_mode *
8108 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8109                              char *name,
8110                              int hdisplay, int vdisplay)
8111 {
8112         struct drm_device *dev = encoder->dev;
8113         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8114         struct drm_display_mode *mode = NULL;
8115         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8116
8117         mode = drm_mode_duplicate(dev, native_mode);
8118
8119         if (mode == NULL)
8120                 return NULL;
8121
8122         mode->hdisplay = hdisplay;
8123         mode->vdisplay = vdisplay;
8124         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8125         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8126
8127         return mode;
8129 }
8130
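     /*
      * Add a set of common lower-than-native modes, derived from the
      * encoder's native mode, so userspace can select standard resolutions.
      * Modes larger than or equal to the native mode, and duplicates of
      * already-probed modes, are skipped.
      */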
8131 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8132                                                  struct drm_connector *connector)
8133 {
8134         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8135         struct drm_display_mode *mode = NULL;
8136         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8137         struct amdgpu_dm_connector *amdgpu_dm_connector =
8138                                 to_amdgpu_dm_connector(connector);
8139         int i;
8140         int n;
8141         struct mode_size {
8142                 char name[DRM_DISPLAY_MODE_LEN];
8143                 int w;
8144                 int h;
8145         } common_modes[] = {
8146                 {  "640x480",  640,  480},
8147                 {  "800x600",  800,  600},
8148                 { "1024x768", 1024,  768},
8149                 { "1280x720", 1280,  720},
8150                 { "1280x800", 1280,  800},
8151                 {"1280x1024", 1280, 1024},
8152                 { "1440x900", 1440,  900},
8153                 {"1680x1050", 1680, 1050},
8154                 {"1600x1200", 1600, 1200},
8155                 {"1920x1080", 1920, 1080},
8156                 {"1920x1200", 1920, 1200}
8157         };
8158
8159         n = ARRAY_SIZE(common_modes);
8160
8161         for (i = 0; i < n; i++) {
8162                 struct drm_display_mode *curmode = NULL;
8163                 bool mode_existed = false;
8164
8165                 if (common_modes[i].w > native_mode->hdisplay ||
8166                     common_modes[i].h > native_mode->vdisplay ||
8167                    (common_modes[i].w == native_mode->hdisplay &&
8168                     common_modes[i].h == native_mode->vdisplay))
8169                         continue;
8170
8171                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8172                         if (common_modes[i].w == curmode->hdisplay &&
8173                             common_modes[i].h == curmode->vdisplay) {
8174                                 mode_existed = true;
8175                                 break;
8176                         }
8177                 }
8178
8179                 if (mode_existed)
8180                         continue;
8181
8182                 mode = amdgpu_dm_create_common_mode(encoder,
8183                                 common_modes[i].name, common_modes[i].w,
8184                                 common_modes[i].h);
8185                 if (!mode)
8186                         continue;
8187
8188                 drm_mode_probed_add(connector, mode);
8189                 amdgpu_dm_connector->num_modes++;
8190         }
8191 }
8192
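     /*
      * Apply any panel-orientation quirk for internal panels: the actual
      * orientation is unknown here, so pass DRM_MODE_PANEL_ORIENTATION_UNKNOWN
      * together with the native resolution and let the DRM quirk table
      * identify the panel.
      */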
8193 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8194 {
8195         struct drm_encoder *encoder;
8196         struct amdgpu_encoder *amdgpu_encoder;
8197         const struct drm_display_mode *native_mode;
8198
8199         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8200             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8201                 return;
8202
8203         encoder = amdgpu_dm_connector_to_encoder(connector);
8204         if (!encoder)
8205                 return;
8206
8207         amdgpu_encoder = to_amdgpu_encoder(encoder);
8208
8209         native_mode = &amdgpu_encoder->native_mode;
8210         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8211                 return;
8212
8213         drm_connector_set_panel_orientation_with_quirk(connector,
8214                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8215                                                        native_mode->hdisplay,
8216                                                        native_mode->vdisplay);
8217 }
8218
8219 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8220                                               struct edid *edid)
8221 {
8222         struct amdgpu_dm_connector *amdgpu_dm_connector =
8223                         to_amdgpu_dm_connector(connector);
8224
8225         if (edid) {
8226                 /* empty probed_modes */
8227                 INIT_LIST_HEAD(&connector->probed_modes);
8228                 amdgpu_dm_connector->num_modes =
8229                                 drm_add_edid_modes(connector, edid);
8230
8231                 /* Sort the probed modes before calling
8232                  * amdgpu_dm_get_native_mode(), since an EDID can
8233                  * contain more than one preferred mode. A mode later
8234                  * in the probed list may be preferred at a higher
8235                  * resolution: for example, 3840x2160 in the base EDID
8236                  * preferred timing and 4096x2160 in a later DID
8237                  * extension block.
8238                  */
8239                 drm_mode_sort(&connector->probed_modes);
8240                 amdgpu_dm_get_native_mode(connector);
8241
8242                 /* Freesync capabilities are reset by calling
8243                  * drm_add_edid_modes() and need to be
8244                  * restored here.
8245                  */
8246                 amdgpu_dm_update_freesync_caps(connector, edid);
8247
8248                 amdgpu_set_panel_orientation(connector);
8249         } else {
8250                 amdgpu_dm_connector->num_modes = 0;
8251         }
8252 }
8253
8254 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8255                               struct drm_display_mode *mode)
8256 {
8257         struct drm_display_mode *m;
8258
8259         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8260                 if (drm_mode_equal(m, mode))
8261                         return true;
8262         }
8263
8264         return false;
8265 }
8266
8267 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8268 {
8269         const struct drm_display_mode *m;
8270         struct drm_display_mode *new_mode;
8271         uint i;
8272         uint32_t new_modes_count = 0;
8273
8274         /* Standard FPS values
8275          *
8276          * 23.976       - TV/NTSC
8277          * 24           - Cinema
8278          * 25           - TV/PAL
8279          * 29.97        - TV/NTSC
8280          * 30           - TV/NTSC
8281          * 48           - Cinema HFR
8282          * 50           - TV/PAL
8283          * 60           - Commonly used
8284          * 48,72,96,120 - Multiples of 24
8285          */
8286         static const uint32_t common_rates[] = {
8287                 23976, 24000, 25000, 29970, 30000,
8288                 48000, 50000, 60000, 72000, 96000, 120000
8289         };
8290
8291         /*
8292          * Find mode with highest refresh rate with the same resolution
8293          * as the preferred mode. Some monitors report a preferred mode
8294          * with lower resolution than the highest refresh rate supported.
8295          */
8296
8297         m = get_highest_refresh_rate_mode(aconnector, true);
8298         if (!m)
8299                 return 0;
8300
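             /*
              * For each target rate, keep the pixel clock and htotal fixed
              * and solve refresh = clock / (htotal * vtotal) for vtotal:
              *
              *   vtotal = clock[kHz] * 1000 * 1000 / (rate[mHz] * htotal)
              *
              * For example, a 594000 kHz / htotal 4400 mode (4K@60)
              * retargeted to 48000 mHz needs
              * 594000000000 / (48000 * 4400) ~= 2812 total lines.
              */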
8301         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8302                 uint64_t target_vtotal, target_vtotal_diff;
8303                 uint64_t num, den;
8304
8305                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8306                         continue;
8307
8308                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8309                     common_rates[i] > aconnector->max_vfreq * 1000)
8310                         continue;
8311
8312                 num = (unsigned long long)m->clock * 1000 * 1000;
8313                 den = common_rates[i] * (unsigned long long)m->htotal;
8314                 target_vtotal = div_u64(num, den);
8315                 target_vtotal_diff = target_vtotal - m->vtotal;
8316
8317                 /* Check for illegal modes */
8318                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8319                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8320                     m->vtotal + target_vtotal_diff < m->vsync_end)
8321                         continue;
8322
8323                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8324                 if (!new_mode)
8325                         goto out;
8326
8327                 new_mode->vtotal += (u16)target_vtotal_diff;
8328                 new_mode->vsync_start += (u16)target_vtotal_diff;
8329                 new_mode->vsync_end += (u16)target_vtotal_diff;
8330                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8331                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8332
8333                 if (!is_duplicate_mode(aconnector, new_mode)) {
8334                         drm_mode_probed_add(&aconnector->base, new_mode);
8335                         new_modes_count += 1;
8336                 } else {
8337                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
8338         }
8339  out:
8340         return new_modes_count;
8341 }
8342
8343 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8344                                                    struct edid *edid)
8345 {
8346         struct amdgpu_dm_connector *amdgpu_dm_connector =
8347                 to_amdgpu_dm_connector(connector);
8348
8349         if (!edid)
8350                 return;
8351
8352         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8353                 amdgpu_dm_connector->num_modes +=
8354                         add_fs_modes(amdgpu_dm_connector);
8355 }
8356
8357 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8358 {
8359         struct amdgpu_dm_connector *amdgpu_dm_connector =
8360                         to_amdgpu_dm_connector(connector);
8361         struct drm_encoder *encoder;
8362         struct edid *edid = amdgpu_dm_connector->edid;
8363
8364         encoder = amdgpu_dm_connector_to_encoder(connector);
8365
8366         if (!drm_edid_is_valid(edid)) {
8367                 amdgpu_dm_connector->num_modes =
8368                                 drm_add_modes_noedid(connector, 640, 480);
8369         } else {
8370                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8371                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8372                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8373         }
8374         amdgpu_dm_fbc_init(connector);
8375
8376         return amdgpu_dm_connector->num_modes;
8377 }
8378
8379 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8380                                      struct amdgpu_dm_connector *aconnector,
8381                                      int connector_type,
8382                                      struct dc_link *link,
8383                                      int link_index)
8384 {
8385         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8386
8387         /*
8388          * Some of the properties below require access to state, like bpc.
8389          * Allocate some default initial connector state with our reset helper.
8390          */
8391         if (aconnector->base.funcs->reset)
8392                 aconnector->base.funcs->reset(&aconnector->base);
8393
8394         aconnector->connector_id = link_index;
8395         aconnector->dc_link = link;
8396         aconnector->base.interlace_allowed = false;
8397         aconnector->base.doublescan_allowed = false;
8398         aconnector->base.stereo_allowed = false;
8399         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8400         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8401         aconnector->audio_inst = -1;
8402         mutex_init(&aconnector->hpd_lock);
8403
8404         /*
8405          * Configure HPD hot plug support. connector->polled defaults
8406          * to 0, which means HPD hot plug is not supported.
8407          */
8408         switch (connector_type) {
8409         case DRM_MODE_CONNECTOR_HDMIA:
8410                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8411                 aconnector->base.ycbcr_420_allowed =
8412                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8413                 break;
8414         case DRM_MODE_CONNECTOR_DisplayPort:
8415                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8416                 link->link_enc = link_enc_cfg_get_link_enc(link);
8417                 ASSERT(link->link_enc);
8418                 if (link->link_enc)
8419                         aconnector->base.ycbcr_420_allowed =
8420                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
8421                 break;
8422         case DRM_MODE_CONNECTOR_DVID:
8423                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8424                 break;
8425         default:
8426                 break;
8427         }
8428
8429         drm_object_attach_property(&aconnector->base.base,
8430                                 dm->ddev->mode_config.scaling_mode_property,
8431                                 DRM_MODE_SCALE_NONE);
8432
8433         drm_object_attach_property(&aconnector->base.base,
8434                                 adev->mode_info.underscan_property,
8435                                 UNDERSCAN_OFF);
8436         drm_object_attach_property(&aconnector->base.base,
8437                                 adev->mode_info.underscan_hborder_property,
8438                                 0);
8439         drm_object_attach_property(&aconnector->base.base,
8440                                 adev->mode_info.underscan_vborder_property,
8441                                 0);
8442
8443         if (!aconnector->mst_port)
8444                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8445
8446         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8447         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8448         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8449
8450         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8451             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8452                 drm_object_attach_property(&aconnector->base.base,
8453                                 adev->mode_info.abm_level_property, 0);
8454         }
8455
8456         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8457             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8458             connector_type == DRM_MODE_CONNECTOR_eDP) {
8459                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8460
8461                 if (!aconnector->mst_port)
8462                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8463
8464 #ifdef CONFIG_DRM_AMD_DC_HDCP
8465                 if (adev->dm.hdcp_workqueue)
8466                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8467 #endif
8468         }
8469 }
8470
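     /*
      * I2C transfer handler: translate the i2c_msg array into a DC
      * i2c_command and submit it over the link's DDC channel. Returns the
      * number of messages on success, or -EIO on allocation failure or if
      * DC rejects the command.
      */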
8471 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8472                               struct i2c_msg *msgs, int num)
8473 {
8474         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8475         struct ddc_service *ddc_service = i2c->ddc_service;
8476         struct i2c_command cmd;
8477         int i;
8478         int result = -EIO;
8479
8480         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8481
8482         if (!cmd.payloads)
8483                 return result;
8484
8485         cmd.number_of_payloads = num;
8486         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8487         cmd.speed = 100;
8488
8489         for (i = 0; i < num; i++) {
8490                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8491                 cmd.payloads[i].address = msgs[i].addr;
8492                 cmd.payloads[i].length = msgs[i].len;
8493                 cmd.payloads[i].data = msgs[i].buf;
8494         }
8495
8496         if (dc_submit_i2c(
8497                         ddc_service->ctx->dc,
8498                         ddc_service->ddc_pin->hw_info.ddc_channel,
8499                         &cmd))
8500                 result = num;
8501
8502         kfree(cmd.payloads);
8503         return result;
8504 }
8505
8506 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8507 {
8508         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8509 }
8510
8511 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8512         .master_xfer = amdgpu_dm_i2c_xfer,
8513         .functionality = amdgpu_dm_i2c_func,
8514 };
8515
8516 static struct amdgpu_i2c_adapter *
8517 create_i2c(struct ddc_service *ddc_service,
8518            int link_index,
8519            int *res)
8520 {
8521         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8522         struct amdgpu_i2c_adapter *i2c;
8523
8524         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8525         if (!i2c)
8526                 return NULL;
8527         i2c->base.owner = THIS_MODULE;
8528         i2c->base.class = I2C_CLASS_DDC;
8529         i2c->base.dev.parent = &adev->pdev->dev;
8530         i2c->base.algo = &amdgpu_dm_i2c_algo;
8531         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8532         i2c_set_adapdata(&i2c->base, i2c);
8533         i2c->ddc_service = ddc_service;
8534         if (i2c->ddc_service->ddc_pin)
8535                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8536
8537         return i2c;
8538 }
8539
8541 /*
8542  * Note: this function assumes that dc_link_detect() was called for the
8543  * dc_link which will be represented by this aconnector.
8544  */
8545 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8546                                     struct amdgpu_dm_connector *aconnector,
8547                                     uint32_t link_index,
8548                                     struct amdgpu_encoder *aencoder)
8549 {
8550         int res = 0;
8551         int connector_type;
8552         struct dc *dc = dm->dc;
8553         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8554         struct amdgpu_i2c_adapter *i2c;
8555
8556         link->priv = aconnector;
8557
8558         DRM_DEBUG_DRIVER("%s()\n", __func__);
8559
8560         i2c = create_i2c(link->ddc, link->link_index, &res);
8561         if (!i2c) {
8562                 DRM_ERROR("Failed to create i2c adapter data\n");
8563                 return -ENOMEM;
8564         }
8565
8566         aconnector->i2c = i2c;
8567         res = i2c_add_adapter(&i2c->base);
8568
8569         if (res) {
8570                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8571                 goto out_free;
8572         }
8573
8574         connector_type = to_drm_connector_type(link->connector_signal);
8575
8576         res = drm_connector_init_with_ddc(
8577                         dm->ddev,
8578                         &aconnector->base,
8579                         &amdgpu_dm_connector_funcs,
8580                         connector_type,
8581                         &i2c->base);
8582
8583         if (res) {
8584                 DRM_ERROR("connector_init failed\n");
8585                 aconnector->connector_id = -1;
8586                 goto out_free;
8587         }
8588
8589         drm_connector_helper_add(
8590                         &aconnector->base,
8591                         &amdgpu_dm_connector_helper_funcs);
8592
8593         amdgpu_dm_connector_init_helper(
8594                 dm,
8595                 aconnector,
8596                 connector_type,
8597                 link,
8598                 link_index);
8599
8600         drm_connector_attach_encoder(
8601                 &aconnector->base, &aencoder->base);
8602
8603         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8604                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8605                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8606
8607 out_free:
8608         if (res) {
8609                 kfree(i2c);
8610                 aconnector->i2c = NULL;
8611         }
8612         return res;
8613 }
8614
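     /*
      * Build the possible_crtcs bitmask for the encoders: one bit per
      * CRTC, i.e. (1 << num_crtc) - 1, clamped to the six CRTCs the
      * hardware exposes.
      */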
8615 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8616 {
8617         switch (adev->mode_info.num_crtc) {
8618         case 1:
8619                 return 0x1;
8620         case 2:
8621                 return 0x3;
8622         case 3:
8623                 return 0x7;
8624         case 4:
8625                 return 0xf;
8626         case 5:
8627                 return 0x1f;
8628         case 6:
8629         default:
8630                 return 0x3f;
8631         }
8632 }
8633
8634 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8635                                   struct amdgpu_encoder *aencoder,
8636                                   uint32_t link_index)
8637 {
8638         struct amdgpu_device *adev = drm_to_adev(dev);
8639
8640         int res = drm_encoder_init(dev,
8641                                    &aencoder->base,
8642                                    &amdgpu_dm_encoder_funcs,
8643                                    DRM_MODE_ENCODER_TMDS,
8644                                    NULL);
8645
8646         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8647
8648         if (!res)
8649                 aencoder->encoder_id = link_index;
8650         else
8651                 aencoder->encoder_id = -1;
8652
8653         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8654
8655         return res;
8656 }
8657
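     /*
      * Enable or disable the per-CRTC interrupt sources (pageflip, and
      * vline0 for secure display) together with the DRM vblank machinery.
      * Note the ordering: vblank is switched on before the IRQ references
      * are taken, and switched off after they are released.
      */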
8658 static void manage_dm_interrupts(struct amdgpu_device *adev,
8659                                  struct amdgpu_crtc *acrtc,
8660                                  bool enable)
8661 {
8662         /*
8663          * We have no guarantee that the frontend index maps to the same
8664          * backend index - some even map to more than one.
8665          *
8666          * TODO: Use a different interrupt or check DC itself for the mapping.
8667          */
8668         int irq_type =
8669                 amdgpu_display_crtc_idx_to_irq_type(
8670                         adev,
8671                         acrtc->crtc_id);
8672
8673         if (enable) {
8674                 drm_crtc_vblank_on(&acrtc->base);
8675                 amdgpu_irq_get(
8676                         adev,
8677                         &adev->pageflip_irq,
8678                         irq_type);
8679 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8680                 amdgpu_irq_get(
8681                         adev,
8682                         &adev->vline0_irq,
8683                         irq_type);
8684 #endif
8685         } else {
8686 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8687                 amdgpu_irq_put(
8688                         adev,
8689                         &adev->vline0_irq,
8690                         irq_type);
8691 #endif
8692                 amdgpu_irq_put(
8693                         adev,
8694                         &adev->pageflip_irq,
8695                         irq_type);
8696                 drm_crtc_vblank_off(&acrtc->base);
8697         }
8698 }
8699
8700 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8701                                       struct amdgpu_crtc *acrtc)
8702 {
8703         int irq_type =
8704                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8705
8706         /*
8707          * This reads the current state for the IRQ and forcibly
8708          * reapplies the setting to hardware.
8709          */
8710         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8711 }
8712
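     /*
      * Return true if the scaling mode or the underscan borders changed in
      * a way that requires the stream's scaling parameters to be
      * reprogrammed.
      */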
8713 static bool
8714 is_scaling_state_different(const struct dm_connector_state *dm_state,
8715                            const struct dm_connector_state *old_dm_state)
8716 {
8717         if (dm_state->scaling != old_dm_state->scaling)
8718                 return true;
8719         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8720                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8721                         return true;
8722         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8723                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8724                         return true;
8725         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8726                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8727                 return true;
8728         return false;
8729 }
8730
8731 #ifdef CONFIG_DRM_AMD_DC_HDCP
8732 static bool is_content_protection_different(struct drm_connector_state *state,
8733                                             const struct drm_connector_state *old_state,
8734                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8735 {
8736         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8737         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8738
8739         /* Handle: Type0/1 change */
8740         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8741             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8742                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8743                 return true;
8744         }
8745
8746         /* CP is being re-enabled, ignore this
8747          *
8748          * Handles:     ENABLED -> DESIRED
8749          */
8750         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8751             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8752                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8753                 return false;
8754         }
8755
8756         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
8757          *
8758          * Handles:     UNDESIRED -> ENABLED
8759          */
8760         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8761             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8762                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8763
8764         /* Stream removed and re-enabled
8765          *
8766          * Can sometimes overlap with the HPD case,
8767          * thus set update_hdcp to false to avoid
8768          * setting HDCP multiple times.
8769          *
8770          * Handles:     DESIRED -> DESIRED (Special case)
8771          */
8772         if (!(old_state->crtc && old_state->crtc->enabled) &&
8773                 state->crtc && state->crtc->enabled &&
8774                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8775                 dm_con_state->update_hdcp = false;
8776                 return true;
8777         }
8778
8779         /* Hot-plug, headless s3, dpms
8780          *
8781          * Only start HDCP if the display is connected/enabled.
8782          * update_hdcp flag will be set to false until the next
8783          * HPD comes in.
8784          *
8785          * Handles:     DESIRED -> DESIRED (Special case)
8786          */
8787         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8788             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8789                 dm_con_state->update_hdcp = false;
8790                 return true;
8791         }
8792
8793         /*
8794          * Handles:     UNDESIRED -> UNDESIRED
8795          *              DESIRED -> DESIRED
8796          *              ENABLED -> ENABLED
8797          */
8798         if (old_state->content_protection == state->content_protection)
8799                 return false;
8800
8801         /*
8802          * Handles:     UNDESIRED -> DESIRED
8803          *              DESIRED -> UNDESIRED
8804          *              ENABLED -> UNDESIRED
8805          */
8806         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8807                 return true;
8808
8809         /*
8810          * Handles:     DESIRED -> ENABLED
8811          */
8812         return false;
8813 }
8814
8815 #endif
8816 static void remove_stream(struct amdgpu_device *adev,
8817                           struct amdgpu_crtc *acrtc,
8818                           struct dc_stream_state *stream)
8819 {
8820         /* this is the update mode case */
8821
8822         acrtc->otg_inst = -1;
8823         acrtc->enabled = false;
8824 }
8825
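     /*
      * Compute the DC cursor position for @plane on @crtc. If the cursor
      * is fully off-screen, position->enable is left false. Negative
      * crtc_x/crtc_y (cursor crossing the top/left edge) is handled by
      * clamping the position to 0 and shifting the hotspot instead.
      */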
8826 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8827                                struct dc_cursor_position *position)
8828 {
8829         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8830         int x, y;
8831         int xorigin = 0, yorigin = 0;
8832
8833         if (!crtc || !plane->state->fb)
8834                 return 0;
8835
8836         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8837             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8838                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8839                           __func__,
8840                           plane->state->crtc_w,
8841                           plane->state->crtc_h);
8842                 return -EINVAL;
8843         }
8844
8845         x = plane->state->crtc_x;
8846         y = plane->state->crtc_y;
8847
8848         if (x <= -amdgpu_crtc->max_cursor_width ||
8849             y <= -amdgpu_crtc->max_cursor_height)
8850                 return 0;
8851
8852         if (x < 0) {
8853                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8854                 x = 0;
8855         }
8856         if (y < 0) {
8857                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8858                 y = 0;
8859         }
8860         position->enable = true;
8861         position->translate_by_source = true;
8862         position->x = x;
8863         position->y = y;
8864         position->x_hotspot = xorigin;
8865         position->y_hotspot = yorigin;
8866
8867         return 0;
8868 }
8869
8870 static void handle_cursor_update(struct drm_plane *plane,
8871                                  struct drm_plane_state *old_plane_state)
8872 {
8873         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8874         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8875         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8876         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8877         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8878         uint64_t address = afb ? afb->address : 0;
8879         struct dc_cursor_position position = {0};
8880         struct dc_cursor_attributes attributes;
8881         int ret;
8882
8883         if (!plane->state->fb && !old_plane_state->fb)
8884                 return;
8885
8886         DC_LOG_CURSOR("%s: crtc_id=%d with size %d x %d\n",
8887                       __func__,
8888                       amdgpu_crtc->crtc_id,
8889                       plane->state->crtc_w,
8890                       plane->state->crtc_h);
8891
8892         ret = get_cursor_position(plane, crtc, &position);
8893         if (ret)
8894                 return;
8895
8896         if (!position.enable) {
8897                 /* turn off cursor */
8898                 if (crtc_state && crtc_state->stream) {
8899                         mutex_lock(&adev->dm.dc_lock);
8900                         dc_stream_set_cursor_position(crtc_state->stream,
8901                                                       &position);
8902                         mutex_unlock(&adev->dm.dc_lock);
8903                 }
8904                 return;
8905         }
8906
8907         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8908         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8909
8910         memset(&attributes, 0, sizeof(attributes));
8911         attributes.address.high_part = upper_32_bits(address);
8912         attributes.address.low_part  = lower_32_bits(address);
8913         attributes.width             = plane->state->crtc_w;
8914         attributes.height            = plane->state->crtc_h;
8915         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8916         attributes.rotation_angle    = 0;
8917         attributes.attribute_flags.value = 0;
8918
8919         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8920
8921         if (crtc_state->stream) {
8922                 mutex_lock(&adev->dm.dc_lock);
8923                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8924                                                          &attributes))
8925                         DRM_ERROR("DC failed to set cursor attributes\n");
8926
8927                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8928                                                    &position))
8929                         DRM_ERROR("DC failed to set cursor position\n");
8930                 mutex_unlock(&adev->dm.dc_lock);
8931         }
8932 }
8933
8934 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8935 {
8937         assert_spin_locked(&acrtc->base.dev->event_lock);
8938         WARN_ON(acrtc->event);
8939
8940         acrtc->event = acrtc->base.state->event;
8941
8942         /* Set the flip status */
8943         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8944
8945         /* Mark this event as consumed */
8946         acrtc->base.state->event = NULL;
8947
8948         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8949                      acrtc->crtc_id);
8950 }
8951
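     /*
      * Called from the flip path: run the per-flip FreeSync handling,
      * rebuild the VRR infopacket, and push the resulting timing
      * adjustment and packet into the new stream and CRTC state under the
      * event lock.
      */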
8952 static void update_freesync_state_on_stream(
8953         struct amdgpu_display_manager *dm,
8954         struct dm_crtc_state *new_crtc_state,
8955         struct dc_stream_state *new_stream,
8956         struct dc_plane_state *surface,
8957         u32 flip_timestamp_in_us)
8958 {
8959         struct mod_vrr_params vrr_params;
8960         struct dc_info_packet vrr_infopacket = {0};
8961         struct amdgpu_device *adev = dm->adev;
8962         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8963         unsigned long flags;
8964         bool pack_sdp_v1_3 = false;
8965
8966         if (!new_stream)
8967                 return;
8968
8969         /*
8970          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8971          * For now it's sufficient to just guard against these conditions.
8972          */
8973
8974         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8975                 return;
8976
8977         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8978         vrr_params = acrtc->dm_irq_params.vrr_params;
8979
8980         if (surface) {
8981                 mod_freesync_handle_preflip(
8982                         dm->freesync_module,
8983                         surface,
8984                         new_stream,
8985                         flip_timestamp_in_us,
8986                         &vrr_params);
8987
8988                 if (adev->family < AMDGPU_FAMILY_AI &&
8989                     amdgpu_dm_vrr_active(new_crtc_state)) {
8990                         mod_freesync_handle_v_update(dm->freesync_module,
8991                                                      new_stream, &vrr_params);
8992
8993                         /* Need to call this before the frame ends. */
8994                         dc_stream_adjust_vmin_vmax(dm->dc,
8995                                                    new_crtc_state->stream,
8996                                                    &vrr_params.adjust);
8997                 }
8998         }
8999
9000         mod_freesync_build_vrr_infopacket(
9001                 dm->freesync_module,
9002                 new_stream,
9003                 &vrr_params,
9004                 PACKET_TYPE_VRR,
9005                 TRANSFER_FUNC_UNKNOWN,
9006                 &vrr_infopacket,
9007                 pack_sdp_v1_3);
9008
9009         new_crtc_state->freesync_timing_changed |=
9010                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9011                         &vrr_params.adjust,
9012                         sizeof(vrr_params.adjust)) != 0);
9013
9014         new_crtc_state->freesync_vrr_info_changed |=
9015                 (memcmp(&new_crtc_state->vrr_infopacket,
9016                         &vrr_infopacket,
9017                         sizeof(vrr_infopacket)) != 0);
9018
9019         acrtc->dm_irq_params.vrr_params = vrr_params;
9020         new_crtc_state->vrr_infopacket = vrr_infopacket;
9021
9022         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9023         new_stream->vrr_infopacket = vrr_infopacket;
9024
9025         if (new_crtc_state->freesync_vrr_info_changed)
9026                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
9027                               new_crtc_state->base.crtc->base.id,
9028                               (int)new_crtc_state->base.vrr_enabled,
9029                               (int)vrr_params.state);
9030
9031         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9032 }
9033
9034 static void update_stream_irq_parameters(
9035         struct amdgpu_display_manager *dm,
9036         struct dm_crtc_state *new_crtc_state)
9037 {
9038         struct dc_stream_state *new_stream = new_crtc_state->stream;
9039         struct mod_vrr_params vrr_params;
9040         struct mod_freesync_config config = new_crtc_state->freesync_config;
9041         struct amdgpu_device *adev = dm->adev;
9042         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9043         unsigned long flags;
9044
9045         if (!new_stream)
9046                 return;
9047
9048         /*
9049          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9050          * For now it's sufficient to just guard against these conditions.
9051          */
9052         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9053                 return;
9054
9055         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9056         vrr_params = acrtc->dm_irq_params.vrr_params;
9057
9058         if (new_crtc_state->vrr_supported &&
9059             config.min_refresh_in_uhz &&
9060             config.max_refresh_in_uhz) {
9061                 /*
9062                  * if freesync compatible mode was set, config.state will be set
9063                  * in atomic check
9064                  */
9065                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9066                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9067                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9068                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9069                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9070                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9071                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9072                 } else {
9073                         config.state = new_crtc_state->base.vrr_enabled ?
9074                                                      VRR_STATE_ACTIVE_VARIABLE :
9075                                                      VRR_STATE_INACTIVE;
9076                 }
9077         } else {
9078                 config.state = VRR_STATE_UNSUPPORTED;
9079         }
9080
9081         mod_freesync_build_vrr_params(dm->freesync_module,
9082                                       new_stream,
9083                                       &config, &vrr_params);
9084
9085         new_crtc_state->freesync_timing_changed |=
9086                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9087                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9088
9089         new_crtc_state->freesync_config = config;
9090         /* Copy state for access from DM IRQ handler */
9091         acrtc->dm_irq_params.freesync_config = config;
9092         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9093         acrtc->dm_irq_params.vrr_params = vrr_params;
9094         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9095 }
9096
9097 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9098                                             struct dm_crtc_state *new_state)
9099 {
9100         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9101         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9102
9103         if (!old_vrr_active && new_vrr_active) {
9104                 /* Transition VRR inactive -> active:
9105                  * While VRR is active, we must not disable vblank irq, as a
9106                  * reenable after disable would compute bogus vblank/pflip
9107                  * timestamps if the reenable happens inside the display front-porch.
9108                  *
9109                  * We also need vupdate irq for the actual core vblank handling
9110                  * at end of vblank.
9111                  */
9112                 dm_set_vupdate_irq(new_state->base.crtc, true);
9113                 drm_crtc_vblank_get(new_state->base.crtc);
9114                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9115                                  __func__, new_state->base.crtc->base.id);
9116         } else if (old_vrr_active && !new_vrr_active) {
9117                 /* Transition VRR active -> inactive:
9118                  * Allow vblank irq disable again for fixed refresh rate.
9119                  */
9120                 dm_set_vupdate_irq(new_state->base.crtc, false);
9121                 drm_crtc_vblank_put(new_state->base.crtc);
9122                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9123                                  __func__, new_state->base.crtc->base.id);
9124         }
9125 }
9126
9127 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9128 {
9129         struct drm_plane *plane;
9130         struct drm_plane_state *old_plane_state;
9131         int i;
9132
9133         /*
9134          * TODO: Make this per-stream so we don't issue redundant updates for
9135          * commits with multiple streams.
9136          */
9137         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9138                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9139                         handle_cursor_update(plane, old_plane_state);
9140 }
9141
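     /*
      * Program all non-cursor plane updates for @pcrtc as a single DC
      * update bundle: gather the per-plane scaling, plane-info and
      * flip-address updates, wait for BO fences, and apply flip throttling
      * before submitting the bundle to DC.
      */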
9142 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9143                                     struct dc_state *dc_state,
9144                                     struct drm_device *dev,
9145                                     struct amdgpu_display_manager *dm,
9146                                     struct drm_crtc *pcrtc,
9147                                     bool wait_for_vblank)
9148 {
9149         uint32_t i;
9150         uint64_t timestamp_ns;
9151         struct drm_plane *plane;
9152         struct drm_plane_state *old_plane_state, *new_plane_state;
9153         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9154         struct drm_crtc_state *new_pcrtc_state =
9155                         drm_atomic_get_new_crtc_state(state, pcrtc);
9156         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9157         struct dm_crtc_state *dm_old_crtc_state =
9158                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9159         int planes_count = 0, vpos, hpos;
9160         long r;
9161         unsigned long flags;
9162         struct amdgpu_bo *abo;
9163         uint32_t target_vblank, last_flip_vblank;
9164         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9165         bool pflip_present = false;
9166         struct {
9167                 struct dc_surface_update surface_updates[MAX_SURFACES];
9168                 struct dc_plane_info plane_infos[MAX_SURFACES];
9169                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9170                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9171                 struct dc_stream_update stream_update;
9172         } *bundle;
9173
9174         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9175
9176         if (!bundle) {
9177                 dm_error("Failed to allocate update bundle\n");
9178                 goto cleanup;
9179         }
9180
9181         /*
9182          * Disable the cursor first if we're disabling all the planes.
9183          * It'll remain on the screen after the planes are re-enabled
9184          * if we don't.
9185          */
9186         if (acrtc_state->active_planes == 0)
9187                 amdgpu_dm_commit_cursors(state);
9188
9189         /* update planes when needed */
9190         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9191                 struct drm_crtc *crtc = new_plane_state->crtc;
9192                 struct drm_crtc_state *new_crtc_state;
9193                 struct drm_framebuffer *fb = new_plane_state->fb;
9194                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9195                 bool plane_needs_flip;
9196                 struct dc_plane_state *dc_plane;
9197                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9198
9199                 /* Cursor plane is handled after stream updates */
9200                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9201                         continue;
9202
9203                 if (!fb || !crtc || pcrtc != crtc)
9204                         continue;
9205
9206                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9207                 if (!new_crtc_state->active)
9208                         continue;
9209
9210                 dc_plane = dm_new_plane_state->dc_state;
9211
9212                 bundle->surface_updates[planes_count].surface = dc_plane;
9213                 if (new_pcrtc_state->color_mgmt_changed) {
9214                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9215                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9216                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9217                 }
9218
9219                 fill_dc_scaling_info(dm->adev, new_plane_state,
9220                                      &bundle->scaling_infos[planes_count]);
9221
9222                 bundle->surface_updates[planes_count].scaling_info =
9223                         &bundle->scaling_infos[planes_count];
9224
9225                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9226
9227                 pflip_present = pflip_present || plane_needs_flip;
9228
9229                 if (!plane_needs_flip) {
9230                         planes_count += 1;
9231                         continue;
9232                 }
9233
9234                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9235
9236                 /*
9237                  * Wait for all fences on this FB. Do limited wait to avoid
9238                  * deadlock during GPU reset when this fence will not signal
9239                  * but we hold reservation lock for the BO.
9240                  */
9241                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9242                                           msecs_to_jiffies(5000));
9243                 if (unlikely(r <= 0))
9244                         DRM_ERROR("Waiting for fences timed out!\n");
9245
9246                 fill_dc_plane_info_and_addr(
9247                         dm->adev, new_plane_state,
9248                         afb->tiling_flags,
9249                         &bundle->plane_infos[planes_count],
9250                         &bundle->flip_addrs[planes_count].address,
9251                         afb->tmz_surface, false);
9252
9253                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9254                                  new_plane_state->plane->index,
9255                                  bundle->plane_infos[planes_count].dcc.enable);
9256
9257                 bundle->surface_updates[planes_count].plane_info =
9258                         &bundle->plane_infos[planes_count];
9259
9260                 /*
9261                  * Only allow immediate flips for fast updates that don't
9262                  * change FB pitch, DCC state, rotation or mirroring.
9263                  */
9264                 bundle->flip_addrs[planes_count].flip_immediate =
9265                         crtc->state->async_flip &&
9266                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9267
9268                 timestamp_ns = ktime_get_ns();
9269                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9270                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9271                 bundle->surface_updates[planes_count].surface = dc_plane;
9272
9273                 if (!bundle->surface_updates[planes_count].surface) {
9274                         DRM_ERROR("No surface for CRTC: id=%d\n",
9275                                         acrtc_attach->crtc_id);
9276                         continue;
9277                 }
9278
9279                 if (plane == pcrtc->primary)
9280                         update_freesync_state_on_stream(
9281                                 dm,
9282                                 acrtc_state,
9283                                 acrtc_state->stream,
9284                                 dc_plane,
9285                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9286
9287                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9288                                  __func__,
9289                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9290                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9291
9292                 planes_count += 1;
9293
9294         }
9295
9296         if (pflip_present) {
9297                 if (!vrr_active) {
9298                         /* Use old throttling in non-vrr fixed refresh rate mode
9299                          * to keep flip scheduling based on target vblank counts
9300                          * working in a backwards compatible way, e.g., for
9301                          * clients using the GLX_OML_sync_control extension or
9302                          * DRI3/Present extension with defined target_msc.
9303                          */
9304                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9305                 } else {
9307                         /* For variable refresh rate mode only:
9308                          * Get vblank of last completed flip to avoid > 1 vrr
9309                          * flips per video frame by use of throttling, but allow
9310                          * flip programming anywhere in the possibly large
9311                          * variable vrr vblank interval for fine-grained flip
9312                          * timing control and more opportunity to avoid stutter
9313                          * on late submission of flips.
9314                          */
9315                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9316                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9317                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9318                 }
9319
9320                 target_vblank = last_flip_vblank + wait_for_vblank;
9321
9322                 /*
9323                  * Wait until we're out of the vertical blank period before the one
9324                  * targeted by the flip
9325                  */
9326                 while ((acrtc_attach->enabled &&
9327                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9328                                                             0, &vpos, &hpos, NULL,
9329                                                             NULL, &pcrtc->hwmode)
9330                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9331                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9332                         (int)(target_vblank -
9333                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9334                         usleep_range(1000, 1100);
9335                 }
9336
9337                 /*
9338                  * Prepare the flip event for the pageflip interrupt to handle.
9339                  *
9340                  * This only works in the case where we've already turned on the
9341                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
9342                  * from 0 -> n planes we have to skip a hardware generated event
9343                  * and rely on sending it from software.
9344                  */
9345                 if (acrtc_attach->base.state->event &&
9346                     acrtc_state->active_planes > 0 &&
9347                     !acrtc_state->force_dpms_off) {
9348                         drm_crtc_vblank_get(pcrtc);
9349
9350                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9351
9352                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9353                         prepare_flip_isr(acrtc_attach);
9354
9355                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9356                 }
9357
9358                 if (acrtc_state->stream) {
9359                         if (acrtc_state->freesync_vrr_info_changed)
9360                                 bundle->stream_update.vrr_infopacket =
9361                                         &acrtc_state->stream->vrr_infopacket;
9362                 }
9363         }
9364
9365         /* Update the planes if changed or disable if we don't have any. */
9366         if ((planes_count || acrtc_state->active_planes == 0) &&
9367                 acrtc_state->stream) {
9368 #if defined(CONFIG_DRM_AMD_DC_DCN)
9369                 /*
9370                  * If PSR or idle optimizations are enabled then flush out
9371                  * any pending work before hardware programming.
9372                  */
9373                 if (dm->vblank_control_workqueue)
9374                         flush_workqueue(dm->vblank_control_workqueue);
9375 #endif
9376
9377                 bundle->stream_update.stream = acrtc_state->stream;
9378                 if (new_pcrtc_state->mode_changed) {
9379                         bundle->stream_update.src = acrtc_state->stream->src;
9380                         bundle->stream_update.dst = acrtc_state->stream->dst;
9381                 }
9382
9383                 if (new_pcrtc_state->color_mgmt_changed) {
9384                         /*
9385                          * TODO: This isn't fully correct since we've actually
9386                          * already modified the stream in place.
9387                          */
9388                         bundle->stream_update.gamut_remap =
9389                                 &acrtc_state->stream->gamut_remap_matrix;
9390                         bundle->stream_update.output_csc_transform =
9391                                 &acrtc_state->stream->csc_color_matrix;
9392                         bundle->stream_update.out_transfer_func =
9393                                 acrtc_state->stream->out_transfer_func;
9394                 }
9395
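                /* Propagate any ABM (Adaptive Backlight Management) level change. */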
9396                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9397                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9398                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9399
9400                 /*
9401                  * If FreeSync state on the stream has changed then we need to
9402                  * re-adjust the min/max bounds now that DC doesn't handle this
9403                  * as part of commit.
9404                  */
9405                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9406                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9407                         dc_stream_adjust_vmin_vmax(
9408                                 dm->dc, acrtc_state->stream,
9409                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9410                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9411                 }
9412                 mutex_lock(&dm->dc_lock);
9413                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9414                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9415                         amdgpu_dm_psr_disable(acrtc_state->stream);
9416
9417                 dc_commit_updates_for_stream(dm->dc,
9418                                                      bundle->surface_updates,
9419                                                      planes_count,
9420                                                      acrtc_state->stream,
9421                                                      &bundle->stream_update,
9422                                                      dc_state);
9423
9424                 /*
9425                  * Enable or disable the interrupts on the backend.
9426                  *
9427                  * Most pipes are put into power gating when unused.
9428                  *
9429                  * When power gating is enabled on a pipe, we lose the
9430                  * interrupt enablement state by the time power gating is disabled.
9431                  *
9432                  * So we need to update the IRQ control state in hardware
9433                  * whenever the pipe turns on (since it could be previously
9434                  * power gated) or off (since some pipes can't be power gated
9435                  * on some ASICs).
9436                  */
9437                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9438                         dm_update_pflip_irq_state(drm_to_adev(dev),
9439                                                   acrtc_attach);
9440
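                /*
                 * On the first non-fast update for a panel that supports PSR
                 * but does not have the feature enabled yet, set up PSR on
                 * the link.
                 */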
9441                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9442                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9443                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9444                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9445
9446                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9447                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9448                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9449                         struct amdgpu_dm_connector *aconn =
9450                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9451
9452                         if (aconn->psr_skip_count > 0)
9453                                 aconn->psr_skip_count--;
9454
9455                         /* Allow PSR when skip count is 0. */
9456                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9457                 } else {
9458                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9459                 }
9460
9461                 mutex_unlock(&dm->dc_lock);
9462         }
9463
9464         /*
9465          * Update cursor state *after* programming all the planes.
9466          * This avoids redundant programming when we're only going to be
9467          * disabling a single plane, since those pipes are being disabled anyway.
9468          */
9469         if (acrtc_state->active_planes)
9470                 amdgpu_dm_commit_cursors(state);
9471
9472 cleanup:
9473         kfree(bundle);
9474 }
9475
9476 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9477                                    struct drm_atomic_state *state)
9478 {
9479         struct amdgpu_device *adev = drm_to_adev(dev);
9480         struct amdgpu_dm_connector *aconnector;
9481         struct drm_connector *connector;
9482         struct drm_connector_state *old_con_state, *new_con_state;
9483         struct drm_crtc_state *new_crtc_state;
9484         struct dm_crtc_state *new_dm_crtc_state;
9485         const struct dc_stream_status *status;
9486         int i, inst;
9487
9488         /* Notify device removals. */
9489         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9490                 if (old_con_state->crtc != new_con_state->crtc) {
9491                         /* CRTC changes require notification. */
9492                         goto notify;
9493                 }
9494
9495                 if (!new_con_state->crtc)
9496                         continue;
9497
9498                 new_crtc_state = drm_atomic_get_new_crtc_state(
9499                         state, new_con_state->crtc);
9500
9501                 if (!new_crtc_state)
9502                         continue;
9503
9504                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9505                         continue;
9506
9507         notify:
9508                 aconnector = to_amdgpu_dm_connector(connector);
9509
9510                 mutex_lock(&adev->dm.audio_lock);
9511                 inst = aconnector->audio_inst;
9512                 aconnector->audio_inst = -1;
9513                 mutex_unlock(&adev->dm.audio_lock);
9514
9515                 amdgpu_dm_audio_eld_notify(adev, inst);
9516         }
9517
9518         /* Notify audio device additions. */
9519         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9520                 if (!new_con_state->crtc)
9521                         continue;
9522
9523                 new_crtc_state = drm_atomic_get_new_crtc_state(
9524                         state, new_con_state->crtc);
9525
9526                 if (!new_crtc_state)
9527                         continue;
9528
9529                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9530                         continue;
9531
9532                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9533                 if (!new_dm_crtc_state->stream)
9534                         continue;
9535
9536                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9537                 if (!status)
9538                         continue;
9539
9540                 aconnector = to_amdgpu_dm_connector(connector);
9541
9542                 mutex_lock(&adev->dm.audio_lock);
9543                 inst = status->audio_inst;
9544                 aconnector->audio_inst = inst;
9545                 mutex_unlock(&adev->dm.audio_lock);
9546
9547                 amdgpu_dm_audio_eld_notify(adev, inst);
9548         }
9549 }
9550
9551 /**
9552  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9553  * @crtc_state: the DRM CRTC state
9554  * @stream_state: the DC stream state.
9555  *
9556  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9557  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9558  */
9559 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9560                                                 struct dc_stream_state *stream_state)
9561 {
9562         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9563 }
9564
9565 /**
9566  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9567  * @state: The atomic state to commit
9568  *
9569  * This will tell DC to commit the constructed DC state from atomic_check,
9570  * programming the hardware. Any failure here implies a hardware failure, since
9571  * atomic check should have filtered anything non-kosher.
9572  */
9573 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9574 {
9575         struct drm_device *dev = state->dev;
9576         struct amdgpu_device *adev = drm_to_adev(dev);
9577         struct amdgpu_display_manager *dm = &adev->dm;
9578         struct dm_atomic_state *dm_state;
9579         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9580         uint32_t i, j;
9581         struct drm_crtc *crtc;
9582         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9583         unsigned long flags;
9584         bool wait_for_vblank = true;
9585         struct drm_connector *connector;
9586         struct drm_connector_state *old_con_state, *new_con_state;
9587         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9588         int crtc_disable_count = 0;
9589         bool mode_set_reset_required = false;
9590
9591         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9592
9593         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9594
9595         dm_state = dm_atomic_get_new_state(state);
9596         if (dm_state && dm_state->context) {
9597                 dc_state = dm_state->context;
9598         } else {
9599                 /* No state changes, retain current state. */
9600                 dc_state_temp = dc_create_state(dm->dc);
9601                 ASSERT(dc_state_temp);
9602                 dc_state = dc_state_temp;
9603                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9604         }
9605
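        /*
         * Disable interrupts and release the stream for every CRTC that is
         * being deactivated or undergoing a full modeset, before programming
         * the new state.
         */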
9606         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9607                                        new_crtc_state, i) {
9608                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9609
9610                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9611
9612                 if (old_crtc_state->active &&
9613                     (!new_crtc_state->active ||
9614                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9615                         manage_dm_interrupts(adev, acrtc, false);
9616                         dc_stream_release(dm_old_crtc_state->stream);
9617                 }
9618         }
9619
9620         drm_atomic_helper_calc_timestamping_constants(state);
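        /* Recompute vblank timestamping constants for the updated modes. */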
9621
9622         /* update changed items */
9623         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9624                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9625
9626                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9627                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9628
9629                 DRM_DEBUG_ATOMIC(
9630                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9631                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9632                         "connectors_changed:%d\n",
9633                         acrtc->crtc_id,
9634                         new_crtc_state->enable,
9635                         new_crtc_state->active,
9636                         new_crtc_state->planes_changed,
9637                         new_crtc_state->mode_changed,
9638                         new_crtc_state->active_changed,
9639                         new_crtc_state->connectors_changed);
9640
9641                 /* Disable cursor if disabling crtc */
9642                 if (old_crtc_state->active && !new_crtc_state->active) {
9643                         struct dc_cursor_position position;
9644
9645                         memset(&position, 0, sizeof(position));
9646                         mutex_lock(&dm->dc_lock);
9647                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9648                         mutex_unlock(&dm->dc_lock);
9649                 }
9650
9651                 /* Copy all transient state flags into dc state */
9652                 if (dm_new_crtc_state->stream) {
9653                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9654                                                             dm_new_crtc_state->stream);
9655                 }
9656
9657                 /* handles headless hotplug case, updating new_state and
9658                  * aconnector as needed
9659                  */
9660
9661                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9662
9663                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9664
9665                         if (!dm_new_crtc_state->stream) {
9666                                 /*
9667                                  * This could happen because of issues with the
9668                                  * delivery of userspace notifications. In this
9669                                  * case userspace tries to set a mode on a
9670                                  * display which is in fact disconnected, so
9671                                  * dc_sink is NULL on the aconnector. We expect
9672                                  * a mode reset to come soon.
9673                                  *
9674                                  * This can also happen when an unplug is done
9675                                  * during the resume sequence.
9676                                  *
9677                                  * In this case, we want to pretend we still
9678                                  * have a sink to keep the pipe running so that
9679                                  * hw state is consistent with the sw state
9680                                  */
9681                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9682                                                 __func__, acrtc->base.base.id);
9683                                 continue;
9684                         }
9685
9686                         if (dm_old_crtc_state->stream)
9687                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9688
9689                         pm_runtime_get_noresume(dev->dev);
9690
9691                         acrtc->enabled = true;
9692                         acrtc->hw_mode = new_crtc_state->mode;
9693                         crtc->hwmode = new_crtc_state->mode;
9694                         mode_set_reset_required = true;
9695                 } else if (modereset_required(new_crtc_state)) {
9696                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9697                         /* i.e. reset mode */
9698                         if (dm_old_crtc_state->stream)
9699                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9700
9701                         mode_set_reset_required = true;
9702                 }
9703         } /* for_each_crtc_in_state() */
9704
9705         if (dc_state) {
9706                 /* If there was a mode set or reset, disable eDP PSR. */
9707                 if (mode_set_reset_required) {
9708 #if defined(CONFIG_DRM_AMD_DC_DCN)
9709                         if (dm->vblank_control_workqueue)
9710                                 flush_workqueue(dm->vblank_control_workqueue);
9711 #endif
9712                         amdgpu_dm_psr_disable_all(dm);
9713                 }
9714
9715                 dm_enable_per_frame_crtc_master_sync(dc_state);
9716                 mutex_lock(&dm->dc_lock);
9717                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9718 #if defined(CONFIG_DRM_AMD_DC_DCN)
9719                 /* Allow idle optimizations when vblank count is 0 for display off */
9720                 if (dm->active_vblank_irq_count == 0)
9721                         dc_allow_idle_optimizations(dm->dc, true);
9722 #endif
9723                 mutex_unlock(&dm->dc_lock);
9724         }
9725
9726         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9727                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9728
9729                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9730
9731                 if (dm_new_crtc_state->stream != NULL) {
9732                         const struct dc_stream_status *status =
9733                                         dc_stream_get_status(dm_new_crtc_state->stream);
9734
9735                         if (!status)
9736                                 status = dc_stream_get_status_from_state(dc_state,
9737                                                                          dm_new_crtc_state->stream);
9738                         if (!status)
9739                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9740                         else
9741                                 acrtc->otg_inst = status->primary_otg_inst;
9742                 }
9743         }
9744 #ifdef CONFIG_DRM_AMD_DC_HDCP
9745         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9746                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9747                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9748                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9749
9750                 new_crtc_state = NULL;
9751
9752                 if (acrtc)
9753                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9754
9755                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9756
9757                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9758                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9759                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9760                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9761                         dm_new_con_state->update_hdcp = true;
9762                         continue;
9763                 }
9764
9765                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9766                         hdcp_update_display(
9767                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9768                                 new_con_state->hdcp_content_type,
9769                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9770         }
9771 #endif
9772
9773         /* Handle connector state changes */
9774         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9775                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9776                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9777                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9778                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9779                 struct dc_stream_update stream_update;
9780                 struct dc_info_packet hdr_packet;
9781                 struct dc_stream_status *status = NULL;
9782                 bool abm_changed, hdr_changed, scaling_changed;
9783
9784                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9785                 memset(&stream_update, 0, sizeof(stream_update));
9786
9787                 if (acrtc) {
9788                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9789                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9790                 }
9791
9792                 /* Skip any modesets/resets */
9793                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9794                         continue;
9795
9796                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9797                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9798
9799                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9800                                                              dm_old_con_state);
9801
9802                 abm_changed = dm_new_crtc_state->abm_level !=
9803                               dm_old_crtc_state->abm_level;
9804
9805                 hdr_changed =
9806                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9807
9808                 if (!scaling_changed && !abm_changed && !hdr_changed)
9809                         continue;
9810
9811                 stream_update.stream = dm_new_crtc_state->stream;
9812                 if (scaling_changed) {
9813                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9814                                         dm_new_con_state, dm_new_crtc_state->stream);
9815
9816                         stream_update.src = dm_new_crtc_state->stream->src;
9817                         stream_update.dst = dm_new_crtc_state->stream->dst;
9818                 }
9819
9820                 if (abm_changed) {
9821                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9822
9823                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9824                 }
9825
9826                 if (hdr_changed) {
9827                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9828                         stream_update.hdr_static_metadata = &hdr_packet;
9829                 }
9830
9831                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9832
9833                 if (WARN_ON(!status))
9834                         continue;
9835
9836                 WARN_ON(!status->plane_count);
9837
9838                 /*
9839                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9840                  * Here we create an empty update on each plane.
9841                  * To fix this, DC should permit updating only stream properties.
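                 * Note: any valid surface appears to suffice for the dummy
                 * entries, since they carry no plane payload of their own.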
9842                  */
9843                 for (j = 0; j < status->plane_count; j++)
9844                         dummy_updates[j].surface = status->plane_states[0];
9845
9846
9847                 mutex_lock(&dm->dc_lock);
9848                 dc_commit_updates_for_stream(dm->dc,
9849                                                      dummy_updates,
9850                                                      status->plane_count,
9851                                                      dm_new_crtc_state->stream,
9852                                                      &stream_update,
9853                                                      dc_state);
9854                 mutex_unlock(&dm->dc_lock);
9855         }
9856
9857         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9858         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9859                                       new_crtc_state, i) {
9860                 if (old_crtc_state->active && !new_crtc_state->active)
9861                         crtc_disable_count++;
9862
9863                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9864                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9865
9866                 /* For freesync config update on crtc state and params for irq */
9867                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9868
9869                 /* Handle vrr on->off / off->on transitions */
9870                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9871                                                 dm_new_crtc_state);
9872         }
9873
9874         /*
9875          * Enable interrupts for CRTCs that are newly enabled or went through
9876          * a modeset. This is intentionally deferred until after the front-end
9877          * state has been modified, so that the OTG is on and the IRQ handlers
9878          * don't access stale or invalid state.
9879          */
9880         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9881                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9882 #ifdef CONFIG_DEBUG_FS
9883                 bool configure_crc = false;
9884                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9885 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9886                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9887 #endif
9888                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9889                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9890                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9891 #endif
9892                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9893
9894                 if (new_crtc_state->active &&
9895                     (!old_crtc_state->active ||
9896                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9897                         dc_stream_retain(dm_new_crtc_state->stream);
9898                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9899                         manage_dm_interrupts(adev, acrtc, true);
9900
9901 #ifdef CONFIG_DEBUG_FS
9902                         /*
9903                          * Frontend may have changed, so reapply the CRC capture
9904                          * settings for the stream.
9905                          */
9906                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9907
9908                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9909                                 configure_crc = true;
9910 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9911                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9912                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9913                                         acrtc->dm_irq_params.crc_window.update_win = true;
9914                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9915                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9916                                         crc_rd_wrk->crtc = crtc;
9917                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9918                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9919                                 }
9920 #endif
9921                         }
9922
9923                         if (configure_crc)
9924                                 if (amdgpu_dm_crtc_configure_crc_source(
9925                                         crtc, dm_new_crtc_state, cur_crc_src))
9926                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9927 #endif
9928                 }
9929         }
9930
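        /* If any CRTC requested an async flip, skip the vblank wait below. */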
9931         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9932                 if (new_crtc_state->async_flip)
9933                         wait_for_vblank = false;
9934
9935         /* update planes when needed per crtc*/
9936         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9937                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9938
9939                 if (dm_new_crtc_state->stream)
9940                         amdgpu_dm_commit_planes(state, dc_state, dev,
9941                                                 dm, crtc, wait_for_vblank);
9942         }
9943
9944         /* Update audio instances for each connector. */
9945         amdgpu_dm_commit_audio(dev, state);
9946
9947 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9948         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9949         /* restore the backlight level */
9950         for (i = 0; i < dm->num_of_edps; i++) {
9951                 if (dm->backlight_dev[i] &&
9952                     (dm->actual_brightness[i] != dm->brightness[i]))
9953                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9954         }
9955 #endif
9956         /*
9957          * send vblank event on all events not handled in flip and
9958          * mark consumed event for drm_atomic_helper_commit_hw_done
9959          */
9960         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9961         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9962
9963                 if (new_crtc_state->event)
9964                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9965
9966                 new_crtc_state->event = NULL;
9967         }
9968         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9969
9970         /* Signal HW programming completion */
9971         drm_atomic_helper_commit_hw_done(state);
9972
9973         if (wait_for_vblank)
9974                 drm_atomic_helper_wait_for_flip_done(dev, state);
9975
9976         drm_atomic_helper_cleanup_planes(dev, state);
9977
9978         /* return the stolen vga memory back to VRAM */
9979         if (!adev->mman.keep_stolen_vga_memory)
9980                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9981         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9982
9983         /*
9984          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9985          * so we can put the GPU into runtime suspend if we're not driving any
9986          * displays anymore
9987          */
9988         for (i = 0; i < crtc_disable_count; i++)
9989                 pm_runtime_put_autosuspend(dev->dev);
9990         pm_runtime_mark_last_busy(dev->dev);
9991
9992         if (dc_state_temp)
9993                 dc_release_state(dc_state_temp);
9994 }
9995
9996
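/*
 * Build and commit an atomic state that re-programs the connector's current
 * CRTC and primary plane, forcing a restore of the previous display setting.
 */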
9997 static int dm_force_atomic_commit(struct drm_connector *connector)
9998 {
9999         int ret = 0;
10000         struct drm_device *ddev = connector->dev;
10001         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10002         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10003         struct drm_plane *plane = disconnected_acrtc->base.primary;
10004         struct drm_connector_state *conn_state;
10005         struct drm_crtc_state *crtc_state;
10006         struct drm_plane_state *plane_state;
10007
10008         if (!state)
10009                 return -ENOMEM;
10010
10011         state->acquire_ctx = ddev->mode_config.acquire_ctx;
10012
10013         /* Construct an atomic state to restore previous display setting */
10014
10015         /*
10016          * Attach connectors to drm_atomic_state
10017          */
10018         conn_state = drm_atomic_get_connector_state(state, connector);
10019
10020         ret = PTR_ERR_OR_ZERO(conn_state);
10021         if (ret)
10022                 goto out;
10023
10024         /* Attach crtc to drm_atomic_state*/
10025         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10026
10027         ret = PTR_ERR_OR_ZERO(crtc_state);
10028         if (ret)
10029                 goto out;
10030
10031         /* force a restore */
10032         crtc_state->mode_changed = true;
10033
10034         /* Attach plane to drm_atomic_state */
10035         plane_state = drm_atomic_get_plane_state(state, plane);
10036
10037         ret = PTR_ERR_OR_ZERO(plane_state);
10038         if (ret)
10039                 goto out;
10040
10041         /* Call commit internally with the state we just constructed */
10042         ret = drm_atomic_commit(state);
10043
10044 out:
10045         drm_atomic_state_put(state);
10046         if (ret)
10047                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10048
10049         return ret;
10050 }
10051
10052 /*
10053  * This function handles all cases where a set mode does not arrive upon hotplug.
10054  * This includes when a display is unplugged and then plugged back into the
10055  * same port, and when running without usermode desktop manager support.
10056  */
10057 void dm_restore_drm_connector_state(struct drm_device *dev,
10058                                     struct drm_connector *connector)
10059 {
10060         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10061         struct amdgpu_crtc *disconnected_acrtc;
10062         struct dm_crtc_state *acrtc_state;
10063
10064         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10065                 return;
10066
10067         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10068         if (!disconnected_acrtc)
10069                 return;
10070
10071         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10072         if (!acrtc_state->stream)
10073                 return;
10074
10075         /*
10076          * If the previous sink has not been released and differs from the
10077          * current one, we deduce that we cannot rely on a usermode call to
10078          * turn on the display, so we do it here.
10079          */
10080         if (acrtc_state->stream->sink != aconnector->dc_sink)
10081                 dm_force_atomic_commit(&aconnector->base);
10082 }
10083
10084 /*
10085  * Grabs all modesetting locks to serialize against any blocking commits,
10086  * and waits for completion of all non-blocking commits.
10087  */
10088 static int do_aquire_global_lock(struct drm_device *dev,
10089                                  struct drm_atomic_state *state)
10090 {
10091         struct drm_crtc *crtc;
10092         struct drm_crtc_commit *commit;
10093         long ret;
10094
10095         /*
10096          * Adding all modeset locks to acquire_ctx will
10097          * ensure that when the framework releases it, the
10098          * extra locks we are locking here will get released too.
10099          */
10100         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10101         if (ret)
10102                 return ret;
10103
10104         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10105                 spin_lock(&crtc->commit_lock);
10106                 commit = list_first_entry_or_null(&crtc->commit_list,
10107                                 struct drm_crtc_commit, commit_entry);
10108                 if (commit)
10109                         drm_crtc_commit_get(commit);
10110                 spin_unlock(&crtc->commit_lock);
10111
10112                 if (!commit)
10113                         continue;
10114
10115                 /*
10116                  * Make sure all pending HW programming completed and
10117                  * page flips done
10118                  */
10119                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10120
10121                 if (ret > 0)
10122                         ret = wait_for_completion_interruptible_timeout(
10123                                         &commit->flip_done, 10*HZ);
10124
10125                 if (ret == 0)
10126                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10127                                   "timed out\n", crtc->base.id, crtc->name);
10128
10129                 drm_crtc_commit_put(commit);
10130         }
10131
10132         return ret < 0 ? ret : 0;
10133 }
10134
10135 static void get_freesync_config_for_crtc(
10136         struct dm_crtc_state *new_crtc_state,
10137         struct dm_connector_state *new_con_state)
10138 {
10139         struct mod_freesync_config config = {0};
10140         struct amdgpu_dm_connector *aconnector =
10141                         to_amdgpu_dm_connector(new_con_state->base.connector);
10142         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10143         int vrefresh = drm_mode_vrefresh(mode);
10144         bool fs_vid_mode = false;
10145
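        /*
         * VRR is only supported when the sink reports FreeSync capability and
         * the nominal refresh rate of the requested mode lies within the
         * panel's advertised min/max range.
         */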
10146         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10147                                         vrefresh >= aconnector->min_vfreq &&
10148                                         vrefresh <= aconnector->max_vfreq;
10149
10150         if (new_crtc_state->vrr_supported) {
10151                 new_crtc_state->stream->ignore_msa_timing_param = true;
10152                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10153
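                /* DC expects the refresh-rate bounds in micro-hertz. */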
10154                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10155                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10156                 config.vsif_supported = true;
10157                 config.btr = true;
10158
10159                 if (fs_vid_mode) {
10160                         config.state = VRR_STATE_ACTIVE_FIXED;
10161                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10162                         goto out;
10163                 } else if (new_crtc_state->base.vrr_enabled) {
10164                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10165                 } else {
10166                         config.state = VRR_STATE_INACTIVE;
10167                 }
10168         }
10169 out:
10170         new_crtc_state->freesync_config = config;
10171 }
10172
10173 static void reset_freesync_config_for_crtc(
10174         struct dm_crtc_state *new_crtc_state)
10175 {
10176         new_crtc_state->vrr_supported = false;
10177
10178         memset(&new_crtc_state->vrr_infopacket, 0,
10179                sizeof(new_crtc_state->vrr_infopacket));
10180 }
10181
10182 static bool
10183 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10184                                  struct drm_crtc_state *new_crtc_state)
10185 {
10186         const struct drm_display_mode *old_mode, *new_mode;
10187
10188         if (!old_crtc_state || !new_crtc_state)
10189                 return false;
10190
10191         old_mode = &old_crtc_state->mode;
10192         new_mode = &new_crtc_state->mode;
10193
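        /*
         * The timing counts as "unchanged" for FreeSync purposes when only
         * the vertical blanking differs: horizontal timing, pixel clock and
         * vsync width must match, while vtotal and the vsync placement may
         * move (i.e. a pure front porch adjustment).
         */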
10194         if (old_mode->clock       == new_mode->clock &&
10195             old_mode->hdisplay    == new_mode->hdisplay &&
10196             old_mode->vdisplay    == new_mode->vdisplay &&
10197             old_mode->htotal      == new_mode->htotal &&
10198             old_mode->vtotal      != new_mode->vtotal &&
10199             old_mode->hsync_start == new_mode->hsync_start &&
10200             old_mode->vsync_start != new_mode->vsync_start &&
10201             old_mode->hsync_end   == new_mode->hsync_end &&
10202             old_mode->vsync_end   != new_mode->vsync_end &&
10203             old_mode->hskew       == new_mode->hskew &&
10204             old_mode->vscan       == new_mode->vscan &&
10205             (old_mode->vsync_end - old_mode->vsync_start) ==
10206             (new_mode->vsync_end - new_mode->vsync_start))
10207                 return true;
10208
10209         return false;
10210 }
10211
10212 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10213         uint64_t num, den, res;
10214         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10215
10216         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10217
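        /*
         * Fixed refresh rate in micro-hertz:
         *   refresh_uhz = (mode.clock [kHz] * 1000 * 1000000) / (htotal * vtotal)
         */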
10218         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10219         den = (unsigned long long)new_crtc_state->mode.htotal *
10220               (unsigned long long)new_crtc_state->mode.vtotal;
10221
10222         res = div_u64(num, den);
10223         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10224 }
10225
10226 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10227                          struct drm_atomic_state *state,
10228                          struct drm_crtc *crtc,
10229                          struct drm_crtc_state *old_crtc_state,
10230                          struct drm_crtc_state *new_crtc_state,
10231                          bool enable,
10232                          bool *lock_and_validation_needed)
10233 {
10234         struct dm_atomic_state *dm_state = NULL;
10235         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10236         struct dc_stream_state *new_stream;
10237         int ret = 0;
10238
10239         /*
10240          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10241          * update changed items
10242          */
10243         struct amdgpu_crtc *acrtc = NULL;
10244         struct amdgpu_dm_connector *aconnector = NULL;
10245         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10246         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10247
10248         new_stream = NULL;
10249
10250         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10251         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10252         acrtc = to_amdgpu_crtc(crtc);
10253         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10254
10255         /* TODO This hack should go away */
10256         if (aconnector && enable) {
10257                 /* Make sure fake sink is created in plug-in scenario */
10258                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10259                                                             &aconnector->base);
10260                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10261                                                             &aconnector->base);
10262
10263                 if (IS_ERR(drm_new_conn_state)) {
10264                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10265                         goto fail;
10266                 }
10267
10268                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10269                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10270
10271                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10272                         goto skip_modeset;
10273
10274                 new_stream = create_validate_stream_for_sink(aconnector,
10275                                                              &new_crtc_state->mode,
10276                                                              dm_new_conn_state,
10277                                                              dm_old_crtc_state->stream);
10278
10279                 /*
10280                  * we can have no stream on ACTION_SET if a display
10281                  * was disconnected during S3, in this case it is not an
10282                  * error, the OS will be updated after detection, and
10283                  * will do the right thing on next atomic commit
10284                  */
10285
10286                 if (!new_stream) {
10287                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10288                                         __func__, acrtc->base.base.id);
10289                         ret = -ENOMEM;
10290                         goto fail;
10291                 }
10292
10293                 /*
10294                  * TODO: Check VSDB bits to decide whether this should
10295                  * be enabled or not.
10296                  */
10297                 new_stream->triggered_crtc_reset.enabled =
10298                         dm->force_timing_sync;
10299
10300                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10301
10302                 ret = fill_hdr_info_packet(drm_new_conn_state,
10303                                            &new_stream->hdr_static_metadata);
10304                 if (ret)
10305                         goto fail;
10306
10307                 /*
10308                  * If we already removed the old stream from the context
10309                  * (and set the new stream to NULL) then we can't reuse
10310                  * the old stream even if the stream and scaling are unchanged.
10311                  * We'll hit the BUG_ON and black screen.
10312                  *
10313                  * TODO: Refactor this function to allow this check to work
10314                  * in all conditions.
10315                  */
10316                 if (dm_new_crtc_state->stream &&
10317                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10318                         goto skip_modeset;
10319
10320                 if (dm_new_crtc_state->stream &&
10321                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10322                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10323                         new_crtc_state->mode_changed = false;
10324                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10325                                          new_crtc_state->mode_changed);
10326                 }
10327         }
10328
10329         /* mode_changed flag may get updated above, need to check again */
10330         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10331                 goto skip_modeset;
10332
10333         DRM_DEBUG_ATOMIC(
10334                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10335                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10336                 "connectors_changed:%d\n",
10337                 acrtc->crtc_id,
10338                 new_crtc_state->enable,
10339                 new_crtc_state->active,
10340                 new_crtc_state->planes_changed,
10341                 new_crtc_state->mode_changed,
10342                 new_crtc_state->active_changed,
10343                 new_crtc_state->connectors_changed);
10344
10345         /* Remove stream for any changed/disabled CRTC */
10346         if (!enable) {
10347
10348                 if (!dm_old_crtc_state->stream)
10349                         goto skip_modeset;
10350
10351                 if (dm_new_crtc_state->stream &&
10352                     is_timing_unchanged_for_freesync(new_crtc_state,
10353                                                      old_crtc_state)) {
10354                         new_crtc_state->mode_changed = false;
10355                         DRM_DEBUG_DRIVER(
10356                                 "Mode change not required for front porch change, "
10357                                 "setting mode_changed to %d",
10358                                 new_crtc_state->mode_changed);
10359
10360                         set_freesync_fixed_config(dm_new_crtc_state);
10361
10362                         goto skip_modeset;
10363                 } else if (aconnector &&
10364                            is_freesync_video_mode(&new_crtc_state->mode,
10365                                                   aconnector)) {
10366                         struct drm_display_mode *high_mode;
10367
10368                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10369                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10370                                 set_freesync_fixed_config(dm_new_crtc_state);
10371                         }
10372                 }
10373
10374                 ret = dm_atomic_get_state(state, &dm_state);
10375                 if (ret)
10376                         goto fail;
10377
10378                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10379                                 crtc->base.id);
10380
10381                 /* i.e. reset mode */
10382                 if (dc_remove_stream_from_ctx(
10383                                 dm->dc,
10384                                 dm_state->context,
10385                                 dm_old_crtc_state->stream) != DC_OK) {
10386                         ret = -EINVAL;
10387                         goto fail;
10388                 }
10389
10390                 dc_stream_release(dm_old_crtc_state->stream);
10391                 dm_new_crtc_state->stream = NULL;
10392
10393                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10394
10395                 *lock_and_validation_needed = true;
10396
10397         } else {/* Add stream for any updated/enabled CRTC */
10398                 /*
10399                  * Quick fix to prevent a NULL pointer dereference on new_stream
10400                  * when newly added MST connectors are not found in the existing
10401                  * crtc_state in chained mode. TODO: dig out the root cause.
10402                  */
10403                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10404                         goto skip_modeset;
10405
10406                 if (modereset_required(new_crtc_state))
10407                         goto skip_modeset;
10408
10409                 if (modeset_required(new_crtc_state, new_stream,
10410                                      dm_old_crtc_state->stream)) {
10411
10412                         WARN_ON(dm_new_crtc_state->stream);
10413
10414                         ret = dm_atomic_get_state(state, &dm_state);
10415                         if (ret)
10416                                 goto fail;
10417
10418                         dm_new_crtc_state->stream = new_stream;
10419
10420                         dc_stream_retain(new_stream);
10421
10422                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10423                                          crtc->base.id);
10424
10425                         if (dc_add_stream_to_ctx(
10426                                         dm->dc,
10427                                         dm_state->context,
10428                                         dm_new_crtc_state->stream) != DC_OK) {
10429                                 ret = -EINVAL;
10430                                 goto fail;
10431                         }
10432
10433                         *lock_and_validation_needed = true;
10434                 }
10435         }
10436
10437 skip_modeset:
10438         /* Release extra reference */
10439         if (new_stream)
10440                 dc_stream_release(new_stream);
10441
10442         /*
10443          * We want to do dc stream updates that do not require a
10444          * full modeset below.
10445          */
10446         if (!(enable && aconnector && new_crtc_state->active))
10447                 return 0;
10448         /*
10449          * Given the above conditions, the dc stream state cannot be NULL:
10450          * 1. the CRTC is being enabled (its stream was just added to the
10451          *    dc context, or is already on it),
10452          * 2. it has a valid connector attached, and
10453          * 3. it is currently active and enabled.
10454          * => the dc stream state currently exists.
10455          */
10456         BUG_ON(dm_new_crtc_state->stream == NULL);
10457
10458         /* Scaling or underscan settings */
10459         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10460                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10461                 update_stream_scaling_settings(
10462                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10463
10464         /* ABM settings */
10465         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10466
10467         /*
10468          * Color management settings. We also update color properties
10469          * when a modeset is needed, to ensure it gets reprogrammed.
10470          */
10471         if (dm_new_crtc_state->base.color_mgmt_changed ||
10472             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10473                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10474                 if (ret)
10475                         goto fail;
10476         }
10477
10478         /* Update Freesync settings. */
10479         get_freesync_config_for_crtc(dm_new_crtc_state,
10480                                      dm_new_conn_state);
10481
10482         return ret;
10483
10484 fail:
10485         if (new_stream)
10486                 dc_stream_release(new_stream);
10487         return ret;
10488 }
10489
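/*
 * Decide whether a plane update requires its DC plane state to be removed
 * from and re-added to the DC context. Because DC pipe acquisition depends
 * on z-order, a qualifying change on any non-cursor plane of the same CRTC
 * forces a reset of this plane as well.
 */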
10490 static bool should_reset_plane(struct drm_atomic_state *state,
10491                                struct drm_plane *plane,
10492                                struct drm_plane_state *old_plane_state,
10493                                struct drm_plane_state *new_plane_state)
10494 {
10495         struct drm_plane *other;
10496         struct drm_plane_state *old_other_state, *new_other_state;
10497         struct drm_crtc_state *new_crtc_state;
10498         int i;
10499
10500         /*
10501          * TODO: Remove this hack once the checks below are sufficient to
10502          * determine when we need to reset all the planes on the stream.
10503          */
10505         if (state->allow_modeset)
10506                 return true;
10507
10508         /* Exit early if we know that we're adding or removing the plane. */
10509         if (old_plane_state->crtc != new_plane_state->crtc)
10510                 return true;
10511
10512         /* old crtc == new_crtc == NULL, plane not in context. */
10513         if (!new_plane_state->crtc)
10514                 return false;
10515
10516         new_crtc_state =
10517                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10518
10519         if (!new_crtc_state)
10520                 return true;
10521
10522         /* CRTC Degamma changes currently require us to recreate planes. */
10523         if (new_crtc_state->color_mgmt_changed)
10524                 return true;
10525
10526         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10527                 return true;
10528
10529         /*
10530          * If there are any new primary or overlay planes being added or
10531          * removed then the z-order can potentially change. To ensure
10532          * correct z-order and pipe acquisition the current DC architecture
10533          * requires us to remove and recreate all existing planes.
10534          *
10535          * TODO: Come up with a more elegant solution for this.
10536          */
10537         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10538                 struct amdgpu_framebuffer *old_afb, *new_afb;
10539                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10540                         continue;
10541
10542                 if (old_other_state->crtc != new_plane_state->crtc &&
10543                     new_other_state->crtc != new_plane_state->crtc)
10544                         continue;
10545
10546                 if (old_other_state->crtc != new_other_state->crtc)
10547                         return true;
10548
10549                 /* Src/dst size and scaling updates. */
10550                 if (old_other_state->src_w != new_other_state->src_w ||
10551                     old_other_state->src_h != new_other_state->src_h ||
10552                     old_other_state->crtc_w != new_other_state->crtc_w ||
10553                     old_other_state->crtc_h != new_other_state->crtc_h)
10554                         return true;
10555
10556                 /* Rotation / mirroring updates. */
10557                 if (old_other_state->rotation != new_other_state->rotation)
10558                         return true;
10559
10560                 /* Blending updates. */
10561                 if (old_other_state->pixel_blend_mode !=
10562                     new_other_state->pixel_blend_mode)
10563                         return true;
10564
10565                 /* Alpha updates. */
10566                 if (old_other_state->alpha != new_other_state->alpha)
10567                         return true;
10568
10569                 /* Colorspace changes. */
10570                 if (old_other_state->color_range != new_other_state->color_range ||
10571                     old_other_state->color_encoding != new_other_state->color_encoding)
10572                         return true;
10573
10574                 /* Framebuffer checks fall at the end. */
10575                 if (!old_other_state->fb || !new_other_state->fb)
10576                         continue;
10577
10578                 /* Pixel format changes can require bandwidth updates. */
10579                 if (old_other_state->fb->format != new_other_state->fb->format)
10580                         return true;
10581
10582                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10583                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10584
10585                 /* Tiling and DCC changes also require bandwidth updates. */
10586                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10587                     old_afb->base.modifier != new_afb->base.modifier)
10588                         return true;
10589         }
10590
10591         return false;
10592 }
10593
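/*
 * Validate a framebuffer for use by the hardware cursor: it must fit the
 * CRTC's maximum cursor dimensions, be displayed without cropping, use a
 * pitch of 64, 128 or 256 pixels, and be linear (non-tiled).
 */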
10594 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10595                               struct drm_plane_state *new_plane_state,
10596                               struct drm_framebuffer *fb)
10597 {
10598         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10599         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10600         unsigned int pitch;
10601         bool linear;
10602
10603         if (fb->width > new_acrtc->max_cursor_width ||
10604             fb->height > new_acrtc->max_cursor_height) {
10605                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10606                                  new_plane_state->fb->width,
10607                                  new_plane_state->fb->height);
10608                 return -EINVAL;
10609         }
10610         if (new_plane_state->src_w != fb->width << 16 ||
10611             new_plane_state->src_h != fb->height << 16) {
10612                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10613                 return -EINVAL;
10614         }
10615
10616         /* Pitch in pixels */
10617         pitch = fb->pitches[0] / fb->format->cpp[0];
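        /* e.g. an ARGB8888 cursor FB (cpp == 4) with pitches[0] == 256 yields a 64 px pitch */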
10618
10619         if (fb->width != pitch) {
10620                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10621                                  fb->width, pitch);
10622                 return -EINVAL;
10623         }
10624
10625         switch (pitch) {
10626         case 64:
10627         case 128:
10628         case 256:
10629                 /* FB pitch is supported by cursor plane */
10630                 break;
10631         default:
10632                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10633                 return -EINVAL;
10634         }
10635
10636         /* Core DRM takes care of checking FB modifiers, so we only need to
10637          * check tiling flags when the FB doesn't have a modifier. */
10638         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10639                 if (adev->family < AMDGPU_FAMILY_AI) {
10640                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10641                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10642                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10643                 } else {
10644                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10645                 }
10646                 if (!linear) {
10647                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
10648                         return -EINVAL;
10649                 }
10650         }
10651
10652         return 0;
10653 }
10654
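/*
 * Add or remove a plane's DC state for the atomic commit. This is called
 * twice from amdgpu_dm_atomic_check(): first with enable == false to remove
 * changed or disabled planes from the DC context, then with enable == true
 * to add the new plane states back in.
 */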
10655 static int dm_update_plane_state(struct dc *dc,
10656                                  struct drm_atomic_state *state,
10657                                  struct drm_plane *plane,
10658                                  struct drm_plane_state *old_plane_state,
10659                                  struct drm_plane_state *new_plane_state,
10660                                  bool enable,
10661                                  bool *lock_and_validation_needed)
10662 {
10664         struct dm_atomic_state *dm_state = NULL;
10665         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10666         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10667         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10668         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10669         struct amdgpu_crtc *new_acrtc;
10670         bool needs_reset;
10671         int ret = 0;
10672
10674         new_plane_crtc = new_plane_state->crtc;
10675         old_plane_crtc = old_plane_state->crtc;
10676         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10677         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10678
10679         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10680                 if (!enable || !new_plane_crtc ||
10681                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10682                         return 0;
10683
10684                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10685
10686                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10687                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10688                         return -EINVAL;
10689                 }
10690
10691                 if (new_plane_state->fb) {
10692                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10693                                                  new_plane_state->fb);
10694                         if (ret)
10695                                 return ret;
10696                 }
10697
10698                 return 0;
10699         }
10700
10701         needs_reset = should_reset_plane(state, plane, old_plane_state,
10702                                          new_plane_state);
10703
10704         /* Remove any changed/removed planes */
10705         if (!enable) {
10706                 if (!needs_reset)
10707                         return 0;
10708
10709                 if (!old_plane_crtc)
10710                         return 0;
10711
10712                 old_crtc_state = drm_atomic_get_old_crtc_state(
10713                                 state, old_plane_crtc);
10714                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10715
10716                 if (!dm_old_crtc_state->stream)
10717                         return 0;
10718
10719                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10720                                 plane->base.id, old_plane_crtc->base.id);
10721
10722                 ret = dm_atomic_get_state(state, &dm_state);
10723                 if (ret)
10724                         return ret;
10725
10726                 if (!dc_remove_plane_from_context(
10727                                 dc,
10728                                 dm_old_crtc_state->stream,
10729                                 dm_old_plane_state->dc_state,
10730                                 dm_state->context)) {
10732                         return -EINVAL;
10733                 }
10734
10736                 dc_plane_state_release(dm_old_plane_state->dc_state);
10737                 dm_new_plane_state->dc_state = NULL;
10738
10739                 *lock_and_validation_needed = true;
10740
10741         } else { /* Add new planes */
10742                 struct dc_plane_state *dc_new_plane_state;
10743
10744                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10745                         return 0;
10746
10747                 if (!new_plane_crtc)
10748                         return 0;
10749
10750                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10751                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10752
10753                 if (!dm_new_crtc_state->stream)
10754                         return 0;
10755
10756                 if (!needs_reset)
10757                         return 0;
10758
10759                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10760                 if (ret)
10761                         return ret;
10762
10763                 WARN_ON(dm_new_plane_state->dc_state);
10764
10765                 dc_new_plane_state = dc_create_plane_state(dc);
10766                 if (!dc_new_plane_state)
10767                         return -ENOMEM;
10768
10769                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10770                                  plane->base.id, new_plane_crtc->base.id);
10771
10772                 ret = fill_dc_plane_attributes(
10773                         drm_to_adev(new_plane_crtc->dev),
10774                         dc_new_plane_state,
10775                         new_plane_state,
10776                         new_crtc_state);
10777                 if (ret) {
10778                         dc_plane_state_release(dc_new_plane_state);
10779                         return ret;
10780                 }
10781
10782                 ret = dm_atomic_get_state(state, &dm_state);
10783                 if (ret) {
10784                         dc_plane_state_release(dc_new_plane_state);
10785                         return ret;
10786                 }
10787
10788                 /*
10789                  * Any atomic check errors that occur after this will
10790                  * not need a release. The plane state will be attached
10791                  * to the stream, and therefore part of the atomic
10792                  * state. It'll be released when the atomic state is
10793                  * cleaned.
10794                  */
10795                 if (!dc_add_plane_to_context(
10796                                 dc,
10797                                 dm_new_crtc_state->stream,
10798                                 dc_new_plane_state,
10799                                 dm_state->context)) {
10801                         dc_plane_state_release(dc_new_plane_state);
10802                         return -EINVAL;
10803                 }
10804
10805                 dm_new_plane_state->dc_state = dc_new_plane_state;
10806
10807                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10808
10809                 /* Tell DC to do a full surface update every time there
10810                  * is a plane change. Inefficient, but works for now.
10811                  */
10812                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10813
10814                 *lock_and_validation_needed = true;
10815         }
10816
10818         return ret;
10819 }
10820
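/*
 * Return the plane's source size in whole pixels, swapping width and height
 * for 90/270 degree rotations. src_w/src_h are 16.16 fixed point, hence the
 * shift by 16.
 */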
10821 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10822                                        int *src_w, int *src_h)
10823 {
10824         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10825         case DRM_MODE_ROTATE_90:
10826         case DRM_MODE_ROTATE_270:
10827                 *src_w = plane_state->src_h >> 16;
10828                 *src_h = plane_state->src_w >> 16;
10829                 break;
10830         case DRM_MODE_ROTATE_0:
10831         case DRM_MODE_ROTATE_180:
10832         default:
10833                 *src_w = plane_state->src_w >> 16;
10834                 *src_h = plane_state->src_h >> 16;
10835                 break;
10836         }
10837 }
10838
10839 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10840                                 struct drm_crtc *crtc,
10841                                 struct drm_crtc_state *new_crtc_state)
10842 {
10843         struct drm_plane *cursor = crtc->cursor, *underlying;
10844         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10845         int i;
10846         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10847         int cursor_src_w, cursor_src_h;
10848         int underlying_src_w, underlying_src_h;
10849
10850         /* On DCE and DCN there is no dedicated hardware cursor plane. We get
10851          * a cursor per pipe, but it inherits the scaling and positioning from
10852          * the underlying pipe. Check that the cursor plane's scaling matches
10853          * the underlying planes'. */
10854
10855         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10856         if (!new_cursor_state || !new_cursor_state->fb)
10857                 return 0;
10859
10860         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10861         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10862         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
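        /*
         * Scale factors are in 1/1000 units: a 64x64 cursor source displayed
         * at 64x64 yields 1000 (1:1), while an underlying 1920x1080 plane
         * upscaled to 3840x2160 yields 2000 and would fail the comparison
         * against the cursor's scale in the loop below.
         */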
10863
10864         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10865                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10866                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10867                         continue;
10868
10869                 /* Ignore disabled planes */
10870                 if (!new_underlying_state->fb)
10871                         continue;
10872
10873                 dm_get_oriented_plane_size(new_underlying_state,
10874                                            &underlying_src_w, &underlying_src_h);
10875                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10876                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10877
10878                 if (cursor_scale_w != underlying_scale_w ||
10879                     cursor_scale_h != underlying_scale_h) {
10880                         drm_dbg_atomic(crtc->dev,
10881                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10882                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10883                         return -EINVAL;
10884                 }
10885
10886                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10887                 if (new_underlying_state->crtc_x <= 0 &&
10888                     new_underlying_state->crtc_y <= 0 &&
10889                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10890                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10891                         break;
10892         }
10893
10894         return 0;
10895 }
10896
10897 #if defined(CONFIG_DRM_AMD_DC_DCN)
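/*
 * If the given CRTC is driven by an MST connector, add every CRTC sharing
 * the same MST topology to the atomic state, since a DSC reconfiguration
 * on one stream can affect the bandwidth available to the others.
 */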
10898 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10899 {
10900         struct drm_connector *connector;
10901         struct drm_connector_state *conn_state, *old_conn_state;
10902         struct amdgpu_dm_connector *aconnector = NULL;
10903         int i;

10904         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10905                 if (!conn_state->crtc)
10906                         conn_state = old_conn_state;
10907
10908                 if (conn_state->crtc != crtc)
10909                         continue;
10910
10911                 aconnector = to_amdgpu_dm_connector(connector);
10912                 if (!aconnector->port || !aconnector->mst_port)
10913                         aconnector = NULL;
10914                 else
10915                         break;
10916         }
10917
10918         if (!aconnector)
10919                 return 0;
10920
10921         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10922 }
10923 #endif
10924
10925 /**
10926  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDGPU DM.
10927  * @dev: The DRM device
10928  * @state: The atomic state to commit
10929  *
10930  * Validate that the given atomic state is programmable by DC into hardware.
10931  * This involves constructing a &struct dc_state reflecting the new hardware
10932  * state we wish to commit, then querying DC to see if it is programmable. It's
10933  * important not to modify the existing DC state. Otherwise, atomic_check
10934  * may unexpectedly commit hardware changes.
10935  *
10936  * When validating the DC state, it's important that the right locks are
10937  * acquired. For the full-update case, which removes/adds/updates streams on
10938  * one CRTC while flipping on another CRTC, acquiring the global lock
10939  * guarantees that any such full-update commit will wait for completion of any
10940  * outstanding flip using DRM's synchronization events.
10941  *
10942  * Note that DM adds the affected connectors for all CRTCs in state, even when
10943  * that might not seem necessary. This is because DC stream creation requires the
10944  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10945  * be possible but non-trivial - a possible TODO item.
10946  *
10947  * Return: 0 on success, negative error code on failure.
10948  */
10949 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10950                                   struct drm_atomic_state *state)
10951 {
10952         struct amdgpu_device *adev = drm_to_adev(dev);
10953         struct dm_atomic_state *dm_state = NULL;
10954         struct dc *dc = adev->dm.dc;
10955         struct drm_connector *connector;
10956         struct drm_connector_state *old_con_state, *new_con_state;
10957         struct drm_crtc *crtc;
10958         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10959         struct drm_plane *plane;
10960         struct drm_plane_state *old_plane_state, *new_plane_state;
10961         enum dc_status status;
10962         int ret, i;
10963         bool lock_and_validation_needed = false;
10964         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10965 #if defined(CONFIG_DRM_AMD_DC_DCN)
10966         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10967         struct drm_dp_mst_topology_state *mst_state;
10968         struct drm_dp_mst_topology_mgr *mgr;
10969 #endif
10970
10971         trace_amdgpu_dm_atomic_check_begin(state);
10972
10973         ret = drm_atomic_helper_check_modeset(dev, state);
10974         if (ret) {
10975                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10976                 goto fail;
10977         }
10978
10979         /* Check connector changes */
10980         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10981                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10982                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10983
10984                 /* Skip connectors that are disabled or already part of a modeset. */
10985                 if (!old_con_state->crtc && !new_con_state->crtc)
10986                         continue;
10987
10988                 if (!new_con_state->crtc)
10989                         continue;
10990
10991                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10992                 if (IS_ERR(new_crtc_state)) {
10993                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10994                         ret = PTR_ERR(new_crtc_state);
10995                         goto fail;
10996                 }
10997
10998                 if (dm_old_con_state->abm_level !=
10999                     dm_new_con_state->abm_level)
11000                         new_crtc_state->connectors_changed = true;
11001         }
11002
11003 #if defined(CONFIG_DRM_AMD_DC_DCN)
11004         if (dc_resource_is_dsc_encoding_supported(dc)) {
11005                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11006                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11007                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
11008                                 if (ret) {
11009                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11010                                         goto fail;
11011                                 }
11012                         }
11013                 }
11014                 pre_validate_dsc(state, &dm_state, vars);
11015         }
11016 #endif
11017         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11018                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11019
11020                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11021                     !new_crtc_state->color_mgmt_changed &&
11022                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11023                     !dm_old_crtc_state->dsc_force_changed)
11024                         continue;
11025
11026                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11027                 if (ret) {
11028                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11029                         goto fail;
11030                 }
11031
11032                 if (!new_crtc_state->enable)
11033                         continue;
11034
11035                 ret = drm_atomic_add_affected_connectors(state, crtc);
11036                 if (ret) {
11037                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11038                         goto fail;
11039                 }
11040
11041                 ret = drm_atomic_add_affected_planes(state, crtc);
11042                 if (ret) {
11043                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11044                         goto fail;
11045                 }
11046
11047                 if (dm_old_crtc_state->dsc_force_changed)
11048                         new_crtc_state->mode_changed = true;
11049         }
11050
11051         /*
11052          * Add all primary and overlay planes on the CRTC to the state
11053          * whenever a plane is enabled to maintain correct z-ordering
11054          * and to enable fast surface updates.
11055          */
11056         drm_for_each_crtc(crtc, dev) {
11057                 bool modified = false;
11058
11059                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11060                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11061                                 continue;
11062
11063                         if (new_plane_state->crtc == crtc ||
11064                             old_plane_state->crtc == crtc) {
11065                                 modified = true;
11066                                 break;
11067                         }
11068                 }
11069
11070                 if (!modified)
11071                         continue;
11072
11073                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11074                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11075                                 continue;
11076
11077                         new_plane_state =
11078                                 drm_atomic_get_plane_state(state, plane);
11079
11080                         if (IS_ERR(new_plane_state)) {
11081                                 ret = PTR_ERR(new_plane_state);
11082                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11083                                 goto fail;
11084                         }
11085                 }
11086         }
11087
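        /*
         * Plane and CRTC updates are applied in two passes: state that needs
         * a reset is first removed from the DC context (enable == false),
         * then the new state is added back (enable == true). Removal runs
         * first so that the DC resources being reconfigured are released
         * before the corresponding additions below try to acquire them.
         */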
11088         /* Remove existing planes if they are modified */
11089         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11090                 ret = dm_update_plane_state(dc, state, plane,
11091                                             old_plane_state,
11092                                             new_plane_state,
11093                                             false,
11094                                             &lock_and_validation_needed);
11095                 if (ret) {
11096                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11097                         goto fail;
11098                 }
11099         }
11100
11101         /* Disable all crtcs which require disable */
11102         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11103                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11104                                            old_crtc_state,
11105                                            new_crtc_state,
11106                                            false,
11107                                            &lock_and_validation_needed);
11108                 if (ret) {
11109                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11110                         goto fail;
11111                 }
11112         }
11113
11114         /* Enable all crtcs which require enable */
11115         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11116                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11117                                            old_crtc_state,
11118                                            new_crtc_state,
11119                                            true,
11120                                            &lock_and_validation_needed);
11121                 if (ret) {
11122                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11123                         goto fail;
11124                 }
11125         }
11126
11127         /* Add new/modified planes */
11128         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11129                 ret = dm_update_plane_state(dc, state, plane,
11130                                             old_plane_state,
11131                                             new_plane_state,
11132                                             true,
11133                                             &lock_and_validation_needed);
11134                 if (ret) {
11135                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11136                         goto fail;
11137                 }
11138         }
11139
11140         /* Run this here since we want to validate the streams we created */
11141         ret = drm_atomic_helper_check_planes(dev, state);
11142         if (ret) {
11143                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11144                 goto fail;
11145         }
11146
11147         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11148                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11149                 if (dm_new_crtc_state->mpo_requested)
11150                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11151         }
11152
11153         /* Check cursor planes scaling */
11154         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11155                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11156                 if (ret) {
11157                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11158                         goto fail;
11159                 }
11160         }
11161
11162         if (state->legacy_cursor_update) {
11163                 /*
11164                  * This is a fast cursor update coming from the plane update
11165                  * helper, check if it can be done asynchronously for better
11166                  * performance.
11167                  */
11168                 state->async_update =
11169                         !drm_atomic_helper_async_check(dev, state);
11170
11171                 /*
11172                  * Skip the remaining global validation if this is an async
11173                  * update. Cursor updates can be done without affecting
11174                  * state or bandwidth calcs and this avoids the performance
11175                  * penalty of locking the private state object and
11176                  * allocating a new dc_state.
11177                  */
11178                 if (state->async_update)
11179                         return 0;
11180         }
11181
11182         /* Check scaling and underscan changes */
11183         /* TODO: Scaling-change validation was removed because a new stream
11184          * cannot be committed into the context w/o causing a full reset.
11185          * Need to decide how to handle this.
11186          */
11187         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11188                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11189                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11190                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11191
11192                 /* Skip any modesets/resets */
11193                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11194                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11195                         continue;
11196
11197                 /* Skip anything that is not a scaling or underscan change */
11198                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11199                         continue;
11200
11201                 lock_and_validation_needed = true;
11202         }
11203
11204 #if defined(CONFIG_DRM_AMD_DC_DCN)
11205         /* set the slot info for each mst_state based on the link encoding format */
11206         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11207                 struct amdgpu_dm_connector *aconnector;
11208                 struct drm_connector *connector;
11209                 struct drm_connector_list_iter iter;
11210                 u8 link_coding_cap;
11211
11212                 if (!mgr->mst_state)
11213                         continue;
11214
11215                 drm_connector_list_iter_begin(dev, &iter);
11216                 drm_for_each_connector_iter(connector, &iter) {
11217                         int id = connector->index;
11218
11219                         if (id == mst_state->mgr->conn_base_id) {
11220                                 aconnector = to_amdgpu_dm_connector(connector);
11221                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11222                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11223
11224                                 break;
11225                         }
11226                 }
11227                 drm_connector_list_iter_end(&iter);
11229         }
11230 #endif
11231         /*
11232          * Streams and planes are reset when there are changes that affect
11233          * bandwidth. Anything that affects bandwidth needs to go through
11234          * DC global validation to ensure that the configuration can be applied
11235          * to hardware.
11236          *
11237          * We currently have to stall out here in atomic_check for outstanding
11238          * commits to finish in this case because our IRQ handlers reference
11239          * DRM state directly - we can end up disabling interrupts too early
11240          * if we don't.
11241          *
11242          * TODO: Remove this stall and drop DM state private objects.
11243          */
11244         if (lock_and_validation_needed) {
11245                 ret = dm_atomic_get_state(state, &dm_state);
11246                 if (ret) {
11247                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11248                         goto fail;
11249                 }
11250
11251                 ret = do_aquire_global_lock(dev, state);
11252                 if (ret) {
11253                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11254                         goto fail;
11255                 }
11256
11257 #if defined(CONFIG_DRM_AMD_DC_DCN)
11258                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11259                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                        ret = -EINVAL;
11260                         goto fail;
11261                 }
11262
11263                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11264                 if (ret) {
11265                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11266                         goto fail;
11267                 }
11268 #endif
11269
11270                 /*
11271                  * Perform validation of the MST topology in the state:
11272                  * we need to run the MST atomic check before calling
11273                  * dc_validate_global_state(), or we risk getting stuck in an
11274                  * infinite loop and eventually hanging.
11275                  */
11276                 ret = drm_dp_mst_atomic_check(state);
11277                 if (ret) {
11278                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11279                         goto fail;
11280                 }
11281                 status = dc_validate_global_state(dc, dm_state->context, true);
11282                 if (status != DC_OK) {
11283                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11284                                          dc_status_to_str(status), status);
11285                         ret = -EINVAL;
11286                         goto fail;
11287                 }
11288         } else {
11289                 /*
11290                  * The commit is a fast update. Fast updates shouldn't change
11291                  * the DC context or affect global validation, and their commit
11292                  * work can be done in parallel with other commits not touching
11293                  * the same resource. If we have a new DC context as part of
11294                  * the DM atomic state from validation, we need to free it and
11295                  * retain the existing one instead.
11296                  *
11297                  * Furthermore, since the DM atomic state only contains the DC
11298                  * context and can safely be annulled, we can free the state
11299                  * and clear the associated private object now to free
11300                  * some memory and avoid a possible use-after-free later.
11301                  */
11302
11303                 for (i = 0; i < state->num_private_objs; i++) {
11304                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11305
11306                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11307                                 int j = state->num_private_objs - 1;
11308
11309                                 dm_atomic_destroy_state(obj,
11310                                                 state->private_objs[i].state);
11311
11312                                 /* If i is not at the end of the array then the
11313                                  * last element needs to be moved to where i was
11314                                  * before the array can safely be truncated.
11315                                  */
11316                                 if (i != j)
11317                                         state->private_objs[i] =
11318                                                 state->private_objs[j];
11319
11320                                 state->private_objs[j].ptr = NULL;
11321                                 state->private_objs[j].state = NULL;
11322                                 state->private_objs[j].old_state = NULL;
11323                                 state->private_objs[j].new_state = NULL;
11324
11325                                 state->num_private_objs = j;
11326                                 break;
11327                         }
11328                 }
11329         }
11330
11331         /* Store the overall update type for use later in atomic check. */
11332         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11333                 struct dm_crtc_state *dm_new_crtc_state =
11334                         to_dm_crtc_state(new_crtc_state);
11335
11336                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11337                                                          UPDATE_TYPE_FULL :
11338                                                          UPDATE_TYPE_FAST;
11339         }
11340
11341         /* Must be success (ret == 0) at this point */
11342         WARN_ON(ret);
11343
11344         trace_amdgpu_dm_atomic_check_finish(state, ret);
11345
11346         return ret;
11347
11348 fail:
11349         if (ret == -EDEADLK)
11350                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11351         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11352                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11353         else
11354                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11355
11356         trace_amdgpu_dm_atomic_check_finish(state, ret);
11357
11358         return ret;
11359 }
11360
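/*
 * Check the DP_MSA_TIMING_PAR_IGNORED capability bit in the sink's
 * DP_DOWN_STREAM_PORT_COUNT DPCD register: a sink that ignores the MSA
 * timing parameters can be driven with adjusted vertical timings, which is
 * a prerequisite for FreeSync over DP.
 */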
11361 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11362                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11363 {
11364         uint8_t dpcd_data;
11365         bool capable = false;
11366
11367         if (amdgpu_dm_connector->dc_link &&
11368                 dm_helpers_dp_read_dpcd(
11369                                 NULL,
11370                                 amdgpu_dm_connector->dc_link,
11371                                 DP_DOWN_STREAM_PORT_COUNT,
11372                                 &dpcd_data,
11373                                 sizeof(dpcd_data))) {
11374                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
11375         }
11376
11377         return capable;
11378 }
11379
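/*
 * Hand one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to the DMUB firmware for parsing, then decode the reply:
 * either an ACK for the chunk or the parsed AMD VSDB with the supported
 * refresh rate range.
 */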
11380 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11381                 unsigned int offset,
11382                 unsigned int total_length,
11383                 uint8_t *data,
11384                 unsigned int length,
11385                 struct amdgpu_hdmi_vsdb_info *vsdb)
11386 {
11387         bool res;
11388         union dmub_rb_cmd cmd;
11389         struct dmub_cmd_send_edid_cea *input;
11390         struct dmub_cmd_edid_cea_output *output;
11391
11392         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11393                 return false;
11394
11395         memset(&cmd, 0, sizeof(cmd));
11396
11397         input = &cmd.edid_cea.data.input;
11398
11399         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11400         cmd.edid_cea.header.sub_type = 0;
11401         cmd.edid_cea.header.payload_bytes =
11402                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11403         input->offset = offset;
11404         input->length = length;
11405         input->cea_total_length = total_length;
11406         memcpy(input->payload, data, length);
11407
11408         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11409         if (!res) {
11410                 DRM_ERROR("EDID CEA parser failed\n");
11411                 return false;
11412         }
11413
11414         output = &cmd.edid_cea.data.output;
11415
11416         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11417                 if (!output->ack.success) {
11418                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11419                                         output->ack.offset);
11420                 }
11421         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11422                 if (!output->amd_vsdb.vsdb_found)
11423                         return false;
11424
11425                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11426                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11427                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11428                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11429         } else {
11430                 DRM_WARN("Unknown EDID CEA parser results\n");
11431                 return false;
11432         }
11433
11434         return true;
11435 }
11436
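/*
 * Feed a CEA extension block to the DMCU firmware parser in 8-byte chunks
 * (a 128-byte EDID block therefore takes 16 transfers) and retrieve the
 * AMD VSDB parse result after the last chunk has been sent.
 */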
11437 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11438                 uint8_t *edid_ext, int len,
11439                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11440 {
11441         int i;
11442
11443         /* send extension block to DMCU for parsing */
11444         for (i = 0; i < len; i += 8) {
11445                 bool res;
11446                 int offset;
11447
11448                 /* send 8 bytes at a time */
11449                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11450                         return false;
11451
11452                 if (i + 8 == len) {
11453                         /* EDID block fully sent; expect the parse result */
11454                         int version, min_rate, max_rate;
11455
11456                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11457                         if (res) {
11458                                 /* amd vsdb found */
11459                                 vsdb_info->freesync_supported = 1;
11460                                 vsdb_info->amd_vsdb_version = version;
11461                                 vsdb_info->min_refresh_rate_hz = min_rate;
11462                                 vsdb_info->max_refresh_rate_hz = max_rate;
11463                                 return true;
11464                         }
11465                         /* not amd vsdb */
11466                         return false;
11467                 }
11468
11469                 /* check for ack */
11470                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11471                 if (!res)
11472                         return false;
11473         }
11474
11475         return false;
11476 }
11477
11478 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11479                 uint8_t *edid_ext, int len,
11480                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11481 {
11482         int i;
11483
11484         /* send extension block to DMUB for parsing */
11485         for (i = 0; i < len; i += 8) {
11486                 /* send 8 bytes at a time */
11487                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11488                         return false;
11489         }
11490
11491         return vsdb_info->freesync_supported;
11492 }
11493
11494 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11495                 uint8_t *edid_ext, int len,
11496                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11497 {
11498         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11499
11500         if (adev->dm.dmub_srv)
11501                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11502         else
11503                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11504 }
11505
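/*
 * Search the EDID for a CEA extension block and parse it for an AMD
 * vendor-specific data block. Returns the index of the extension block
 * containing a valid VSDB, or -ENODEV if none was found.
 */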
11506 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11507                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11508 {
11509         uint8_t *edid_ext = NULL;
11510         int i;
11511         bool valid_vsdb_found = false;
11512
11513         /*----- drm_find_cea_extension() -----*/
11514         /* No EDID or EDID extensions */
11515         if (edid == NULL || edid->extensions == 0)
11516                 return -ENODEV;
11517
11518         /* Find CEA extension */
11519         for (i = 0; i < edid->extensions; i++) {
11520                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11521                 if (edid_ext[0] == CEA_EXT)
11522                         break;
11523         }
11524
11525         if (i == edid->extensions)
11526                 return -ENODEV;
11527
11528         /*----- cea_db_offsets() -----*/
11529         if (edid_ext[0] != CEA_EXT)
11530                 return -ENODEV;
11531
11532         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11533
11534         return valid_vsdb_found ? i : -ENODEV;
11535 }
11536
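/**
 * amdgpu_dm_update_freesync_caps - Update the connector's FreeSync state
 * @connector: The DRM connector to update
 * @edid: The EDID to parse, or NULL to clear the FreeSync capabilities
 *
 * Parse the monitor range descriptor (DP/eDP) or the AMD vendor-specific
 * data block (HDMI) out of @edid, record the supported refresh rate range
 * on the connector, and update its "vrr_capable" property accordingly.
 */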
11537 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11538                                         struct edid *edid)
11539 {
11540         int i = 0;
11541         struct detailed_timing *timing;
11542         struct detailed_non_pixel *data;
11543         struct detailed_data_monitor_range *range;
11544         struct amdgpu_dm_connector *amdgpu_dm_connector =
11545                         to_amdgpu_dm_connector(connector);
11546         struct dm_connector_state *dm_con_state = NULL;
11547         struct dc_sink *sink;
11548
11549         struct drm_device *dev = connector->dev;
11550         struct amdgpu_device *adev = drm_to_adev(dev);
11551         bool freesync_capable = false;
11552         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11553
11554         if (!connector->state) {
11555                 DRM_ERROR("%s - Connector has no state\n", __func__);
11556                 goto update;
11557         }
11558
11559         sink = amdgpu_dm_connector->dc_sink ?
11560                 amdgpu_dm_connector->dc_sink :
11561                 amdgpu_dm_connector->dc_em_sink;
11562
11563         if (!edid || !sink) {
11564                 dm_con_state = to_dm_connector_state(connector->state);
11565
11566                 amdgpu_dm_connector->min_vfreq = 0;
11567                 amdgpu_dm_connector->max_vfreq = 0;
11568                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11569                 connector->display_info.monitor_range.min_vfreq = 0;
11570                 connector->display_info.monitor_range.max_vfreq = 0;
11571                 freesync_capable = false;
11572
11573                 goto update;
11574         }
11575
11576         dm_con_state = to_dm_connector_state(connector->state);
11577
11578         if (!adev->dm.freesync_module)
11579                 goto update;
11580
11582         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
11583             sink->sink_signal == SIGNAL_TYPE_EDP) {
11584                 bool edid_check_required = false;
11585
11586                 if (edid) {
11587                         edid_check_required = is_dp_capable_without_timing_msa(
11588                                                 adev->dm.dc,
11589                                                 amdgpu_dm_connector);
11590                 }
11591
11592                 if (edid_check_required && (edid->version > 1 ||
11593                     (edid->version == 1 && edid->revision > 1))) {
11594                         for (i = 0; i < 4; i++) {
11596                                 timing  = &edid->detailed_timings[i];
11597                                 data    = &timing->data.other_data;
11598                                 range   = &data->data.range;
11599                                 /*
11600                                  * Check if monitor has continuous frequency mode
11601                                  */
11602                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11603                                         continue;
11604                                 /*
11605                                  * Check for range limits only. If flags == 1,
11606                                  * no additional timing information is provided.
11607                                  * Default GTF, GTF secondary curve and CVT are
11608                                  * not supported.
11609                                  */
11610                                 if (range->flags != 1)
11611                                         continue;
11612
11613                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11614                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11615                                 amdgpu_dm_connector->pixel_clock_mhz =
11616                                         range->pixel_clock_mhz * 10;
11617
11618                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11619                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11620
11621                                 break;
11622                         }
11623
11624                         if (amdgpu_dm_connector->max_vfreq -
11625                             amdgpu_dm_connector->min_vfreq > 10)
11626                                 freesync_capable = true;
11629                 }
        } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
                /* FreeSync over HDMI is advertised via the AMD vendor-specific data block. */
                i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
                if (i >= 0 && vsdb_info.freesync_supported) {
                        timing  = &edid->detailed_timings[i];
                        data    = &timing->data.other_data;

                        amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
                        amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
                        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;

                        connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
                        connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
                }
        }

update:
        if (dm_con_state)
                dm_con_state->freesync_capable = freesync_capable;

        if (connector->vrr_capable_property)
                drm_connector_set_vrr_capable_property(connector,
                                                       freesync_capable);
}

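/*
 * Apply the force_timing_sync setting (typically toggled via debugfs) to
 * every stream in the current DC state and re-run per-frame CRTC master
 * synchronization, all under the DC lock.
 */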
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dc *dc = adev->dm.dc;
        int i;

        mutex_lock(&adev->dm.dc_lock);
        if (dc->current_state) {
                for (i = 0; i < dc->current_state->stream_count; ++i)
                        dc->current_state->streams[i]
                                ->triggered_crtc_reset.enabled =
                                adev->dm.force_timing_sync;

                dm_enable_per_frame_crtc_master_sync(dc->current_state);
                dc_trigger_sync(dc, dc->current_state);
        }
        mutex_unlock(&adev->dm.dc_lock);
}

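/*
 * Low-level register write helper for DC. The access goes through CGS and is
 * recorded by the amdgpu_dc_wreg tracepoint; func_name is accepted for
 * symmetry with the DM register-access macros but is unused here.
 */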
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
                       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register write. address = 0\n");
                return;
        }
#endif
        cgs_write_register(ctx->cgs_device, address, value);
        trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

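/*
 * Low-level register read counterpart to dm_write_reg_func(). Reads are
 * rejected while a DMUB register-offload gather is in progress, since the
 * value cannot be returned synchronously in that mode.
 */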
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
                          const char *func_name)
{
        uint32_t value;
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register read. address = 0\n");
                return 0;
        }
#endif

        if (ctx->dmub_srv &&
            ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
            !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
                ASSERT(false);
                return 0;
        }

        value = cgs_read_register(ctx->cgs_device, address);

        trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

        return value;
}

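/*
 * Translate the outcome of a DMUB async-to-sync access into the caller's
 * operation_result. Returns the AUX reply length on a successful AUX
 * command, 0 on a successful SET_CONFIG, and -1 on any failure.
 */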
static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
                                                struct dc_context *ctx,
                                                uint8_t status_type,
                                                uint32_t *operation_result)
{
        struct amdgpu_device *adev = ctx->driver_context;
        int return_status = -1;
        struct dmub_notification *p_notify = adev->dm.dmub_notify;

        if (is_cmd_aux) {
                if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
                        return_status = p_notify->aux_reply.length;
                        *operation_result = p_notify->result;
                } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
                        *operation_result = AUX_RET_ERROR_TIMEOUT;
                } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
                        *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
                } else {
                        *operation_result = AUX_RET_ERROR_UNKNOWN;
                }
        } else {
                if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
                        return_status = 0;
                        *operation_result = p_notify->sc_status;
                } else {
                        *operation_result = SET_CONFIG_UNKNOWN_ERROR;
                }
        }

        return return_status;
}

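/*
 * Issue an AUX or SET_CONFIG request to DMUB and wait up to 10 seconds for
 * the dmub_aux_transfer_done completion signalled by the notification
 * handler. For AUX reads that were ACKed, the reply payload is copied back
 * into the caller's aux_payload before the status is reported.
 *
 * Illustrative call (a sketch only; assumes the caller already holds a
 * dc_context and a populated aux_payload):
 *
 *	uint32_t op_result;
 *	int len = amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
 *				link_index, &payload, &op_result);
 */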
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
        unsigned int link_index, void *cmd_payload, void *operation_result)
{
        struct amdgpu_device *adev = ctx->driver_context;
        int ret = 0;

        if (is_cmd_aux) {
                dc_process_dmub_aux_transfer_async(ctx->dc,
                        link_index, (struct aux_payload *)cmd_payload);
        } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
                                        (struct set_config_cmd_payload *)cmd_payload,
                                        adev->dm.dmub_notify)) {
                return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
                                        ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
                                        (uint32_t *)operation_result);
        }

        ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
        if (ret == 0) {
                DRM_ERROR("wait_for_completion_timeout() timed out!");
                return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
                                ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
                                (uint32_t *)operation_result);
        }

        if (is_cmd_aux) {
                if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
                        struct aux_payload *payload = (struct aux_payload *)cmd_payload;

                        payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
                        /* Copy the reply data back for ACKed AUX reads. */
                        if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
                            payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
                                memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
                                       adev->dm.dmub_notify->aux_reply.length);
                        }
                }
        }

        return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
                        ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
                        (uint32_t *)operation_result);
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on CHIP_VANGOGH.
 * If everything goes well, we may consider expanding
 * seamless boot to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VANGOGH:
                /*
                 * Seamless boot additionally requires that the stolen VGA
                 * memory is not kept reserved by the driver.
                 */
                if (!adev->mman.keep_stolen_vga_memory)
                        return true;
                break;
        default:
                break;
        }

        return false;
}
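
/*
 * Illustrative use during DM initialization (a sketch only; the flag name
 * below is hypothetical and for illustration):
 *
 *	if (check_seamless_boot_capability(adev))
 *		init_data.flags.seamless_boot_requested = true;
 */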