drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64
65 #include "ivsrcid/ivsrcid_vislands30.h"
66
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93
94 #include "soc15_common.h"
95
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
118 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
120
121 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
123
124 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
125 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
126
127 /* Number of bytes in PSP header for firmware. */
128 #define PSP_HEADER_BYTES 0x100
129
130 /* Number of bytes in PSP footer for firmware. */
131 #define PSP_FOOTER_BYTES 0x100
132
133 /**
134  * DOC: overview
135  *
136  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
137  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
138  * requests into DC requests, and DC responses into DRM responses.
139  *
140  * The root control structure is &struct amdgpu_display_manager.
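 *
 * For example (an illustrative flow, based on the handlers declared further
 * down in this file): a userspace atomic commit is validated by
 * amdgpu_dm_atomic_check() and applied by amdgpu_dm_atomic_commit_tail(),
 * which translates the committed DRM state into DC structures and
 * programs the hardware through the dc interface.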
141  */
142
143 /* basic init/fini API */
144 static int amdgpu_dm_init(struct amdgpu_device *adev);
145 static void amdgpu_dm_fini(struct amdgpu_device *adev);
146 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
147                                    struct amdgpu_dm_connector *aconnector);
147
148 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
149 {
150         switch (link->dpcd_caps.dongle_type) {
151         case DISPLAY_DONGLE_NONE:
152                 return DRM_MODE_SUBCONNECTOR_Native;
153         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
154                 return DRM_MODE_SUBCONNECTOR_VGA;
155         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
156         case DISPLAY_DONGLE_DP_DVI_DONGLE:
157                 return DRM_MODE_SUBCONNECTOR_DVID;
158         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
159         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
160                 return DRM_MODE_SUBCONNECTOR_HDMIA;
161         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
162         default:
163                 return DRM_MODE_SUBCONNECTOR_Unknown;
164         }
165 }
166
167 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
168 {
169         struct dc_link *link = aconnector->dc_link;
170         struct drm_connector *connector = &aconnector->base;
171         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
172
173         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
174                 return;
175
176         if (aconnector->dc_sink)
177                 subconnector = get_subconnector_type(link);
178
179         drm_object_property_set_value(&connector->base,
180                         connector->dev->mode_config.dp_subconnector_property,
181                         subconnector);
182 }
183
184 /*
185  * Initializes drm_device display related structures, based on the information
186  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
187  * drm_encoder and drm_mode_config.
188  *
189  * Returns 0 on success
190  */
191 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
192 /* removes and deallocates the drm structures, created by the above function */
193 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
194
195 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
196                                 struct drm_plane *plane,
197                                 unsigned long possible_crtcs,
198                                 const struct dc_plane_cap *plane_cap);
199 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
200                                struct drm_plane *plane,
201                                uint32_t link_index);
202 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
203                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
204                                     uint32_t link_index,
205                                     struct amdgpu_encoder *amdgpu_encoder);
206 static int amdgpu_dm_encoder_init(struct drm_device *dev,
207                                   struct amdgpu_encoder *aencoder,
208                                   uint32_t link_index);
209
210 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
211
212 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
213
214 static int amdgpu_dm_atomic_check(struct drm_device *dev,
215                                   struct drm_atomic_state *state);
216
217 static void handle_cursor_update(struct drm_plane *plane,
218                                  struct drm_plane_state *old_plane_state);
219
220 static const struct drm_format_info *
221 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
222
223 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
224 static void handle_hpd_rx_irq(void *param);
225
226 static bool
227 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
228                                  struct drm_crtc_state *new_crtc_state);
229 /**
230  * dm_vblank_get_counter() - Get counter of vertical blanks for a CRTC
231  * @adev: amdgpu device to query
232  * @crtc: index of the CRTC to get the counter from
233  *
234  * Return: counter of vertical blanks, or 0 if @crtc is out of range
235  * or has no stream attached.
236  */
242 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
243 {
244         if (crtc >= adev->mode_info.num_crtc)
245                 return 0;
246         else {
247                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
248
249                 if (acrtc->dm_irq_params.stream == NULL) {
250                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
251                                   crtc);
252                         return 0;
253                 }
254
255                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
256         }
257 }
258
259 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
260                                   u32 *vbl, u32 *position)
261 {
262         uint32_t v_blank_start, v_blank_end, h_position, v_position;
263
264         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
265                 return -EINVAL;
266         else {
267                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
268
269                 if (acrtc->dm_irq_params.stream == NULL) {
270                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
271                                   crtc);
272                         return 0;
273                 }
274
275                 /*
276                  * TODO rework base driver to use values directly.
277                  * for now parse it back into reg-format
278                  */
279                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
280                                          &v_blank_start,
281                                          &v_blank_end,
282                                          &h_position,
283                                          &v_position);
284
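                /*
                 * Packing note (illustrative): the low 16 bits of *position
                 * carry the vertical position and the high 16 bits the
                 * horizontal position; *vbl packs v_blank_start in the low
                 * half and v_blank_end in the high half, matching the
                 * legacy register format mentioned in the TODO above.
                 */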
285                 *position = v_position | (h_position << 16);
286                 *vbl = v_blank_start | (v_blank_end << 16);
287         }
288
289         return 0;
290 }
291
292 static bool dm_is_idle(void *handle)
293 {
294         /* XXX todo */
295         return true;
296 }
297
298 static int dm_wait_for_idle(void *handle)
299 {
300         /* XXX todo */
301         return 0;
302 }
303
304 static bool dm_check_soft_reset(void *handle)
305 {
306         return false;
307 }
308
309 static int dm_soft_reset(void *handle)
310 {
311         /* XXX todo */
312         return 0;
313 }
314
315 static struct amdgpu_crtc *
316 get_crtc_by_otg_inst(struct amdgpu_device *adev,
317                      int otg_inst)
318 {
319         struct drm_device *dev = adev_to_drm(adev);
320         struct drm_crtc *crtc;
321         struct amdgpu_crtc *amdgpu_crtc;
322
323         if (WARN_ON(otg_inst == -1))
324                 return adev->mode_info.crtcs[0];
325
326         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
327                 amdgpu_crtc = to_amdgpu_crtc(crtc);
328
329                 if (amdgpu_crtc->otg_inst == otg_inst)
330                         return amdgpu_crtc;
331         }
332
333         return NULL;
334 }
335
336 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
337 {
338         return acrtc->dm_irq_params.freesync_config.state ==
339                        VRR_STATE_ACTIVE_VARIABLE ||
340                acrtc->dm_irq_params.freesync_config.state ==
341                        VRR_STATE_ACTIVE_FIXED;
342 }
343
344 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
345 {
346         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
347                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
348 }
349
350 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
351                                               struct dm_crtc_state *new_state)
352 {
353         if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
354                 return true;
355         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
356                 return true;
357         else
358                 return false;
359 }
360
361 /**
362  * dm_pflip_high_irq() - Handle pageflip interrupt
363  * @interrupt_params: interrupt parameters, used to find the CRTC instance
364  *
365  * Handles the pageflip interrupt by notifying all interested parties
366  * that the pageflip has been completed.
367  */
368 static void dm_pflip_high_irq(void *interrupt_params)
369 {
370         struct amdgpu_crtc *amdgpu_crtc;
371         struct common_irq_params *irq_params = interrupt_params;
372         struct amdgpu_device *adev = irq_params->adev;
373         unsigned long flags;
374         struct drm_pending_vblank_event *e;
375         uint32_t vpos, hpos, v_blank_start, v_blank_end;
376         bool vrr_active;
377
378         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
379
380         /* IRQ could occur when in initial stage */
381         /* TODO work and BO cleanup */
382         if (amdgpu_crtc == NULL) {
383                 DC_LOG_PFLIP("CRTC is null, returning.\n");
384                 return;
385         }
386
387         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
388
389         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
390                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
391                                                  amdgpu_crtc->pflip_status,
392                                                  AMDGPU_FLIP_SUBMITTED,
393                                                  amdgpu_crtc->crtc_id,
394                                                  amdgpu_crtc);
395                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
396                 return;
397         }
398
399         /* page flip completed. */
400         e = amdgpu_crtc->event;
401         amdgpu_crtc->event = NULL;
402
403         WARN_ON(!e);
404
405         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
406
407         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
408         if (!vrr_active ||
409             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
410                                       &v_blank_end, &hpos, &vpos) ||
411             (vpos < v_blank_start)) {
412                 /* Update to correct count and vblank timestamp if racing with
413                  * vblank irq. This also updates to the correct vblank timestamp
414                  * even in VRR mode, as scanout is past the front-porch atm.
415                  */
416                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
417
418                 /* Wake up userspace by sending the pageflip event with proper
419                  * count and timestamp of vblank of flip completion.
420                  */
421                 if (e) {
422                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
423
424                         /* Event sent, so done with vblank for this flip */
425                         drm_crtc_vblank_put(&amdgpu_crtc->base);
426                 }
427         } else if (e) {
428                 /* VRR active and inside front-porch: vblank count and
429                  * timestamp for pageflip event will only be up to date after
430                  * drm_crtc_handle_vblank() has been executed from late vblank
431                  * irq handler after start of back-porch (vline 0). We queue the
432                  * pageflip event for send-out by drm_crtc_handle_vblank() with
433                  * updated timestamp and count, once it runs after us.
434                  *
435                  * We need to open-code this instead of using the helper
436                  * drm_crtc_arm_vblank_event(), as that helper would
437                  * call drm_crtc_accurate_vblank_count(), which we must
438                  * not call in VRR mode while we are in front-porch!
439                  */
440
441                 /* sequence will be replaced by real count during send-out. */
442                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
443                 e->pipe = amdgpu_crtc->crtc_id;
444
445                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
446                 e = NULL;
447         }
448
449         /* Keep track of vblank of this flip for flip throttling. We use the
450          * cooked hw counter, as that one is incremented at the start of this
451          * vblank of pageflip completion, so last_flip_vblank is the forbidden
452          * count for queueing new pageflips if vsync + VRR is enabled.
453          */
454         amdgpu_crtc->dm_irq_params.last_flip_vblank =
455                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
456
457         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
458         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
459
460         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
461                      amdgpu_crtc->crtc_id, amdgpu_crtc,
462                      vrr_active, (int) !e);
463 }
464
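/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, in VRR mode, performs core
 * vblank handling after the end of front-porch (see the comments in
 * the function body).
 */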
465 static void dm_vupdate_high_irq(void *interrupt_params)
466 {
467         struct common_irq_params *irq_params = interrupt_params;
468         struct amdgpu_device *adev = irq_params->adev;
469         struct amdgpu_crtc *acrtc;
470         struct drm_device *drm_dev;
471         struct drm_vblank_crtc *vblank;
472         ktime_t frame_duration_ns, previous_timestamp;
473         unsigned long flags;
474         int vrr_active;
475
476         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
477
478         if (acrtc) {
479                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
480                 drm_dev = acrtc->base.dev;
481                 vblank = &drm_dev->vblank[acrtc->base.index];
482                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
483                 frame_duration_ns = vblank->time - previous_timestamp;
484
485                 if (frame_duration_ns > 0) {
486                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
487                                                 frame_duration_ns,
488                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
489                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
490                 }
491
492                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
493                               acrtc->crtc_id,
494                               vrr_active);
495
496                 /* Core vblank handling is done here after end of front-porch in
497                  * vrr mode, as vblank timestamping will only give valid results
498                  * now that scanout is past the front-porch. This will also
499                  * deliver any page-flip completion events that have been queued
500                  * to us if a pageflip happened inside front-porch.
501                  */
502                 if (vrr_active) {
503                         drm_crtc_handle_vblank(&acrtc->base);
504
505                         /* BTR processing for pre-DCE12 ASICs */
506                         if (acrtc->dm_irq_params.stream &&
507                             adev->family < AMDGPU_FAMILY_AI) {
508                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
509                                 mod_freesync_handle_v_update(
510                                     adev->dm.freesync_module,
511                                     acrtc->dm_irq_params.stream,
512                                     &acrtc->dm_irq_params.vrr_params);
513
514                                 dc_stream_adjust_vmin_vmax(
515                                     adev->dm.dc,
516                                     acrtc->dm_irq_params.stream,
517                                     &acrtc->dm_irq_params.vrr_params.adjust);
518                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
519                         }
520                 }
521         }
522 }
523
524 /**
525  * dm_crtc_high_irq() - Handles CRTC interrupt
526  * @interrupt_params: used for determining the CRTC instance
527  *
528  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
529  * event handler.
530  */
531 static void dm_crtc_high_irq(void *interrupt_params)
532 {
533         struct common_irq_params *irq_params = interrupt_params;
534         struct amdgpu_device *adev = irq_params->adev;
535         struct amdgpu_crtc *acrtc;
536         unsigned long flags;
537         int vrr_active;
538
539         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
540         if (!acrtc)
541                 return;
542
543         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
544
545         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
546                       vrr_active, acrtc->dm_irq_params.active_planes);
547
548         /*
549          * Core vblank handling at start of front-porch is only possible
550          * in non-vrr mode, as only then will vblank timestamping give
551          * valid results while done in front-porch. Otherwise defer it
552          * to dm_vupdate_high_irq after end of front-porch.
553          */
554         if (!vrr_active)
555                 drm_crtc_handle_vblank(&acrtc->base);
556
557         /*
558          * The following must happen at start of vblank, for crc
559          * computation and below-the-range btr support in vrr mode.
560          */
561         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
562
563         /* BTR updates need to happen before VUPDATE on Vega and above. */
564         if (adev->family < AMDGPU_FAMILY_AI)
565                 return;
566
567         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
568
569         if (acrtc->dm_irq_params.stream &&
570             acrtc->dm_irq_params.vrr_params.supported &&
571             acrtc->dm_irq_params.freesync_config.state ==
572                     VRR_STATE_ACTIVE_VARIABLE) {
573                 mod_freesync_handle_v_update(adev->dm.freesync_module,
574                                              acrtc->dm_irq_params.stream,
575                                              &acrtc->dm_irq_params.vrr_params);
576
577                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
578                                            &acrtc->dm_irq_params.vrr_params.adjust);
579         }
580
581         /*
582          * If there aren't any active_planes then DCH HUBP may be clock-gated.
583          * In that case, pageflip completion interrupts won't fire and pageflip
584          * completion events won't get delivered. Prevent this by sending
585          * pending pageflip events from here if a flip is still pending.
586          *
587          * If any planes are enabled, use dm_pflip_high_irq() instead, to
588          * avoid race conditions between flip programming and completion,
589          * which could cause too early flip completion events.
590          */
591         if (adev->family >= AMDGPU_FAMILY_RV &&
592             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
593             acrtc->dm_irq_params.active_planes == 0) {
594                 if (acrtc->event) {
595                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
596                         acrtc->event = NULL;
597                         drm_crtc_vblank_put(&acrtc->base);
598                 }
599                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
600         }
601
602         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
603 }
604
605 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
606 /**
607  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
608  * DCN generation ASICs
609  * @interrupt_params: interrupt parameters
610  *
611  * Used to set crc window/read out crc value at vertical line 0 position
612  */
613 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
614 {
615         struct common_irq_params *irq_params = interrupt_params;
616         struct amdgpu_device *adev = irq_params->adev;
617         struct amdgpu_crtc *acrtc;
618
619         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620
621         if (!acrtc)
622                 return;
623
624         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
625 }
626 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
627
628 /**
629  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
630  * @adev: amdgpu_device pointer
631  * @notify: dmub notification structure
632  *
633  * DMUB AUX or SET_CONFIG command completion processing callback.
634  * Copies the dmub notification to DM, to be read by the AUX-command-issuing
635  * thread, and signals the event to wake up that thread.
636  */
637 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
638                                         struct dmub_notification *notify)
639 {
640         if (adev->dm.dmub_notify)
641                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
642         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
643                 complete(&adev->dm.dmub_aux_transfer_done);
644 }
645
646 /**
647  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
648  * @adev: amdgpu_device pointer
649  * @notify: dmub notification structure
650  *
651  * DMUB HPD interrupt processing callback. Gets the display index through
652  * the link index and calls the helper to do the processing.
653  */
654 static void dmub_hpd_callback(struct amdgpu_device *adev,
655                               struct dmub_notification *notify)
656 {
657         struct amdgpu_dm_connector *aconnector;
658         struct amdgpu_dm_connector *hpd_aconnector = NULL;
659         struct drm_connector *connector;
660         struct drm_connector_list_iter iter;
661         struct dc_link *link;
662         uint8_t link_index = 0;
663         struct drm_device *dev;
664
665         if (adev == NULL)
666                 return;
667
668         if (notify == NULL) {
669                 DRM_ERROR("DMUB HPD callback notification was NULL");
670                 return;
671         }
672
673         if (notify->link_index >= adev->dm.dc->link_count) {
674                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
675                 return;
676         }
677
678         link_index = notify->link_index;
679         link = adev->dm.dc->links[link_index];
680         dev = adev->dm.ddev;
681
682         drm_connector_list_iter_begin(dev, &iter);
683         drm_for_each_connector_iter(connector, &iter) {
684                 aconnector = to_amdgpu_dm_connector(connector);
685                 if (link && aconnector->dc_link == link) {
686                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
687                         hpd_aconnector = aconnector;
688                         break;
689                 }
690         }
691         drm_connector_list_iter_end(&iter);
692
693         if (hpd_aconnector) {
694                 if (notify->type == DMUB_NOTIFICATION_HPD)
695                         handle_hpd_irq_helper(hpd_aconnector);
696                 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
697                         handle_hpd_rx_irq(hpd_aconnector);
698         }
699 }
700
701 /**
702  * register_dmub_notify_callback - Sets callback for DMUB notify
703  * @adev: amdgpu_device pointer
704  * @type: Type of dmub notification
705  * @callback: Dmub interrupt callback function
706  * @dmub_int_thread_offload: offload indicator
707  *
708  * API to register a dmub callback handler for a dmub notification.
709  * Also sets an indicator whether callback processing is to be offloaded
710  * to the dmub interrupt handling thread.
711  * Return: true if successfully registered, false if there is existing registration
712  */
713 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
714                                           enum dmub_notification_type type,
715                                           dmub_notify_interrupt_callback_t callback,
716                                           bool dmub_int_thread_offload)
717 {
718         if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
719                 adev->dm.dmub_callback[type] = callback;
720                 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
721         } else
722                 return false;
723
724         return true;
725 }
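
/*
 * Example (illustrative sketch): during init, DM registers its handlers
 * roughly as
 *
 *      register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *                                    dmub_aux_setconfig_callback, false);
 *      register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                    dmub_hpd_callback, true);
 *
 * offloading HPD processing to the dmub interrupt handling thread while
 * handling AUX replies directly in the interrupt handler.
 */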
726
727 static void dm_handle_hpd_work(struct work_struct *work)
728 {
729         struct dmub_hpd_work *dmub_hpd_wrk;
730
731         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
732
733         if (!dmub_hpd_wrk->dmub_notify) {
734                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
735                 return;
736         }
737
738         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
739                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
740                 dmub_hpd_wrk->dmub_notify);
741         }
742
743         kfree(dmub_hpd_wrk->dmub_notify);
744         kfree(dmub_hpd_wrk);
746 }
747
748 #define DMUB_TRACE_MAX_READ 64
749 /**
750  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
751  * @interrupt_params: used for determining the Outbox instance
752  *
753  * Handles the Outbox interrupt by draining pending DMUB notifications
754  * and DMUB trace-buffer entries.
755  */
756 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
757 {
758         struct dmub_notification notify;
759         struct common_irq_params *irq_params = interrupt_params;
760         struct amdgpu_device *adev = irq_params->adev;
761         struct amdgpu_display_manager *dm = &adev->dm;
762         struct dmcub_trace_buf_entry entry = { 0 };
763         uint32_t count = 0;
764         struct dmub_hpd_work *dmub_hpd_wrk;
765         struct dc_link *plink = NULL;
766
767         if (dc_enable_dmub_notifications(adev->dm.dc) &&
768                 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
769
770                 do {
771                         dc_stat_get_dmub_notification(adev->dm.dc, &notify);
772                         if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
773                                 DRM_ERROR("DM: notify type %d invalid!", notify.type);
774                                 continue;
775                         }
776                         if (!dm->dmub_callback[notify.type]) {
777                                 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
778                                 continue;
779                         }
780                         if (dm->dmub_thread_offload[notify.type] == true) {
781                                 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
782                                 if (!dmub_hpd_wrk) {
783                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk");
784                                         return;
785                                 }
786                                 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
787                                 if (!dmub_hpd_wrk->dmub_notify) {
788                                         kfree(dmub_hpd_wrk);
789                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
790                                         return;
791                                 }
792                                 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
793                                 memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
795                                 dmub_hpd_wrk->adev = adev;
796                                 if (notify.type == DMUB_NOTIFICATION_HPD) {
797                                         plink = adev->dm.dc->links[notify.link_index];
798                                         if (plink) {
799                                                 plink->hpd_status =
800                                                         notify.hpd_status == DP_HPD_PLUG;
801                                         }
802                                 }
803                                 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
804                         } else {
805                                 dm->dmub_callback[notify.type](adev, &notify);
806                         }
807                 } while (notify.pending_notification);
808         }
809
811         do {
812                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
813                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
814                                                         entry.param0, entry.param1);
815
816                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
817                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
818                 } else
819                         break;
820
821                 count++;
822
823         } while (count <= DMUB_TRACE_MAX_READ);
824
825         if (count > DMUB_TRACE_MAX_READ)
826                 DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
827 }
828
829 static int dm_set_clockgating_state(void *handle,
830                   enum amd_clockgating_state state)
831 {
832         return 0;
833 }
834
835 static int dm_set_powergating_state(void *handle,
836                   enum amd_powergating_state state)
837 {
838         return 0;
839 }
840
841 /* Prototypes of private functions */
842 static int dm_early_init(void *handle);
843
844 /* Allocate memory for FBC compressed data */
845 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
846 {
847         struct drm_device *dev = connector->dev;
848         struct amdgpu_device *adev = drm_to_adev(dev);
849         struct dm_compressor_info *compressor = &adev->dm.compressor;
850         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
851         struct drm_display_mode *mode;
852         unsigned long max_size = 0;
853
854         if (adev->dm.dc->fbc_compressor == NULL)
855                 return;
856
857         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
858                 return;
859
860         if (compressor->bo_ptr)
861                 return;
862
864         list_for_each_entry(mode, &connector->modes, head) {
865                 if (max_size < mode->htotal * mode->vtotal)
866                         max_size = mode->htotal * mode->vtotal;
867         }
868
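        /*
         * Sizing note (illustrative): max_size counts pixels of the largest
         * listed mode (htotal * vtotal); the allocation below assumes a
         * worst case of 4 bytes per pixel.
         */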
869         if (max_size) {
870                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
871                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
872                             &compressor->gpu_addr, &compressor->cpu_addr);
873
874                 if (r) {
875                         DRM_ERROR("DM: Failed to initialize FBC\n");
876                 } else {
877                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
878                         DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
879                 }
880         }
881 }
884
885 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
886                                           int pipe, bool *enabled,
887                                           unsigned char *buf, int max_bytes)
888 {
889         struct drm_device *dev = dev_get_drvdata(kdev);
890         struct amdgpu_device *adev = drm_to_adev(dev);
891         struct drm_connector *connector;
892         struct drm_connector_list_iter conn_iter;
893         struct amdgpu_dm_connector *aconnector;
894         int ret = 0;
895
896         *enabled = false;
897
898         mutex_lock(&adev->dm.audio_lock);
899
900         drm_connector_list_iter_begin(dev, &conn_iter);
901         drm_for_each_connector_iter(connector, &conn_iter) {
902                 aconnector = to_amdgpu_dm_connector(connector);
903                 if (aconnector->audio_inst != port)
904                         continue;
905
906                 *enabled = true;
907                 ret = drm_eld_size(connector->eld);
908                 memcpy(buf, connector->eld, min(max_bytes, ret));
909
910                 break;
911         }
912         drm_connector_list_iter_end(&conn_iter);
913
914         mutex_unlock(&adev->dm.audio_lock);
915
916         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
917
918         return ret;
919 }
920
921 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
922         .get_eld = amdgpu_dm_audio_component_get_eld,
923 };
924
925 static int amdgpu_dm_audio_component_bind(struct device *kdev,
926                                        struct device *hda_kdev, void *data)
927 {
928         struct drm_device *dev = dev_get_drvdata(kdev);
929         struct amdgpu_device *adev = drm_to_adev(dev);
930         struct drm_audio_component *acomp = data;
931
932         acomp->ops = &amdgpu_dm_audio_component_ops;
933         acomp->dev = kdev;
934         adev->dm.audio_component = acomp;
935
936         return 0;
937 }
938
939 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
940                                           struct device *hda_kdev, void *data)
941 {
942         struct drm_device *dev = dev_get_drvdata(kdev);
943         struct amdgpu_device *adev = drm_to_adev(dev);
944         struct drm_audio_component *acomp = data;
945
946         acomp->ops = NULL;
947         acomp->dev = NULL;
948         adev->dm.audio_component = NULL;
949 }
950
951 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
952         .bind   = amdgpu_dm_audio_component_bind,
953         .unbind = amdgpu_dm_audio_component_unbind,
954 };
955
956 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
957 {
958         int i, ret;
959
960         if (!amdgpu_audio)
961                 return 0;
962
963         adev->mode_info.audio.enabled = true;
964
965         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
966
967         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
968                 adev->mode_info.audio.pin[i].channels = -1;
969                 adev->mode_info.audio.pin[i].rate = -1;
970                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
971                 adev->mode_info.audio.pin[i].status_bits = 0;
972                 adev->mode_info.audio.pin[i].category_code = 0;
973                 adev->mode_info.audio.pin[i].connected = false;
974                 adev->mode_info.audio.pin[i].id =
975                         adev->dm.dc->res_pool->audios[i]->inst;
976                 adev->mode_info.audio.pin[i].offset = 0;
977         }
978
979         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
980         if (ret < 0)
981                 return ret;
982
983         adev->dm.audio_registered = true;
984
985         return 0;
986 }
987
988 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
989 {
990         if (!amdgpu_audio)
991                 return;
992
993         if (!adev->mode_info.audio.enabled)
994                 return;
995
996         if (adev->dm.audio_registered) {
997                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
998                 adev->dm.audio_registered = false;
999         }
1000
1001         /* TODO: Disable audio? */
1002
1003         adev->mode_info.audio.enabled = false;
1004 }
1005
1006 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1007 {
1008         struct drm_audio_component *acomp = adev->dm.audio_component;
1009
1010         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1011                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1012
1013                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1014                                                  pin, -1);
1015         }
1016 }
1017
1018 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1019 {
1020         const struct dmcub_firmware_header_v1_0 *hdr;
1021         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1022         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1023         const struct firmware *dmub_fw = adev->dm.dmub_fw;
1024         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1025         struct abm *abm = adev->dm.dc->res_pool->abm;
1026         struct dmub_srv_hw_params hw_params;
1027         enum dmub_status status;
1028         const unsigned char *fw_inst_const, *fw_bss_data;
1029         uint32_t i, fw_inst_const_size, fw_bss_data_size;
1030         bool has_hw_support;
1031
1032         if (!dmub_srv)
1033                 /* DMUB isn't supported on the ASIC. */
1034                 return 0;
1035
1036         if (!fb_info) {
1037                 DRM_ERROR("No framebuffer info for DMUB service.\n");
1038                 return -EINVAL;
1039         }
1040
1041         if (!dmub_fw) {
1042                 /* Firmware required for DMUB support. */
1043                 DRM_ERROR("No firmware provided for DMUB.\n");
1044                 return -EINVAL;
1045         }
1046
1047         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1048         if (status != DMUB_STATUS_OK) {
1049                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1050                 return -EINVAL;
1051         }
1052
1053         if (!has_hw_support) {
1054                 DRM_INFO("DMUB unsupported on ASIC\n");
1055                 return 0;
1056         }
1057
1058         /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1059         status = dmub_srv_hw_reset(dmub_srv);
1060         if (status != DMUB_STATUS_OK)
1061                 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1062
1063         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1064
1065         fw_inst_const = dmub_fw->data +
1066                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1067                         PSP_HEADER_BYTES;
1068
1069         fw_bss_data = dmub_fw->data +
1070                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1071                       le32_to_cpu(hdr->inst_const_bytes);
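        /*
         * Resulting ucode array layout, as implied by the offsets above
         * (illustrative):
         *   [PSP header | inst_const payload | PSP footer | bss_data]
         * The usable inst_const payload excludes both PSP pads.
         */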
1072
1073         /* Copy firmware and bios info into FB memory. */
1074         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1075                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1076
1077         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1078
1079         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1080          * amdgpu_ucode_init_single_fw will load dmub firmware
1081          * fw_inst_const part to cw0; otherwise, the firmware back door load
1082          * will be done by dm_dmub_hw_init
1083          */
1084         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1085                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1086                                 fw_inst_const_size);
1087         }
1088
1089         if (fw_bss_data_size)
1090                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1091                        fw_bss_data, fw_bss_data_size);
1092
1093         /* Copy firmware bios info into FB memory. */
1094         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1095                adev->bios_size);
1096
1097         /* Reset regions that need to be reset. */
1098         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1099                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1100
1101         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1102                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1103
1104         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1105                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1106
1107         /* Initialize hardware. */
1108         memset(&hw_params, 0, sizeof(hw_params));
1109         hw_params.fb_base = adev->gmc.fb_start;
1110         hw_params.fb_offset = adev->gmc.aper_base;
1111
1112         /* backdoor load firmware and trigger dmub running */
1113         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1114                 hw_params.load_inst_const = true;
1115
1116         if (dmcu)
1117                 hw_params.psp_version = dmcu->psp_version;
1118
1119         for (i = 0; i < fb_info->num_fb; ++i)
1120                 hw_params.fb[i] = &fb_info->fb[i];
1121
1122         switch (adev->ip_versions[DCE_HWIP][0]) {
1123         case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1124                 hw_params.dpia_supported = true;
1125                 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1126                 break;
1127         default:
1128                 break;
1129         }
1130
1131         status = dmub_srv_hw_init(dmub_srv, &hw_params);
1132         if (status != DMUB_STATUS_OK) {
1133                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1134                 return -EINVAL;
1135         }
1136
1137         /* Wait for firmware load to finish. */
1138         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1139         if (status != DMUB_STATUS_OK)
1140                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1141
1142         /* Init DMCU and ABM if available. */
1143         if (dmcu && abm) {
1144                 dmcu->funcs->dmcu_init(dmcu);
1145                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1146         }
1147
1148         if (!adev->dm.dc->ctx->dmub_srv)
1149                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1150         if (!adev->dm.dc->ctx->dmub_srv) {
1151                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1152                 return -ENOMEM;
1153         }
1154
1155         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1156                  adev->dm.dmcub_fw_version);
1157
1158         return 0;
1159 }
1160
1161 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1162 {
1163         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1164         enum dmub_status status;
1165         bool init;
1166
1167         if (!dmub_srv) {
1168                 /* DMUB isn't supported on the ASIC. */
1169                 return;
1170         }
1171
1172         status = dmub_srv_is_hw_init(dmub_srv, &init);
1173         if (status != DMUB_STATUS_OK)
1174                 DRM_WARN("DMUB hardware init check failed: %d\n", status);
1175
1176         if (status == DMUB_STATUS_OK && init) {
1177                 /* Wait for firmware load to finish. */
1178                 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1179                 if (status != DMUB_STATUS_OK)
1180                         DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1181         } else {
1182                 /* Perform the full hardware initialization. */
1183                 dm_dmub_hw_init(adev);
1184         }
1185 }
1186
1187 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1188 {
1189         uint64_t pt_base;
1190         uint32_t logical_addr_low;
1191         uint32_t logical_addr_high;
1192         uint32_t agp_base, agp_bot, agp_top;
1193         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1194
1195         memset(pa_config, 0, sizeof(*pa_config));
1196
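        /*
         * Note (illustrative): the system aperture low/high values are kept
         * in 256 KiB units (hence the >> 18 shifts), while the AGP
         * base/bot/top values use 16 MiB units (>> 24); the << 18 and << 24
         * conversions below undo this when filling pa_config.
         */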
1197         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1198         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1199
1200         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1201                 /*
1202                  * Raven2 has a HW issue that it is unable to use the vram which
1203                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1204                  * workaround that increase system aperture high address (add 1)
1205                  * to get rid of the VM fault and hardware hang.
1206                  */
1207                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1208         else
1209                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1210
1211         agp_base = 0;
1212         agp_bot = adev->gmc.agp_start >> 24;
1213         agp_top = adev->gmc.agp_end >> 24;
1214
1216         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1217         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1218         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1219         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1220         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1221         page_table_base.low_part = lower_32_bits(pt_base);
1222
1223         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1224         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1225
1226         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1227         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1228         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1229
1230         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1231         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1232         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1233
1234         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1235         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1236         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1237
1238         pa_config->is_hvm_enabled = 0;
1240 }
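
/*
 * Illustrative usage (a sketch of how amdgpu_dm_init() consumes this on
 * DCN ASICs; dc_setup_system_context() is the DC entry point assumed here):
 *
 *      struct dc_phy_addr_space_config pa_config;
 *
 *      mmhub_read_system_context(adev, &pa_config);
 *      dc_setup_system_context(adev->dm.dc, &pa_config);
 */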
1241
1242 static void vblank_control_worker(struct work_struct *work)
1243 {
1244         struct vblank_control_work *vblank_work =
1245                 container_of(work, struct vblank_control_work, work);
1246         struct amdgpu_display_manager *dm = vblank_work->dm;
1247
1248         mutex_lock(&dm->dc_lock);
1249
1250         if (vblank_work->enable)
1251                 dm->active_vblank_irq_count++;
1252         else if (dm->active_vblank_irq_count)
1253                 dm->active_vblank_irq_count--;
1254
1255         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1256
1257         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1258
1259         /* Control PSR based on vblank requirements from OS */
1260         if (vblank_work->stream && vblank_work->stream->link) {
1261                 if (vblank_work->enable) {
1262                         if (vblank_work->stream->link->psr_settings.psr_allow_active)
1263                                 amdgpu_dm_psr_disable(vblank_work->stream);
1264                 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1265                            !vblank_work->stream->link->psr_settings.psr_allow_active &&
1266                            vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1267                         amdgpu_dm_psr_enable(vblank_work->stream);
1268                 }
1269         }
1270
1271         mutex_unlock(&dm->dc_lock);
1272
1273         dc_stream_release(vblank_work->stream);
1274
1275         kfree(vblank_work);
1276 }
1277
1278 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1279 {
1280         struct hpd_rx_irq_offload_work *offload_work;
1281         struct amdgpu_dm_connector *aconnector;
1282         struct dc_link *dc_link;
1283         struct amdgpu_device *adev;
1284         enum dc_connection_type new_connection_type = dc_connection_none;
1285         unsigned long flags;
1286
1287         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1288         aconnector = offload_work->offload_wq->aconnector;
1289
1290         if (!aconnector) {
1291                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1292                 goto skip;
1293         }
1294
1295         adev = drm_to_adev(aconnector->base.dev);
1296         dc_link = aconnector->dc_link;
1297
1298         mutex_lock(&aconnector->hpd_lock);
1299         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1300                 DRM_ERROR("KMS: Failed to detect connector\n");
1301         mutex_unlock(&aconnector->hpd_lock);
1302
1303         if (new_connection_type == dc_connection_none)
1304                 goto skip;
1305
1306         if (amdgpu_in_reset(adev))
1307                 goto skip;
1308
1309         mutex_lock(&adev->dm.dc_lock);
1310         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1311                 dc_link_dp_handle_automated_test(dc_link);
1312         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1313                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1314                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1315                 dc_link_dp_handle_link_loss(dc_link);
1316                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1317                 offload_work->offload_wq->is_handling_link_loss = false;
1318                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1319         }
1320         mutex_unlock(&adev->dm.dc_lock);
1321
1322 skip:
1323         kfree(offload_work);
1325 }
1326
1327 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1328 {
1329         int max_caps = dc->caps.max_links;
1330         int i = 0;
1331         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1332
1333         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1334
1335         if (!hpd_rx_offload_wq)
1336                 return NULL;
1337
1338
1339         for (i = 0; i < max_caps; i++) {
1340                 hpd_rx_offload_wq[i].wq =
1341                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1342
1343                 if (hpd_rx_offload_wq[i].wq == NULL) {
1344                         DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1345                         return NULL;
1346                 }
1347
1348                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1349         }
1350
1351         return hpd_rx_offload_wq;
1352 out_err:
        /* Don't leak the workqueues created before the failure */
        for (i = 0; i < max_caps; i++) {
                if (hpd_rx_offload_wq[i].wq)
                        destroy_workqueue(hpd_rx_offload_wq[i].wq);
        }
        kfree(hpd_rx_offload_wq);
        return NULL;
}
1353
1354 struct amdgpu_stutter_quirk {
1355         u16 chip_vendor;
1356         u16 chip_device;
1357         u16 subsys_vendor;
1358         u16 subsys_device;
1359         u8 revision;
1360 };
1361
1362 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1363         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1364         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1365         { 0, 0, 0, 0, 0 },
1366 };
1367
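/*
 * Returns true when the PCI IDs and revision match an entry in the quirk
 * list above, so stutter mode can be disabled on the affected boards.
 */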
1368 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1369 {
1370         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1371
1372         while (p && p->chip_device != 0) {
1373                 if (pdev->vendor == p->chip_vendor &&
1374                     pdev->device == p->chip_device &&
1375                     pdev->subsystem_vendor == p->subsys_vendor &&
1376                     pdev->subsystem_device == p->subsys_device &&
1377                     pdev->revision == p->revision) {
1378                         return true;
1379                 }
1380                 ++p;
1381         }
1382         return false;
1383 }
1384
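/*
 * Bring up the display manager for a device: set up locks and IRQ handling,
 * describe the ASIC to DC and create the DC core, initialize DMUB, create
 * the helper workqueues and modules (freesync, color, HDCP), register DMUB
 * notification callbacks, and finally build the DRM-facing software state.
 */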
1385 static int amdgpu_dm_init(struct amdgpu_device *adev)
1386 {
1387         struct dc_init_data init_data;
1388 #ifdef CONFIG_DRM_AMD_DC_HDCP
1389         struct dc_callback_init init_params;
1390 #endif
1391         int r;
1392
1393         adev->dm.ddev = adev_to_drm(adev);
1394         adev->dm.adev = adev;
1395
1396         /* Zero all the fields */
1397         memset(&init_data, 0, sizeof(init_data));
1398 #ifdef CONFIG_DRM_AMD_DC_HDCP
1399         memset(&init_params, 0, sizeof(init_params));
1400 #endif
1401
1402         mutex_init(&adev->dm.dc_lock);
1403         mutex_init(&adev->dm.audio_lock);
1404         spin_lock_init(&adev->dm.vblank_lock);
1405
1406         if (amdgpu_dm_irq_init(adev)) {
1407                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1408                 goto error;
1409         }
1410
1411         init_data.asic_id.chip_family = adev->family;
1412
1413         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1414         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1415         init_data.asic_id.chip_id = adev->pdev->device;
1416
1417         init_data.asic_id.vram_width = adev->gmc.vram_width;
1418         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1419         init_data.asic_id.atombios_base_address =
1420                 adev->mode_info.atom_context->bios;
1421
1422         init_data.driver = adev;
1423
1424         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1425
1426         if (!adev->dm.cgs_device) {
1427                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1428                 goto error;
1429         }
1430
1431         init_data.cgs_device = adev->dm.cgs_device;
1432
1433         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1434
1435         switch (adev->ip_versions[DCE_HWIP][0]) {
1436         case IP_VERSION(2, 1, 0):
1437                 switch (adev->dm.dmcub_fw_version) {
1438                 case 0: /* development */
1439                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1440                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1441                         init_data.flags.disable_dmcu = false;
1442                         break;
1443                 default:
1444                         init_data.flags.disable_dmcu = true;
1445                 }
1446                 break;
1447         case IP_VERSION(2, 0, 3):
1448                 init_data.flags.disable_dmcu = true;
1449                 break;
1450         default:
1451                 break;
1452         }
1453
1454         switch (adev->asic_type) {
1455         case CHIP_CARRIZO:
1456         case CHIP_STONEY:
1457                 init_data.flags.gpu_vm_support = true;
1458                 break;
1459         default:
1460                 switch (adev->ip_versions[DCE_HWIP][0]) {
1461                 case IP_VERSION(1, 0, 0):
1462                 case IP_VERSION(1, 0, 1):
1463                         /* enable S/G on PCO and RV2 */
1464                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1465                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1466                                 init_data.flags.gpu_vm_support = true;
1467                         break;
1468                 case IP_VERSION(2, 1, 0):
1469                 case IP_VERSION(3, 0, 1):
1470                 case IP_VERSION(3, 1, 2):
1471                 case IP_VERSION(3, 1, 3):
1472                 case IP_VERSION(3, 1, 5):
1473                 case IP_VERSION(3, 1, 6):
1474                         init_data.flags.gpu_vm_support = true;
1475                         break;
1476                 default:
1477                         break;
1478                 }
1479                 break;
1480         }
1481
1482         if (init_data.flags.gpu_vm_support)
1483                 adev->mode_info.gpu_vm_support = true;
1484
1485         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1486                 init_data.flags.fbc_support = true;
1487
1488         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1489                 init_data.flags.multi_mon_pp_mclk_switch = true;
1490
1491         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1492                 init_data.flags.disable_fractional_pwm = true;
1493
1494         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1495                 init_data.flags.edp_no_power_sequencing = true;
1496
1497         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1498                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1499         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1500                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1501
1502         init_data.flags.seamless_boot_edp_requested = false;
1503
1504         if (check_seamless_boot_capability(adev)) {
1505                 init_data.flags.seamless_boot_edp_requested = true;
1506                 init_data.flags.allow_seamless_boot_optimization = true;
1507                 DRM_INFO("Seamless boot condition check passed\n");
1508         }
1509
1510         INIT_LIST_HEAD(&adev->dm.da_list);
1511         /* Display Core create. */
1512         adev->dm.dc = dc_create(&init_data);
1513
1514         if (adev->dm.dc) {
1515                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1516         } else {
1517                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1518                 goto error;
1519         }
1520
1521         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1522                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1523                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1524         }
1525
1526         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1527                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1528         if (dm_should_disable_stutter(adev->pdev))
1529                 adev->dm.dc->debug.disable_stutter = true;
1530
1531         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1532                 adev->dm.dc->debug.disable_stutter = true;
1533
1534         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1535                 adev->dm.dc->debug.disable_dsc = true;
1536                 adev->dm.dc->debug.disable_dsc_edp = true;
1537         }
1538
1539         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1540                 adev->dm.dc->debug.disable_clock_gate = true;
1541
1542         r = dm_dmub_hw_init(adev);
1543         if (r) {
1544                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1545                 goto error;
1546         }
1547
1548         dc_hardware_init(adev->dm.dc);
1549
1550         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1551         if (!adev->dm.hpd_rx_offload_wq) {
1552                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1553                 goto error;
1554         }
1555
1556         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1557                 struct dc_phy_addr_space_config pa_config;
1558
1559                 mmhub_read_system_context(adev, &pa_config);
1560
1561                 // Call the DC init_memory func
1562                 dc_setup_system_context(adev->dm.dc, &pa_config);
1563         }
1564
1565         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1566         if (!adev->dm.freesync_module)
1567                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1568         else
1570                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1571                                 adev->dm.freesync_module);
1572
1573         amdgpu_dm_init_color_mod();
1574
1575         if (adev->dm.dc->caps.max_links > 0) {
1576                 adev->dm.vblank_control_workqueue =
1577                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1578                 if (!adev->dm.vblank_control_workqueue)
1579                         DRM_ERROR("amdgpu: failed to create vblank_control_workqueue.\n");
1580         }
1581
1582 #ifdef CONFIG_DRM_AMD_DC_HDCP
1583         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1584                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1585
1586                 if (!adev->dm.hdcp_workqueue)
1587                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1588                 else
1589                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1590
1591                 dc_init_callbacks(adev->dm.dc, &init_params);
1592         }
1593 #endif
1594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1595         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1596 #endif
1597         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1598                 init_completion(&adev->dm.dmub_aux_transfer_done);
1599                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1600                 if (!adev->dm.dmub_notify) {
1601                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1602                         goto error;
1603                 }
1604
1605                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1606                 if (!adev->dm.delayed_hpd_wq) {
1607                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1608                         goto error;
1609                 }
1610
1611                 amdgpu_dm_outbox_init(adev);
1612                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1613                         dmub_aux_setconfig_callback, false)) {
1614                         DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1615                         goto error;
1616                 }
1617                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1618                         DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1619                         goto error;
1620                 }
1621                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1622                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1623                         goto error;
1624                 }
1625         }
1626
1627         if (amdgpu_dm_initialize_drm_device(adev)) {
1628                 DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1630                 goto error;
1631         }
1632
1633         /* create fake encoders for MST */
1634         dm_dp_create_fake_mst_encoders(adev);
1635
1636         /* TODO: Add_display_info? */
1637
1638         /* TODO use dynamic cursor width */
1639         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1640         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1641
1642         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1643                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1645                 goto error;
1646         }
1647
1649         DRM_DEBUG_DRIVER("KMS initialized.\n");
1650
1651         return 0;
1652 error:
1653         amdgpu_dm_fini(adev);
1654
1655         return -EINVAL;
1656 }
1657
1658 static int amdgpu_dm_early_fini(void *handle)
1659 {
1660         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1661
1662         amdgpu_dm_audio_fini(adev);
1663
1664         return 0;
1665 }
1666
1667 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1668 {
1669         int i;
1670
1671         if (adev->dm.vblank_control_workqueue) {
1672                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1673                 adev->dm.vblank_control_workqueue = NULL;
1674         }
1675
1676         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1677                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1678         }
1679
1680         amdgpu_dm_destroy_drm_device(&adev->dm);
1681
1682 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1683         if (adev->dm.crc_rd_wrk) {
1684                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1685                 kfree(adev->dm.crc_rd_wrk);
1686                 adev->dm.crc_rd_wrk = NULL;
1687         }
1688 #endif
1689 #ifdef CONFIG_DRM_AMD_DC_HDCP
1690         if (adev->dm.hdcp_workqueue) {
1691                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1692                 adev->dm.hdcp_workqueue = NULL;
1693         }
1694
1695         if (adev->dm.dc)
1696                 dc_deinit_callbacks(adev->dm.dc);
1697 #endif
1698
1699         /* dc may be NULL here if amdgpu_dm_init() failed before dc_create() */
        if (adev->dm.dc)
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1700
1701         if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1702                 kfree(adev->dm.dmub_notify);
1703                 adev->dm.dmub_notify = NULL;
1704                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1705                 adev->dm.delayed_hpd_wq = NULL;
1706         }
1707
1708         if (adev->dm.dmub_bo)
1709                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1710                                       &adev->dm.dmub_bo_gpu_addr,
1711                                       &adev->dm.dmub_bo_cpu_addr);
1712
1713         if (adev->dm.hpd_rx_offload_wq) {
1714                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1715                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1716                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1717                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1718                         }
1719                 }
1720
1721                 kfree(adev->dm.hpd_rx_offload_wq);
1722                 adev->dm.hpd_rx_offload_wq = NULL;
1723         }
1724
1725         /* DC Destroy TODO: Replace destroy DAL */
1726         if (adev->dm.dc)
1727                 dc_destroy(&adev->dm.dc);
1728         /*
1729          * TODO: pageflip, vblank interrupt
1730          *
1731          * amdgpu_dm_irq_fini(adev);
1732          */
1733
1734         if (adev->dm.cgs_device) {
1735                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1736                 adev->dm.cgs_device = NULL;
1737         }
1738         if (adev->dm.freesync_module) {
1739                 mod_freesync_destroy(adev->dm.freesync_module);
1740                 adev->dm.freesync_module = NULL;
1741         }
1742
1743         mutex_destroy(&adev->dm.audio_lock);
1744         mutex_destroy(&adev->dm.dc_lock);
1747 }
1748
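/*
 * Request and validate DMCU firmware where a separate image exists (Raven
 * and Navi12); ASICs without one, or without PSP firmware loading, return
 * early with no work to do.
 */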
1749 static int load_dmcu_fw(struct amdgpu_device *adev)
1750 {
1751         const char *fw_name_dmcu = NULL;
1752         int r;
1753         const struct dmcu_firmware_header_v1_0 *hdr;
1754
1755         switch (adev->asic_type) {
1756 #if defined(CONFIG_DRM_AMD_DC_SI)
1757         case CHIP_TAHITI:
1758         case CHIP_PITCAIRN:
1759         case CHIP_VERDE:
1760         case CHIP_OLAND:
1761 #endif
1762         case CHIP_BONAIRE:
1763         case CHIP_HAWAII:
1764         case CHIP_KAVERI:
1765         case CHIP_KABINI:
1766         case CHIP_MULLINS:
1767         case CHIP_TONGA:
1768         case CHIP_FIJI:
1769         case CHIP_CARRIZO:
1770         case CHIP_STONEY:
1771         case CHIP_POLARIS11:
1772         case CHIP_POLARIS10:
1773         case CHIP_POLARIS12:
1774         case CHIP_VEGAM:
1775         case CHIP_VEGA10:
1776         case CHIP_VEGA12:
1777         case CHIP_VEGA20:
1778                 return 0;
1779         case CHIP_NAVI12:
1780                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1781                 break;
1782         case CHIP_RAVEN:
1783                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1784                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1785                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1787                 else
1788                         return 0;
1789                 break;
1790         default:
1791                 switch (adev->ip_versions[DCE_HWIP][0]) {
1792                 case IP_VERSION(2, 0, 2):
1793                 case IP_VERSION(2, 0, 3):
1794                 case IP_VERSION(2, 0, 0):
1795                 case IP_VERSION(2, 1, 0):
1796                 case IP_VERSION(3, 0, 0):
1797                 case IP_VERSION(3, 0, 2):
1798                 case IP_VERSION(3, 0, 3):
1799                 case IP_VERSION(3, 0, 1):
1800                 case IP_VERSION(3, 1, 2):
1801                 case IP_VERSION(3, 1, 3):
1802                 case IP_VERSION(3, 1, 5):
1803                 case IP_VERSION(3, 1, 6):
1804                         return 0;
1805                 default:
1806                         break;
1807                 }
1808                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1809                 return -EINVAL;
1810         }
1811
1812         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1813                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1814                 return 0;
1815         }
1816
1817         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1818         if (r == -ENOENT) {
1819                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1820                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1821                 adev->dm.fw_dmcu = NULL;
1822                 return 0;
1823         }
1824         if (r) {
1825                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1826                         fw_name_dmcu);
1827                 return r;
1828         }
1829
1830         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1831         if (r) {
1832                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1833                         fw_name_dmcu);
1834                 release_firmware(adev->dm.fw_dmcu);
1835                 adev->dm.fw_dmcu = NULL;
1836                 return r;
1837         }
1838
1839         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1840         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1841         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1842         adev->firmware.fw_size +=
1843                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1844
1845         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1846         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1847         adev->firmware.fw_size +=
1848                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1849
1850         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1851
1852         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1853
1854         return 0;
1855 }
1856
1857 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1858 {
1859         struct amdgpu_device *adev = ctx;
1860
1861         return dm_read_reg(adev->dm.dc->ctx, address);
1862 }
1863
1864 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1865                                      uint32_t value)
1866 {
1867         struct amdgpu_device *adev = ctx;
1868
1869         return dm_write_reg(adev->dm.dc->ctx, address, value);
1870 }
1871
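/*
 * Software-side DMUB setup: select and validate the firmware image for the
 * detected DCN revision, create the DMUB service, compute its region layout
 * and back the regions with a VRAM framebuffer allocation.
 */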
1872 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1873 {
1874         struct dmub_srv_create_params create_params;
1875         struct dmub_srv_region_params region_params;
1876         struct dmub_srv_region_info region_info;
1877         struct dmub_srv_fb_params fb_params;
1878         struct dmub_srv_fb_info *fb_info;
1879         struct dmub_srv *dmub_srv;
1880         const struct dmcub_firmware_header_v1_0 *hdr;
1881         const char *fw_name_dmub;
1882         enum dmub_asic dmub_asic;
1883         enum dmub_status status;
1884         int r;
1885
1886         switch (adev->ip_versions[DCE_HWIP][0]) {
1887         case IP_VERSION(2, 1, 0):
1888                 dmub_asic = DMUB_ASIC_DCN21;
1889                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1890                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1891                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1892                 break;
1893         case IP_VERSION(3, 0, 0):
1894                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1895                         dmub_asic = DMUB_ASIC_DCN30;
1896                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1897                 } else {
1898                         dmub_asic = DMUB_ASIC_DCN30;
1899                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1900                 }
1901                 break;
1902         case IP_VERSION(3, 0, 1):
1903                 dmub_asic = DMUB_ASIC_DCN301;
1904                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1905                 break;
1906         case IP_VERSION(3, 0, 2):
1907                 dmub_asic = DMUB_ASIC_DCN302;
1908                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1909                 break;
1910         case IP_VERSION(3, 0, 3):
1911                 dmub_asic = DMUB_ASIC_DCN303;
1912                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1913                 break;
1914         case IP_VERSION(3, 1, 2):
1915         case IP_VERSION(3, 1, 3):
1916                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1917                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1918                 break;
1919         case IP_VERSION(3, 1, 5):
1920                 dmub_asic = DMUB_ASIC_DCN315;
1921                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1922                 break;
1923         case IP_VERSION(3, 1, 6):
1924                 dmub_asic = DMUB_ASIC_DCN316;
1925                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1926                 break;
1927         default:
1928                 /* ASIC doesn't support DMUB. */
1929                 return 0;
1930         }
1931
1932         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1933         if (r) {
1934                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
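                /* Treated as non-fatal: DM init continues without DMUB firmware */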
1935                 return 0;
1936         }
1937
1938         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1939         if (r) {
1940                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1941                 return 0;
1942         }
1943
1944         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1945         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1946
1947         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1948                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1949                         AMDGPU_UCODE_ID_DMCUB;
1950                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1951                         adev->dm.dmub_fw;
1952                 adev->firmware.fw_size +=
1953                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1954
1955                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1956                          adev->dm.dmcub_fw_version);
1957         }
1958
1959
1960         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1961         dmub_srv = adev->dm.dmub_srv;
1962
1963         if (!dmub_srv) {
1964                 DRM_ERROR("Failed to allocate DMUB service!\n");
1965                 return -ENOMEM;
1966         }
1967
1968         memset(&create_params, 0, sizeof(create_params));
1969         create_params.user_ctx = adev;
1970         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1971         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1972         create_params.asic = dmub_asic;
1973
1974         /* Create the DMUB service. */
1975         status = dmub_srv_create(dmub_srv, &create_params);
1976         if (status != DMUB_STATUS_OK) {
1977                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1978                 return -EINVAL;
1979         }
1980
1981         /* Calculate the size of all the regions for the DMUB service. */
1982         memset(&region_params, 0, sizeof(region_params));
1983
1984         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1985                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1986         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1987         region_params.vbios_size = adev->bios_size;
1988         region_params.fw_bss_data = region_params.bss_data_size ?
1989                 adev->dm.dmub_fw->data +
1990                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1991                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1992         region_params.fw_inst_const =
1993                 adev->dm.dmub_fw->data +
1994                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1995                 PSP_HEADER_BYTES;
1996
1997         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1998                                            &region_info);
1999
2000         if (status != DMUB_STATUS_OK) {
2001                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2002                 return -EINVAL;
2003         }
2004
2005         /*
2006          * Allocate a framebuffer based on the total size of all the regions.
2007          * TODO: Move this into GART.
2008          */
2009         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2010                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2011                                     &adev->dm.dmub_bo_gpu_addr,
2012                                     &adev->dm.dmub_bo_cpu_addr);
2013         if (r)
2014                 return r;
2015
2016         /* Rebase the regions on the framebuffer address. */
2017         memset(&fb_params, 0, sizeof(fb_params));
2018         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2019         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2020         fb_params.region_info = &region_info;
2021
2022         adev->dm.dmub_fb_info =
2023                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2024         fb_info = adev->dm.dmub_fb_info;
2025
2026         if (!fb_info) {
2027                 DRM_ERROR(
2028                         "Failed to allocate framebuffer info for DMUB service!\n");
2029                 return -ENOMEM;
2030         }
2031
2032         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2033         if (status != DMUB_STATUS_OK) {
2034                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2035                 return -EINVAL;
2036         }
2037
2038         return 0;
2039 }
2040
2041 static int dm_sw_init(void *handle)
2042 {
2043         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2044         int r;
2045
2046         r = dm_dmub_sw_init(adev);
2047         if (r)
2048                 return r;
2049
2050         return load_dmcu_fw(adev);
2051 }
2052
2053 static int dm_sw_fini(void *handle)
2054 {
2055         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2056
2057         kfree(adev->dm.dmub_fb_info);
2058         adev->dm.dmub_fb_info = NULL;
2059
2060         if (adev->dm.dmub_srv) {
2061                 dmub_srv_destroy(adev->dm.dmub_srv);
2062                 adev->dm.dmub_srv = NULL;
2063         }
2064
2065         release_firmware(adev->dm.dmub_fw);
2066         adev->dm.dmub_fw = NULL;
2067
2068         release_firmware(adev->dm.fw_dmcu);
2069         adev->dm.fw_dmcu = NULL;
2070
2071         return 0;
2072 }
2073
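/*
 * Start topology management on every connector backed by an MST branch
 * device; links that fail to start fall back to single-stream.
 */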
2074 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2075 {
2076         struct amdgpu_dm_connector *aconnector;
2077         struct drm_connector *connector;
2078         struct drm_connector_list_iter iter;
2079         int ret = 0;
2080
2081         drm_connector_list_iter_begin(dev, &iter);
2082         drm_for_each_connector_iter(connector, &iter) {
2083                 aconnector = to_amdgpu_dm_connector(connector);
2084                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2085                     aconnector->mst_mgr.aux) {
2086                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2087                                          aconnector,
2088                                          aconnector->base.base.id);
2089
2090                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2091                         if (ret < 0) {
2092                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2093                                 aconnector->dc_link->type =
2094                                         dc_connection_single;
2095                                 break;
2096                         }
2097                 }
2098         }
2099         drm_connector_list_iter_end(&iter);
2100
2101         return ret;
2102 }
2103
2104 static int dm_late_init(void *handle)
2105 {
2106         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2107
2108         struct dmcu_iram_parameters params;
2109         unsigned int linear_lut[16];
2110         int i;
2111         struct dmcu *dmcu = NULL;
2112
2113         dmcu = adev->dm.dc->res_pool->dmcu;
2114
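        /* Build a 16-entry linear LUT spanning 0..0xFFFF for the backlight ramp */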
2115         for (i = 0; i < 16; i++)
2116                 linear_lut[i] = 0xFFFF * i / 15;
2117
2118         params.set = 0;
2119         params.backlight_ramping_override = false;
2120         params.backlight_ramping_start = 0xCCCC;
2121         params.backlight_ramping_reduction = 0xCCCCCCCC;
2122         params.backlight_lut_array_size = 16;
2123         params.backlight_lut_array = linear_lut;
2124
2125         /* Min backlight level after ABM reduction; don't allow below 1%.
2126          * 0xFFFF x 0.01 = 0x28F
2127          */
2128         params.min_abm_backlight = 0x28F;
2129         /* In the case where abm is implemented on dmcub,
2130          * dmcu object will be null.
2131          * ABM 2.4 and up are implemented on dmcub.
2132          */
2133         if (dmcu) {
2134                 if (!dmcu_load_iram(dmcu, params))
2135                         return -EINVAL;
2136         } else if (adev->dm.dc->ctx->dmub_srv) {
2137                 struct dc_link *edp_links[MAX_NUM_EDP];
2138                 int edp_num;
2139
2140                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2141                 for (i = 0; i < edp_num; i++) {
2142                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2143                                 return -EINVAL;
2144                 }
2145         }
2146
2147         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2148 }
2149
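/*
 * Suspend or resume every MST topology manager for S3. Managers that fail
 * to resume have MST torn down, and a hotplug event is generated so
 * userspace re-probes the topology.
 */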
2150 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2151 {
2152         struct amdgpu_dm_connector *aconnector;
2153         struct drm_connector *connector;
2154         struct drm_connector_list_iter iter;
2155         struct drm_dp_mst_topology_mgr *mgr;
2156         int ret;
2157         bool need_hotplug = false;
2158
2159         drm_connector_list_iter_begin(dev, &iter);
2160         drm_for_each_connector_iter(connector, &iter) {
2161                 aconnector = to_amdgpu_dm_connector(connector);
2162                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2163                     aconnector->mst_port)
2164                         continue;
2165
2166                 mgr = &aconnector->mst_mgr;
2167
2168                 if (suspend) {
2169                         drm_dp_mst_topology_mgr_suspend(mgr);
2170                 } else {
2171                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2172                         if (ret < 0) {
2173                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2174                                 need_hotplug = true;
2175                         }
2176                 }
2177         }
2178         drm_connector_list_iter_end(&iter);
2179
2180         if (need_hotplug)
2181                 drm_kms_helper_hotplug_event(dev);
2182 }
2183
2184 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2185 {
2186         int ret = 0;
2187
2188         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2189          * on the Windows driver dc implementation.
2190          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2191          * should be passed to smu during boot up and resume from s3.
2192          * boot up: dc calculate dcn watermark clock settings within dc_create,
2193          * dcn20_resource_construct
2194          * then call pplib functions below to pass the settings to smu:
2195          * smu_set_watermarks_for_clock_ranges
2196          * smu_set_watermarks_table
2197          * navi10_set_watermarks_table
2198          * smu_write_watermarks_table
2199          *
2200          * For Renoir, clock settings of dcn watermark are also fixed values.
2201          * dc has implemented different flow for window driver:
2202          * dc_hardware_init / dc_set_power_state
2203          * dcn10_init_hw
2204          * notify_wm_ranges
2205          * set_wm_ranges
2206          * -- Linux
2207          * smu_set_watermarks_for_clock_ranges
2208          * renoir_set_watermarks_table
2209          * smu_write_watermarks_table
2210          *
2211          * For Linux,
2212          * dc_hardware_init -> amdgpu_dm_init
2213          * dc_set_power_state --> dm_resume
2214          *
2215          * therefore, this function applies to navi10/12/14 but not Renoir.
2216          */
2218         switch (adev->ip_versions[DCE_HWIP][0]) {
2219         case IP_VERSION(2, 0, 2):
2220         case IP_VERSION(2, 0, 0):
2221                 break;
2222         default:
2223                 return 0;
2224         }
2225
2226         ret = amdgpu_dpm_write_watermarks_table(adev);
2227         if (ret) {
2228                 DRM_ERROR("Failed to update WMTABLE!\n");
2229                 return ret;
2230         }
2231
2232         return 0;
2233 }
2234
2235 /**
2236  * dm_hw_init() - Initialize DC device
2237  * @handle: The base driver device containing the amdgpu_dm device.
2238  *
2239  * Initialize the &struct amdgpu_display_manager device. This involves calling
2240  * the initializers of each DM component, then populating the struct with them.
2241  *
2242  * Although the function implies hardware initialization, both hardware and
2243  * software are initialized here. Splitting them out to their relevant init
2244  * hooks is a future TODO item.
2245  *
2246  * Some notable things that are initialized here:
2247  *
2248  * - Display Core, both software and hardware
2249  * - DC modules that we need (freesync and color management)
2250  * - DRM software states
2251  * - Interrupt sources and handlers
2252  * - Vblank support
2253  * - Debug FS entries, if enabled
2254  */
2255 static int dm_hw_init(void *handle)
2256 {
2257         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* Create DAL display manager */
        r = amdgpu_dm_init(adev);
        if (r)
                return r;

        amdgpu_dm_hpd_init(adev);
2261
2262         return 0;
2263 }
2264
2265 /**
2266  * dm_hw_fini() - Teardown DC device
2267  * @handle: The base driver device containing the amdgpu_dm device.
2268  *
2269  * Teardown components within &struct amdgpu_display_manager that require
2270  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2271  * were loaded. Also flush IRQ workqueues and disable them.
2272  */
2273 static int dm_hw_fini(void *handle)
2274 {
2275         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2276
2277         amdgpu_dm_hpd_fini(adev);
2278
2279         amdgpu_dm_irq_fini(adev);
2280         amdgpu_dm_fini(adev);
2281         return 0;
2282 }
2283
2284
2285 static int dm_enable_vblank(struct drm_crtc *crtc);
2286 static void dm_disable_vblank(struct drm_crtc *crtc);
2287
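/*
 * Enable or disable pageflip and vblank interrupts for every stream that
 * has active planes; used to quiesce and restore IRQs around GPU reset.
 */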
2288 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2289                                  struct dc_state *state, bool enable)
2290 {
2291         enum dc_irq_source irq_source;
2292         struct amdgpu_crtc *acrtc;
2293         int rc = -EBUSY;
2294         int i = 0;
2295
2296         for (i = 0; i < state->stream_count; i++) {
2297                 acrtc = get_crtc_by_otg_inst(
2298                                 adev, state->stream_status[i].primary_otg_inst);
2299
2300                 if (acrtc && state->stream_status[i].plane_count != 0) {
2301                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2302                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2303                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2304                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2305                         if (rc)
2306                                 DRM_WARN("Failed to %s pflip interrupts\n",
2307                                          enable ? "enable" : "disable");
2308
2309                         if (enable) {
2310                                 rc = dm_enable_vblank(&acrtc->base);
2311                                 if (rc)
2312                                         DRM_WARN("Failed to enable vblank interrupts\n");
2313                         } else {
2314                                 dm_disable_vblank(&acrtc->base);
2315                         }
2316
2317                 }
2318         }
2320 }
2321
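/*
 * Build a copy of the current DC state with every plane and stream removed
 * and commit it, blanking all pipes (used on suspend and GPU reset).
 */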
2322 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2323 {
2324         struct dc_state *context = NULL;
2325         enum dc_status res = DC_ERROR_UNEXPECTED;
2326         int i;
2327         struct dc_stream_state *del_streams[MAX_PIPES];
2328         int del_streams_count = 0;
2329
2330         memset(del_streams, 0, sizeof(del_streams));
2331
2332         context = dc_create_state(dc);
2333         if (context == NULL)
2334                 goto context_alloc_fail;
2335
2336         dc_resource_state_copy_construct_current(dc, context);
2337
2338         /* First remove from context all streams */
2339         for (i = 0; i < context->stream_count; i++) {
2340                 struct dc_stream_state *stream = context->streams[i];
2341
2342                 del_streams[del_streams_count++] = stream;
2343         }
2344
2345         /* Remove all planes for removed streams and then remove the streams */
2346         for (i = 0; i < del_streams_count; i++) {
2347                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2348                         res = DC_FAIL_DETACH_SURFACES;
2349                         goto fail;
2350                 }
2351
2352                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2353                 if (res != DC_OK)
2354                         goto fail;
2355         }
2356
2357         res = dc_commit_state(dc, context);
2358
2359 fail:
2360         dc_release_state(context);
2361
2362 context_alloc_fail:
2363         return res;
2364 }
2365
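/* Drain all per-link HPD RX offload workqueues before suspending */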
2366 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2367 {
2368         int i;
2369
2370         if (dm->hpd_rx_offload_wq) {
2371                 for (i = 0; i < dm->dc->caps.max_links; i++)
2372                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2373         }
2374 }
2375
2376 static int dm_suspend(void *handle)
2377 {
2378         struct amdgpu_device *adev = handle;
2379         struct amdgpu_display_manager *dm = &adev->dm;
2380         int ret = 0;
2381
2382         if (amdgpu_in_reset(adev)) {
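                /*
                 * Note: dc_lock is intentionally held across the GPU reset;
                 * the matching unlock is in dm_resume().
                 */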
2383                 mutex_lock(&dm->dc_lock);
2384
2385                 dc_allow_idle_optimizations(adev->dm.dc, false);
2386
2387                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2388
2389                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2390
2391                 amdgpu_dm_commit_zero_streams(dm->dc);
2392
2393                 amdgpu_dm_irq_suspend(adev);
2394
2395                 hpd_rx_irq_work_suspend(dm);
2396
2397                 return ret;
2398         }
2399
2400         WARN_ON(adev->dm.cached_state);
2401         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2402
2403         s3_handle_mst(adev_to_drm(adev), true);
2404
2405         amdgpu_dm_irq_suspend(adev);
2406
2407         hpd_rx_irq_work_suspend(dm);
2408
2409         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2410
2411         return 0;
2412 }
2413
2414 struct amdgpu_dm_connector *
2415 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2416                                              struct drm_crtc *crtc)
2417 {
2418         uint32_t i;
2419         struct drm_connector_state *new_con_state;
2420         struct drm_connector *connector;
2421         struct drm_crtc *crtc_from_state;
2422
2423         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2424                 crtc_from_state = new_con_state->crtc;
2425
2426                 if (crtc_from_state == crtc)
2427                         return to_amdgpu_dm_connector(connector);
2428         }
2429
2430         return NULL;
2431 }
2432
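/*
 * Emulate a successful detection for connectors forced on by the user:
 * synthesize a sink that matches the connector signal and read the EDID
 * through the local DDC path.
 */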
2433 static void emulated_link_detect(struct dc_link *link)
2434 {
2435         struct dc_sink_init_data sink_init_data = { 0 };
2436         struct display_sink_capability sink_caps = { 0 };
2437         enum dc_edid_status edid_status;
2438         struct dc_context *dc_ctx = link->ctx;
2439         struct dc_sink *sink = NULL;
2440         struct dc_sink *prev_sink = NULL;
2441
2442         link->type = dc_connection_none;
2443         prev_sink = link->local_sink;
2444
2445         if (prev_sink)
2446                 dc_sink_release(prev_sink);
2447
2448         switch (link->connector_signal) {
2449         case SIGNAL_TYPE_HDMI_TYPE_A: {
2450                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2451                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2452                 break;
2453         }
2454
2455         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2456                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2457                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2458                 break;
2459         }
2460
2461         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2462                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2463                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2464                 break;
2465         }
2466
2467         case SIGNAL_TYPE_LVDS: {
2468                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2469                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2470                 break;
2471         }
2472
2473         case SIGNAL_TYPE_EDP: {
2474                 sink_caps.transaction_type =
2475                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2476                 sink_caps.signal = SIGNAL_TYPE_EDP;
2477                 break;
2478         }
2479
2480         case SIGNAL_TYPE_DISPLAY_PORT: {
2481                 sink_caps.transaction_type =
2482                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2483                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2484                 break;
2485         }
2486
2487         default:
2488                 DC_ERROR("Invalid connector type! signal:%d\n",
2489                         link->connector_signal);
2490                 return;
2491         }
2492
2493         sink_init_data.link = link;
2494         sink_init_data.sink_signal = sink_caps.signal;
2495
2496         sink = dc_sink_create(&sink_init_data);
2497         if (!sink) {
2498                 DC_ERROR("Failed to create sink!\n");
2499                 return;
2500         }
2501
2502         /* dc_sink_create returns a new reference */
2503         link->local_sink = sink;
2504
2505         edid_status = dm_helpers_read_local_edid(
2506                         link->ctx,
2507                         link,
2508                         sink);
2509
2510         if (edid_status != EDID_OK)
2511                 DC_ERROR("Failed to read EDID\n");
2513 }
2514
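/*
 * After GPU reset, re-commit the cached streams with every surface flagged
 * for a full update so DC reprograms the hardware from scratch.
 */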
2515 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2516                                      struct amdgpu_display_manager *dm)
2517 {
2518         struct {
2519                 struct dc_surface_update surface_updates[MAX_SURFACES];
2520                 struct dc_plane_info plane_infos[MAX_SURFACES];
2521                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2522                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2523                 struct dc_stream_update stream_update;
2524         } *bundle;
2525         int k, m;
2526
2527         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2528
2529         if (!bundle) {
2530                 dm_error("Failed to allocate update bundle\n");
2531                 goto cleanup;
2532         }
2533
2534         for (k = 0; k < dc_state->stream_count; k++) {
2535                 bundle->stream_update.stream = dc_state->streams[k];
2536
2537                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2538                         bundle->surface_updates[m].surface =
2539                                 dc_state->stream_status->plane_states[m];
2540                         bundle->surface_updates[m].surface->force_full_update =
2541                                 true;
2542                 }
2543                 dc_commit_updates_for_stream(
2544                         dm->dc, bundle->surface_updates,
2545                         dc_state->stream_status->plane_count,
2546                         dc_state->streams[k], &bundle->stream_update, dc_state);
2547         }
2548
2549 cleanup:
2550         kfree(bundle);
2553 }
2554
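/* Force DPMS off on the stream currently associated with the given link */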
2555 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2556 {
2557         struct dc_stream_state *stream_state;
2558         struct amdgpu_dm_connector *aconnector = link->priv;
2559         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2560         struct dc_stream_update stream_update;
2561         bool dpms_off = true;
2562
2563         memset(&stream_update, 0, sizeof(stream_update));
2564         stream_update.dpms_off = &dpms_off;
2565
2566         mutex_lock(&adev->dm.dc_lock);
2567         stream_state = dc_stream_find_from_link(link);
2568
2569         if (stream_state == NULL) {
2570                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2571                 mutex_unlock(&adev->dm.dc_lock);
2572                 return;
2573         }
2574
2575         stream_update.stream = stream_state;
2576         acrtc_state->force_dpms_off = true;
2577         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2578                                      stream_state, &stream_update,
2579                                      stream_state->ctx->dc->current_state);
2580         mutex_unlock(&adev->dm.dc_lock);
2581 }
2582
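/*
 * Resume handler. In the GPU-reset path the DC state cached by dm_suspend()
 * is restored and re-committed; in the normal S3 path the DC state is
 * rebuilt, DMUB re-initialized, links re-detected and the cached atomic
 * state resumed.
 */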
2583 static int dm_resume(void *handle)
2584 {
2585         struct amdgpu_device *adev = handle;
2586         struct drm_device *ddev = adev_to_drm(adev);
2587         struct amdgpu_display_manager *dm = &adev->dm;
2588         struct amdgpu_dm_connector *aconnector;
2589         struct drm_connector *connector;
2590         struct drm_connector_list_iter iter;
2591         struct drm_crtc *crtc;
2592         struct drm_crtc_state *new_crtc_state;
2593         struct dm_crtc_state *dm_new_crtc_state;
2594         struct drm_plane *plane;
2595         struct drm_plane_state *new_plane_state;
2596         struct dm_plane_state *dm_new_plane_state;
2597         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2598         enum dc_connection_type new_connection_type = dc_connection_none;
2599         struct dc_state *dc_state;
2600         int i, r, j;
2601
2602         if (amdgpu_in_reset(adev)) {
2603                 dc_state = dm->cached_dc_state;
2604
2605                 /*
2606                  * The dc->current_state is backed up into dm->cached_dc_state
2607                  * before we commit 0 streams.
2608                  *
2609                  * DC will clear link encoder assignments on the real state
2610                  * but the changes won't propagate over to the copy we made
2611                  * before the 0 streams commit.
2612                  *
2613                  * DC expects that link encoder assignments are *not* valid
2614                  * when committing a state, so as a workaround we can copy
2615                  * off of the current state.
2616                  *
2617                  * We lose the previous assignments, but we had already
2618                  * committed 0 streams anyway.
2619                  */
2620                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2621
2622                 if (dc_enable_dmub_notifications(adev->dm.dc))
2623                         amdgpu_dm_outbox_init(adev);
2624
2625                 r = dm_dmub_hw_init(adev);
2626                 if (r)
2627                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2628
2629                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2630                 dc_resume(dm->dc);
2631
2632                 amdgpu_dm_irq_resume_early(adev);
2633
2634                 for (i = 0; i < dc_state->stream_count; i++) {
2635                         dc_state->streams[i]->mode_changed = true;
2636                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2637                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2638                                         = 0xffffffff;
2639                         }
2640                 }
2641
2642                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2643
2644                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2645
2646                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2647
2648                 dc_release_state(dm->cached_dc_state);
2649                 dm->cached_dc_state = NULL;
2650
2651                 amdgpu_dm_irq_resume_late(adev);
2652
2653                 mutex_unlock(&dm->dc_lock);
2654
2655                 return 0;
2656         }
2657         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2658         dc_release_state(dm_state->context);
2659         dm_state->context = dc_create_state(dm->dc);
2660         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2661         dc_resource_state_construct(dm->dc, dm_state->context);
2662
2663         /* Re-enable outbox interrupts for DPIA. */
2664         if (dc_enable_dmub_notifications(adev->dm.dc))
2665                 amdgpu_dm_outbox_init(adev);
2666
2667         /* Before powering on DC we need to re-initialize DMUB. */
2668         dm_dmub_hw_resume(adev);
2669
2670         /* power on hardware */
2671         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2672
2673         /* program HPD filter */
2674         dc_resume(dm->dc);
2675
2676         /*
2677          * early enable HPD Rx IRQ, should be done before set mode as short
2678          * pulse interrupts are used for MST
2679          */
2680         amdgpu_dm_irq_resume_early(adev);
2681
2682         /* On resume we need to rewrite the MSTM control bits to enable MST */
2683         s3_handle_mst(ddev, false);
2684
2685         /* Do detection */
2686         drm_connector_list_iter_begin(ddev, &iter);
2687         drm_for_each_connector_iter(connector, &iter) {
2688                 aconnector = to_amdgpu_dm_connector(connector);
2689
2690                 /*
2691                  * this is the case when traversing through already created
2692                  * MST connectors, should be skipped
2693                  */
2694                 if (aconnector->dc_link &&
2695                     aconnector->dc_link->type == dc_connection_mst_branch)
2696                         continue;
2697
2698                 mutex_lock(&aconnector->hpd_lock);
2699                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2700                         DRM_ERROR("KMS: Failed to detect connector\n");
2701
2702                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2703                         emulated_link_detect(aconnector->dc_link);
2704                 else
2705                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2706
2707                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2708                         aconnector->fake_enable = false;
2709
2710                 if (aconnector->dc_sink)
2711                         dc_sink_release(aconnector->dc_sink);
2712                 aconnector->dc_sink = NULL;
2713                 amdgpu_dm_update_connector_after_detect(aconnector);
2714                 mutex_unlock(&aconnector->hpd_lock);
2715         }
2716         drm_connector_list_iter_end(&iter);
2717
2718         /* Force mode set in atomic commit */
2719         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2720                 new_crtc_state->active_changed = true;
2721
2722         /*
2723          * atomic_check is expected to create the dc states. We need to release
2724          * them here, since they were duplicated as part of the suspend
2725          * procedure.
2726          */
2727         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2728                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2729                 if (dm_new_crtc_state->stream) {
2730                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2731                         dc_stream_release(dm_new_crtc_state->stream);
2732                         dm_new_crtc_state->stream = NULL;
2733                 }
2734         }
2735
2736         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2737                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2738                 if (dm_new_plane_state->dc_state) {
2739                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2740                         dc_plane_state_release(dm_new_plane_state->dc_state);
2741                         dm_new_plane_state->dc_state = NULL;
2742                 }
2743         }
2744
2745         drm_atomic_helper_resume(ddev, dm->cached_state);
2746
2747         dm->cached_state = NULL;
2748
2749         amdgpu_dm_irq_resume_late(adev);
2750
2751         amdgpu_dm_smu_write_watermarks_table(adev);
2752
2753         return 0;
2754 }
2755
2756 /**
2757  * DOC: DM Lifecycle
2758  *
2759  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2760  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2761  * the base driver's device list to be initialized and torn down accordingly.
2762  *
2763  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
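 *
 * For example, SoC initialization code registers this block with
 * amdgpu_device_ip_block_add(adev, &dm_ip_block).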
2764  */
2765
2766 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2767         .name = "dm",
2768         .early_init = dm_early_init,
2769         .late_init = dm_late_init,
2770         .sw_init = dm_sw_init,
2771         .sw_fini = dm_sw_fini,
2772         .early_fini = amdgpu_dm_early_fini,
2773         .hw_init = dm_hw_init,
2774         .hw_fini = dm_hw_fini,
2775         .suspend = dm_suspend,
2776         .resume = dm_resume,
2777         .is_idle = dm_is_idle,
2778         .wait_for_idle = dm_wait_for_idle,
2779         .check_soft_reset = dm_check_soft_reset,
2780         .soft_reset = dm_soft_reset,
2781         .set_clockgating_state = dm_set_clockgating_state,
2782         .set_powergating_state = dm_set_powergating_state,
2783 };
2784
2785 const struct amdgpu_ip_block_version dm_ip_block =
2786 {
2787         .type = AMD_IP_BLOCK_TYPE_DCE,
2788         .major = 1,
2789         .minor = 0,
2790         .rev = 0,
2791         .funcs = &amdgpu_dm_funcs,
2792 };
2793
2794
2795 /**
2796  * DOC: atomic
2797  *
2798  * *WIP*
2799  */
2800
2801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2802         .fb_create = amdgpu_display_user_framebuffer_create,
2803         .get_format_info = amd_get_format_info,
2804         .output_poll_changed = drm_fb_helper_output_poll_changed,
2805         .atomic_check = amdgpu_dm_atomic_check,
2806         .atomic_commit = drm_atomic_helper_commit,
2807 };
2808
2809 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2810         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2811 };
2812
2813 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2814 {
2815         u32 max_cll, min_cll, max, min, q, r;
2816         struct amdgpu_dm_backlight_caps *caps;
2817         struct amdgpu_display_manager *dm;
2818         struct drm_connector *conn_base;
2819         struct amdgpu_device *adev;
2820         struct dc_link *link = NULL;
2821         static const u8 pre_computed_values[] = {
2822                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2823                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2824         int i;
2825
2826         if (!aconnector || !aconnector->dc_link)
2827                 return;
2828
2829         link = aconnector->dc_link;
2830         if (link->connector_signal != SIGNAL_TYPE_EDP)
2831                 return;
2832
2833         conn_base = &aconnector->base;
2834         adev = drm_to_adev(conn_base->dev);
2835         dm = &adev->dm;
2836         for (i = 0; i < dm->num_of_edps; i++) {
2837                 if (link == dm->backlight_link[i])
2838                         break;
2839         }
2840         if (i >= dm->num_of_edps)
2841                 return;
2842         caps = &dm->backlight_caps[i];
2843         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2844         caps->aux_support = false;
2845         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2846         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2847
2848         if (caps->ext_caps->bits.oled == 1 /*||
2849             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2850             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2851                 caps->aux_support = true;
2852
2853         if (amdgpu_backlight == 0)
2854                 caps->aux_support = false;
2855         else if (amdgpu_backlight == 1)
2856                 caps->aux_support = true;
2857
2858         /* From the specification (CTA-861-G), for calculating the maximum
2859          * luminance we need to use:
2860          *      Luminance = 50*2**(CV/32)
2861          * Where CV is a one-byte value.
2862          * Evaluating this expression directly would require floating-point
2863          * precision; to avoid that, we take advantage of the fact that CV is
2864          * divided by a constant. By Euclid's division algorithm, CV can be
2865          * written as CV = 32*q + r. Substituting into the luminance expression
2866          * gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute the
2867          * values of 50*2**(r/32). They were generated with the following
2868          * Ruby line:
2869          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2870          * The results of the above expression can be verified at
2871          * pre_computed_values.
2872          */
2873         q = max_cll >> 5;
2874         r = max_cll % 32;
2875         max = (1 << q) * pre_computed_values[r];
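        /*
         * Worked example with a hypothetical max_cll of 70:
         * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
         * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
         * close to the exact 50*2**(70/32) ~= 227.8.
         */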
2876
2877         // min luminance: maxLum * (CV/255)^2 / 100
2878         q = DIV_ROUND_CLOSEST(min_cll, 255);
2879         min = max * DIV_ROUND_CLOSEST((q * q), 100);
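        /*
         * Note: min_cll is one byte, so DIV_ROUND_CLOSEST(min_cll, 255) is at
         * most 1 and the integer math above effectively yields min = 0.
         */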
2880
2881         caps->aux_max_input_signal = max;
2882         caps->aux_min_input_signal = min;
2883 }
2884
2885 void amdgpu_dm_update_connector_after_detect(
2886                 struct amdgpu_dm_connector *aconnector)
2887 {
2888         struct drm_connector *connector = &aconnector->base;
2889         struct drm_device *dev = connector->dev;
2890         struct dc_sink *sink;
2891
2892         /* MST handled by drm_mst framework */
2893         if (aconnector->mst_mgr.mst_state)
2894                 return;
2895
2896         sink = aconnector->dc_link->local_sink;
2897         if (sink)
2898                 dc_sink_retain(sink);
2899
2900         /*
2901          * An EDID-managed connector gets its first update only in the mode_valid hook;
2902          * the connector sink is then set to either a fake or a physical sink, depending on link status.
2903          * Skip if already done during boot.
2904          */
2905         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2906                         && aconnector->dc_em_sink) {
2907
2908                 /*
2909                  * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
2910                  * stream, because connector->dc_sink is set to NULL on resume
2911                  */
2912                 mutex_lock(&dev->mode_config.mutex);
2913
2914                 if (sink) {
2915                         if (aconnector->dc_sink) {
2916                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2917                                 /*
2918                                  * The retain/release pair bumps the sink's refcount:
2919                                  * the link no longer points to the sink after disconnect,
2920                                  * so without it the next CRTC-to-connector reshuffle by
2921                                  * userspace would trigger an unwanted dc_sink release
2922                                  */
2923                                 dc_sink_release(aconnector->dc_sink);
2924                         }
2925                         aconnector->dc_sink = sink;
2926                         dc_sink_retain(aconnector->dc_sink);
2927                         amdgpu_dm_update_freesync_caps(connector,
2928                                         aconnector->edid);
2929                 } else {
2930                         amdgpu_dm_update_freesync_caps(connector, NULL);
2931                         if (!aconnector->dc_sink) {
2932                                 aconnector->dc_sink = aconnector->dc_em_sink;
2933                                 dc_sink_retain(aconnector->dc_sink);
2934                         }
2935                 }
2936
2937                 mutex_unlock(&dev->mode_config.mutex);
2938
2939                 if (sink)
2940                         dc_sink_release(sink);
2941                 return;
2942         }
2943
2944         /*
2945          * TODO: temporary guard until a proper fix is found.
2946          * If this sink is an MST sink, we should not do anything.
2947          */
2948         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2949                 dc_sink_release(sink);
2950                 return;
2951         }
2952
2953         if (aconnector->dc_sink == sink) {
2954                 /*
2955                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2956                  * Do nothing!!
2957                  */
2958                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2959                                 aconnector->connector_id);
2960                 if (sink)
2961                         dc_sink_release(sink);
2962                 return;
2963         }
2964
2965         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2966                 aconnector->connector_id, aconnector->dc_sink, sink);
2967
2968         mutex_lock(&dev->mode_config.mutex);
2969
2970         /*
2971          * 1. Update status of the drm connector
2972          * 2. Send an event and let userspace tell us what to do
2973          */
2974         if (sink) {
2975                 /*
2976                  * TODO: check if we still need the S3 mode update workaround.
2977                  * If yes, put it here.
2978                  */
2979                 if (aconnector->dc_sink) {
2980                         amdgpu_dm_update_freesync_caps(connector, NULL);
2981                         dc_sink_release(aconnector->dc_sink);
2982                 }
2983
2984                 aconnector->dc_sink = sink;
2985                 dc_sink_retain(aconnector->dc_sink);
2986                 if (sink->dc_edid.length == 0) {
2987                         aconnector->edid = NULL;
2988                         if (aconnector->dc_link->aux_mode) {
2989                                 drm_dp_cec_unset_edid(
2990                                         &aconnector->dm_dp_aux.aux);
2991                         }
2992                 } else {
2993                         aconnector->edid =
2994                                 (struct edid *)sink->dc_edid.raw_edid;
2995
2996                         if (aconnector->dc_link->aux_mode)
2997                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2998                                                     aconnector->edid);
2999                 }
3000
3001                 drm_connector_update_edid_property(connector, aconnector->edid);
3002                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3003                 update_connector_ext_caps(aconnector);
3004         } else {
3005                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3006                 amdgpu_dm_update_freesync_caps(connector, NULL);
3007                 drm_connector_update_edid_property(connector, NULL);
3008                 aconnector->num_modes = 0;
3009                 dc_sink_release(aconnector->dc_sink);
3010                 aconnector->dc_sink = NULL;
3011                 aconnector->edid = NULL;
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
3013                 /* Set CP to DESIRED if it was ENABLED, so that it can be re-enabled on hotplug */
3014                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3015                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3016 #endif
3017         }
3018
3019         mutex_unlock(&dev->mode_config.mutex);
3020
3021         update_subconnector_property(aconnector);
3022
3023         if (sink)
3024                 dc_sink_release(sink);
3025 }
3026
3027 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3028 {
3029         struct drm_connector *connector = &aconnector->base;
3030         struct drm_device *dev = connector->dev;
3031         enum dc_connection_type new_connection_type = dc_connection_none;
3032         struct amdgpu_device *adev = drm_to_adev(dev);
3033         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3034         struct dm_crtc_state *dm_crtc_state = NULL;
3035
3036         if (adev->dm.disable_hpd_irq)
3037                 return;
3038
3039         if (dm_con_state->base.state && dm_con_state->base.crtc)
3040                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3041                                         dm_con_state->base.state,
3042                                         dm_con_state->base.crtc));
3043         /*
3044          * On detection failure, or for MST connectors, there is no need to update
3045          * the connector status or notify the OS, since MST handles this in its own context.
3046          */
3047         mutex_lock(&aconnector->hpd_lock);
3048
3049 #ifdef CONFIG_DRM_AMD_DC_HDCP
3050         if (adev->dm.hdcp_workqueue) {
3051                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3052                 dm_con_state->update_hdcp = true;
3053         }
3054 #endif
3055         if (aconnector->fake_enable)
3056                 aconnector->fake_enable = false;
3057
3058         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3059                 DRM_ERROR("KMS: Failed to detect connector\n");
3060
3061         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3062                 emulated_link_detect(aconnector->dc_link);
3063
3064                 drm_modeset_lock_all(dev);
3065                 dm_restore_drm_connector_state(dev, connector);
3066                 drm_modeset_unlock_all(dev);
3067
3068                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3069                         drm_kms_helper_connector_hotplug_event(connector);
3070
3071         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3072                 if (new_connection_type == dc_connection_none &&
3073                     aconnector->dc_link->type == dc_connection_none &&
3074                     dm_crtc_state)
3075                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3076
3077                 amdgpu_dm_update_connector_after_detect(aconnector);
3078
3079                 drm_modeset_lock_all(dev);
3080                 dm_restore_drm_connector_state(dev, connector);
3081                 drm_modeset_unlock_all(dev);
3082
3083                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3084                         drm_kms_helper_connector_hotplug_event(connector);
3085         }
3086         mutex_unlock(&aconnector->hpd_lock);
3087
3088 }
3089
3090 static void handle_hpd_irq(void *param)
3091 {
3092         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3093
3094         handle_hpd_irq_helper(aconnector);
3095
3096 }
3097
3098 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3099 {
3100         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3101         uint8_t dret;
3102         bool new_irq_handled = false;
3103         int dpcd_addr;
3104         int dpcd_bytes_to_read;
3105
3106         const int max_process_count = 30;
3107         int process_count = 0;
3108
3109         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3110
3111         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3112                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3113                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3114                 dpcd_addr = DP_SINK_COUNT;
3115         } else {
3116                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3117                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3118                 dpcd_addr = DP_SINK_COUNT_ESI;
3119         }
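        /*
         * With the ESI layout (DPCD 1.2+), esi[0] holds DP_SINK_COUNT_ESI and
         * esi[1]/esi[2] hold DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0/ESI1; these are
         * the three bytes dumped by the debug print in the loop below.
         */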
3120
3121         dret = drm_dp_dpcd_read(
3122                 &aconnector->dm_dp_aux.aux,
3123                 dpcd_addr,
3124                 esi,
3125                 dpcd_bytes_to_read);
3126
3127         while (dret == dpcd_bytes_to_read &&
3128                 process_count < max_process_count) {
3129                 uint8_t retry;
3130                 dret = 0;
3131
3132                 process_count++;
3133
3134                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3135                 /* handle HPD short pulse irq */
3136                 if (aconnector->mst_mgr.mst_state)
3137                         drm_dp_mst_hpd_irq(
3138                                 &aconnector->mst_mgr,
3139                                 esi,
3140                                 &new_irq_handled);
3141
3142                 if (new_irq_handled) {
3143                         /* ACK via DPCD to notify the downstream device */
3144                         const int ack_dpcd_bytes_to_write =
3145                                 dpcd_bytes_to_read - 1;
3146
3147                         for (retry = 0; retry < 3; retry++) {
3148                                 uint8_t wret;
3149
3150                                 wret = drm_dp_dpcd_write(
3151                                         &aconnector->dm_dp_aux.aux,
3152                                         dpcd_addr + 1,
3153                                         &esi[1],
3154                                         ack_dpcd_bytes_to_write);
3155                                 if (wret == ack_dpcd_bytes_to_write)
3156                                         break;
3157                         }
3158
3159                         /* check if there is a new irq to be handled */
3160                         dret = drm_dp_dpcd_read(
3161                                 &aconnector->dm_dp_aux.aux,
3162                                 dpcd_addr,
3163                                 esi,
3164                                 dpcd_bytes_to_read);
3165
3166                         new_irq_handled = false;
3167                 } else {
3168                         break;
3169                 }
3170         }
3171
3172         if (process_count == max_process_count)
3173                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3174 }
3175
3176 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3177                                                         union hpd_irq_data hpd_irq_data)
3178 {
3179         struct hpd_rx_irq_offload_work *offload_work =
3180                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3181
3182         if (!offload_work) {
3183                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3184                 return;
3185         }
3186
3187         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3188         offload_work->data = hpd_irq_data;
3189         offload_work->offload_wq = offload_wq;
3190
3191         queue_work(offload_wq->wq, &offload_work->work);
3192         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3193 }
3194
3195 static void handle_hpd_rx_irq(void *param)
3196 {
3197         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3198         struct drm_connector *connector = &aconnector->base;
3199         struct drm_device *dev = connector->dev;
3200         struct dc_link *dc_link = aconnector->dc_link;
3201         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3202         bool result = false;
3203         enum dc_connection_type new_connection_type = dc_connection_none;
3204         struct amdgpu_device *adev = drm_to_adev(dev);
3205         union hpd_irq_data hpd_irq_data;
3206         bool link_loss = false;
3207         bool has_left_work = false;
3208         int idx = aconnector->base.index;
3209         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3210
3211         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3212
3213         if (adev->dm.disable_hpd_irq)
3214                 return;
3215
3216         /*
3217          * TODO: Temporarily take this mutex so the HPD interrupt does not race
3218          * on the GPIO; once an i2c helper is implemented, this mutex should
3219          * be retired.
3220          */
3221         mutex_lock(&aconnector->hpd_lock);
3222
3223         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3224                                                 &link_loss, true, &has_left_work);
3225
3226         if (!has_left_work)
3227                 goto out;
3228
3229         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3230                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3231                 goto out;
3232         }
3233
3234         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3235                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3236                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3237                         dm_handle_mst_sideband_msg(aconnector);
3238                         goto out;
3239                 }
3240
3241                 if (link_loss) {
3242                         bool skip = false;
3243
3244                         spin_lock(&offload_wq->offload_lock);
3245                         skip = offload_wq->is_handling_link_loss;
3246
3247                         if (!skip)
3248                                 offload_wq->is_handling_link_loss = true;
3249
3250                         spin_unlock(&offload_wq->offload_lock);
3251
3252                         if (!skip)
3253                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3254
3255                         goto out;
3256                 }
3257         }
3258
3259 out:
3260         if (result && !is_mst_root_connector) {
3261                 /* Downstream Port status changed. */
3262                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3263                         DRM_ERROR("KMS: Failed to detect connector\n");
3264
3265                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3266                         emulated_link_detect(dc_link);
3267
3268                         if (aconnector->fake_enable)
3269                                 aconnector->fake_enable = false;
3270
3271                         amdgpu_dm_update_connector_after_detect(aconnector);
3272
3274                         drm_modeset_lock_all(dev);
3275                         dm_restore_drm_connector_state(dev, connector);
3276                         drm_modeset_unlock_all(dev);
3277
3278                         drm_kms_helper_connector_hotplug_event(connector);
3279                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3280
3281                         if (aconnector->fake_enable)
3282                                 aconnector->fake_enable = false;
3283
3284                         amdgpu_dm_update_connector_after_detect(aconnector);
3285
3287                         drm_modeset_lock_all(dev);
3288                         dm_restore_drm_connector_state(dev, connector);
3289                         drm_modeset_unlock_all(dev);
3290
3291                         drm_kms_helper_connector_hotplug_event(connector);
3292                 }
3293         }
3294 #ifdef CONFIG_DRM_AMD_DC_HDCP
3295         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3296                 if (adev->dm.hdcp_workqueue)
3297                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3298         }
3299 #endif
3300
3301         if (dc_link->type != dc_connection_mst_branch)
3302                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3303
3304         mutex_unlock(&aconnector->hpd_lock);
3305 }
3306
3307 static void register_hpd_handlers(struct amdgpu_device *adev)
3308 {
3309         struct drm_device *dev = adev_to_drm(adev);
3310         struct drm_connector *connector;
3311         struct amdgpu_dm_connector *aconnector;
3312         const struct dc_link *dc_link;
3313         struct dc_interrupt_params int_params = {0};
3314
3315         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3316         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3317
3318         list_for_each_entry(connector,
3319                         &dev->mode_config.connector_list, head) {
3320
3321                 aconnector = to_amdgpu_dm_connector(connector);
3322                 dc_link = aconnector->dc_link;
3323
3324                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3325                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3326                         int_params.irq_source = dc_link->irq_source_hpd;
3327
3328                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3329                                         handle_hpd_irq,
3330                                         (void *) aconnector);
3331                 }
3332
3333                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3334
3335                         /* Also register for DP short pulse (hpd_rx). */
3336                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3337                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3338
3339                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3340                                         handle_hpd_rx_irq,
3341                                         (void *) aconnector);
3342
3343                         if (adev->dm.hpd_rx_offload_wq)
3344                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3345                                         aconnector;
3346                 }
3347         }
3348 }
3349
3350 #if defined(CONFIG_DRM_AMD_DC_SI)
3351 /* Register IRQ sources and initialize IRQ callbacks */
3352 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3353 {
3354         struct dc *dc = adev->dm.dc;
3355         struct common_irq_params *c_irq_params;
3356         struct dc_interrupt_params int_params = {0};
3357         int r;
3358         int i;
3359         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3360
3361         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3362         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3363
3364         /*
3365          * Actions of amdgpu_irq_add_id():
3366          * 1. Register a set() function with base driver.
3367          *    Base driver will call set() function to enable/disable an
3368          *    interrupt in DC hardware.
3369          * 2. Register amdgpu_dm_irq_handler().
3370          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3371          *    coming from DC hardware.
3372          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3373          *    for acknowledging and handling. */
3374
3375         /* Use VBLANK interrupt */
3376         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3377                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3378                 if (r) {
3379                         DRM_ERROR("Failed to add crtc irq id!\n");
3380                         return r;
3381                 }
3382
3383                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3384                 int_params.irq_source =
3385                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3386
3387                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3388
3389                 c_irq_params->adev = adev;
3390                 c_irq_params->irq_src = int_params.irq_source;
3391
3392                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3393                                 dm_crtc_high_irq, c_irq_params);
3394         }
3395
3396         /* Use GRPH_PFLIP interrupt */
3397         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3398                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3399                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3400                 if (r) {
3401                         DRM_ERROR("Failed to add page flip irq id!\n");
3402                         return r;
3403                 }
3404
3405                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3406                 int_params.irq_source =
3407                         dc_interrupt_to_irq_source(dc, i, 0);
3408
3409                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3410
3411                 c_irq_params->adev = adev;
3412                 c_irq_params->irq_src = int_params.irq_source;
3413
3414                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3415                                 dm_pflip_high_irq, c_irq_params);
3416
3417         }
3418
3419         /* HPD */
3420         r = amdgpu_irq_add_id(adev, client_id,
3421                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3422         if (r) {
3423                 DRM_ERROR("Failed to add hpd irq id!\n");
3424                 return r;
3425         }
3426
3427         register_hpd_handlers(adev);
3428
3429         return 0;
3430 }
3431 #endif
3432
3433 /* Register IRQ sources and initialize IRQ callbacks */
3434 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3435 {
3436         struct dc *dc = adev->dm.dc;
3437         struct common_irq_params *c_irq_params;
3438         struct dc_interrupt_params int_params = {0};
3439         int r;
3440         int i;
3441         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3442
3443         if (adev->family >= AMDGPU_FAMILY_AI)
3444                 client_id = SOC15_IH_CLIENTID_DCE;
3445
3446         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3447         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3448
3449         /*
3450          * Actions of amdgpu_irq_add_id():
3451          * 1. Register a set() function with base driver.
3452          *    Base driver will call set() function to enable/disable an
3453          *    interrupt in DC hardware.
3454          * 2. Register amdgpu_dm_irq_handler().
3455          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3456          *    coming from DC hardware.
3457          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3458          *    for acknowledging and handling. */
3459
3460         /* Use VBLANK interrupt */
3461         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3462                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3463                 if (r) {
3464                         DRM_ERROR("Failed to add crtc irq id!\n");
3465                         return r;
3466                 }
3467
3468                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3469                 int_params.irq_source =
3470                         dc_interrupt_to_irq_source(dc, i, 0);
3471
3472                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3473
3474                 c_irq_params->adev = adev;
3475                 c_irq_params->irq_src = int_params.irq_source;
3476
3477                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3478                                 dm_crtc_high_irq, c_irq_params);
3479         }
3480
3481         /* Use VUPDATE interrupt */
3482         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3483                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3484                 if (r) {
3485                         DRM_ERROR("Failed to add vupdate irq id!\n");
3486                         return r;
3487                 }
3488
3489                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3490                 int_params.irq_source =
3491                         dc_interrupt_to_irq_source(dc, i, 0);
3492
3493                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3494
3495                 c_irq_params->adev = adev;
3496                 c_irq_params->irq_src = int_params.irq_source;
3497
3498                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3499                                 dm_vupdate_high_irq, c_irq_params);
3500         }
3501
3502         /* Use GRPH_PFLIP interrupt */
3503         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3504                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3505                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3506                 if (r) {
3507                         DRM_ERROR("Failed to add page flip irq id!\n");
3508                         return r;
3509                 }
3510
3511                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3512                 int_params.irq_source =
3513                         dc_interrupt_to_irq_source(dc, i, 0);
3514
3515                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3516
3517                 c_irq_params->adev = adev;
3518                 c_irq_params->irq_src = int_params.irq_source;
3519
3520                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3521                                 dm_pflip_high_irq, c_irq_params);
3522
3523         }
3524
3525         /* HPD */
3526         r = amdgpu_irq_add_id(adev, client_id,
3527                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3528         if (r) {
3529                 DRM_ERROR("Failed to add hpd irq id!\n");
3530                 return r;
3531         }
3532
3533         register_hpd_handlers(adev);
3534
3535         return 0;
3536 }
3537
3538 /* Register IRQ sources and initialize IRQ callbacks */
3539 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3540 {
3541         struct dc *dc = adev->dm.dc;
3542         struct common_irq_params *c_irq_params;
3543         struct dc_interrupt_params int_params = {0};
3544         int r;
3545         int i;
3546 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3547         static const unsigned int vrtl_int_srcid[] = {
3548                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3549                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3550                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3551                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3552                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3553                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3554         };
3555 #endif
3556
3557         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3558         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3559
3560         /*
3561          * Actions of amdgpu_irq_add_id():
3562          * 1. Register a set() function with base driver.
3563          *    Base driver will call set() function to enable/disable an
3564          *    interrupt in DC hardware.
3565          * 2. Register amdgpu_dm_irq_handler().
3566          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3567          *    coming from DC hardware.
3568          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3569          *    for acknowledging and handling.
3570          */
3571
3572         /* Use VSTARTUP interrupt */
3573         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3574                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3575                         i++) {
3576                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3577
3578                 if (r) {
3579                         DRM_ERROR("Failed to add crtc irq id!\n");
3580                         return r;
3581                 }
3582
3583                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3584                 int_params.irq_source =
3585                         dc_interrupt_to_irq_source(dc, i, 0);
3586
3587                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3588
3589                 c_irq_params->adev = adev;
3590                 c_irq_params->irq_src = int_params.irq_source;
3591
3592                 amdgpu_dm_irq_register_interrupt(
3593                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3594         }
3595
3596         /* Use otg vertical line interrupt */
3597 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3598         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3599                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3600                                 vrtl_int_srcid[i], &adev->vline0_irq);
3601
3602                 if (r) {
3603                         DRM_ERROR("Failed to add vline0 irq id!\n");
3604                         return r;
3605                 }
3606
3607                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608                 int_params.irq_source =
3609                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3610
3611                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3612                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3613                         break;
3614                 }
3615
3616                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3617                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3618
3619                 c_irq_params->adev = adev;
3620                 c_irq_params->irq_src = int_params.irq_source;
3621
3622                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3623                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3624         }
3625 #endif
3626
3627         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3628          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3629          * to trigger at end of each vblank, regardless of state of the lock,
3630          * matching DCE behaviour.
3631          */
3632         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3633              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3634              i++) {
3635                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3636
3637                 if (r) {
3638                         DRM_ERROR("Failed to add vupdate irq id!\n");
3639                         return r;
3640                 }
3641
3642                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3643                 int_params.irq_source =
3644                         dc_interrupt_to_irq_source(dc, i, 0);
3645
3646                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3647
3648                 c_irq_params->adev = adev;
3649                 c_irq_params->irq_src = int_params.irq_source;
3650
3651                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3652                                 dm_vupdate_high_irq, c_irq_params);
3653         }
3654
3655         /* Use GRPH_PFLIP interrupt */
3656         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3657                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3658                         i++) {
3659                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3660                 if (r) {
3661                         DRM_ERROR("Failed to add page flip irq id!\n");
3662                         return r;
3663                 }
3664
3665                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3666                 int_params.irq_source =
3667                         dc_interrupt_to_irq_source(dc, i, 0);
3668
3669                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3670
3671                 c_irq_params->adev = adev;
3672                 c_irq_params->irq_src = int_params.irq_source;
3673
3674                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3675                                 dm_pflip_high_irq, c_irq_params);
3676
3677         }
3678
3679         /* HPD */
3680         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3681                         &adev->hpd_irq);
3682         if (r) {
3683                 DRM_ERROR("Failed to add hpd irq id!\n");
3684                 return r;
3685         }
3686
3687         register_hpd_handlers(adev);
3688
3689         return 0;
3690 }
3691 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3692 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3693 {
3694         struct dc *dc = adev->dm.dc;
3695         struct common_irq_params *c_irq_params;
3696         struct dc_interrupt_params int_params = {0};
3697         int r, i;
3698
3699         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3700         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3701
3702         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3703                         &adev->dmub_outbox_irq);
3704         if (r) {
3705                 DRM_ERROR("Failed to add outbox irq id!\n");
3706                 return r;
3707         }
3708
3709         if (dc->ctx->dmub_srv) {
3710                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3711                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3712                 int_params.irq_source =
3713                         dc_interrupt_to_irq_source(dc, i, 0);
3714
3715                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3716
3717                 c_irq_params->adev = adev;
3718                 c_irq_params->irq_src = int_params.irq_source;
3719
3720                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3721                                 dm_dmub_outbox1_low_irq, c_irq_params);
3722         }
3723
3724         return 0;
3725 }
3726
3727 /*
3728  * Acquires the lock for the atomic state object and returns
3729  * the new atomic state.
3730  *
3731  * This should only be called during atomic check.
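 *
 * A minimal usage sketch from an atomic_check hook:
 *
 *     struct dm_atomic_state *dm_state = NULL;
 *     int ret = dm_atomic_get_state(state, &dm_state);
 *
 *     if (ret)
 *             return ret;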
3732  */
3733 int dm_atomic_get_state(struct drm_atomic_state *state,
3734                         struct dm_atomic_state **dm_state)
3735 {
3736         struct drm_device *dev = state->dev;
3737         struct amdgpu_device *adev = drm_to_adev(dev);
3738         struct amdgpu_display_manager *dm = &adev->dm;
3739         struct drm_private_state *priv_state;
3740
3741         if (*dm_state)
3742                 return 0;
3743
3744         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3745         if (IS_ERR(priv_state))
3746                 return PTR_ERR(priv_state);
3747
3748         *dm_state = to_dm_atomic_state(priv_state);
3749
3750         return 0;
3751 }
3752
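/* Walk the private objects in @state and return ours, if present. */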
3753 static struct dm_atomic_state *
3754 dm_atomic_get_new_state(struct drm_atomic_state *state)
3755 {
3756         struct drm_device *dev = state->dev;
3757         struct amdgpu_device *adev = drm_to_adev(dev);
3758         struct amdgpu_display_manager *dm = &adev->dm;
3759         struct drm_private_obj *obj;
3760         struct drm_private_state *new_obj_state;
3761         int i;
3762
3763         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3764                 if (obj->funcs == dm->atomic_obj.funcs)
3765                         return to_dm_atomic_state(new_obj_state);
3766         }
3767
3768         return NULL;
3769 }
3770
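/*
 * Invoked by the DRM core (via drm_atomic_get_private_obj_state()) the first
 * time our private object is pulled into an atomic state; gives the new
 * state its own copy of the current dc_state.
 */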
3771 static struct drm_private_state *
3772 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3773 {
3774         struct dm_atomic_state *old_state, *new_state;
3775
3776         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3777         if (!new_state)
3778                 return NULL;
3779
3780         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3781
3782         old_state = to_dm_atomic_state(obj->state);
3783
3784         if (old_state && old_state->context)
3785                 new_state->context = dc_copy_state(old_state->context);
3786
3787         if (!new_state->context) {
3788                 kfree(new_state);
3789                 return NULL;
3790         }
3791
3792         return &new_state->base;
3793 }
3794
3795 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3796                                     struct drm_private_state *state)
3797 {
3798         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3799
3800         if (dm_state && dm_state->context)
3801                 dc_release_state(dm_state->context);
3802
3803         kfree(dm_state);
3804 }
3805
3806 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3807         .atomic_duplicate_state = dm_atomic_duplicate_state,
3808         .atomic_destroy_state = dm_atomic_destroy_state,
3809 };
3810
3811 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3812 {
3813         struct dm_atomic_state *state;
3814         int r;
3815
3816         adev->mode_info.mode_config_initialized = true;
3817
3818         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3819         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3820
3821         adev_to_drm(adev)->mode_config.max_width = 16384;
3822         adev_to_drm(adev)->mode_config.max_height = 16384;
3823
3824         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3825         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3826         /* indicates support for immediate flip */
3827         adev_to_drm(adev)->mode_config.async_page_flip = true;
3828
3829         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3830
3831         state = kzalloc(sizeof(*state), GFP_KERNEL);
3832         if (!state)
3833                 return -ENOMEM;
3834
3835         state->context = dc_create_state(adev->dm.dc);
3836         if (!state->context) {
3837                 kfree(state);
3838                 return -ENOMEM;
3839         }
3840
3841         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3842
3843         drm_atomic_private_obj_init(adev_to_drm(adev),
3844                                     &adev->dm.atomic_obj,
3845                                     &state->base,
3846                                     &dm_atomic_state_funcs);
3847
3848         r = amdgpu_display_modeset_create_props(adev);
3849         if (r) {
3850                 dc_release_state(state->context);
3851                 kfree(state);
3852                 return r;
3853         }
3854
3855         r = amdgpu_dm_audio_init(adev);
3856         if (r) {
3857                 dc_release_state(state->context);
3858                 kfree(state);
3859                 return r;
3860         }
3861
3862         return 0;
3863 }
3864
3865 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3866 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3867 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3868
3869 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3870         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3871
3872 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3873                                             int bl_idx)
3874 {
3875 #if defined(CONFIG_ACPI)
3876         struct amdgpu_dm_backlight_caps caps;
3877
3878         memset(&caps, 0, sizeof(caps));
3879
3880         if (dm->backlight_caps[bl_idx].caps_valid)
3881                 return;
3882
3883         amdgpu_acpi_get_backlight_caps(&caps);
3884         if (caps.caps_valid) {
3885                 dm->backlight_caps[bl_idx].caps_valid = true;
3886                 if (caps.aux_support)
3887                         return;
3888                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3889                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3890         } else {
3891                 dm->backlight_caps[bl_idx].min_input_signal =
3892                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3893                 dm->backlight_caps[bl_idx].max_input_signal =
3894                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3895         }
3896 #else
3897         if (dm->backlight_caps[bl_idx].aux_support)
3898                 return;
3899
3900         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3901         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3902 #endif
3903 }
3904
3905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3906                                 unsigned *min, unsigned *max)
3907 {
3908         if (!caps)
3909                 return 0;
3910
3911         if (caps->aux_support) {
3912                 // Firmware limits are in nits, DC API wants millinits.
3913                 *max = 1000 * caps->aux_max_input_signal;
3914                 *min = 1000 * caps->aux_min_input_signal;
3915         } else {
3916                 // Firmware limits are 8-bit, PWM control is 16-bit.
3917                 *max = 0x101 * caps->max_input_signal;
3918                 *min = 0x101 * caps->min_input_signal;
3919         }
3920         return 1;
3921 }
3922
3923 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3924                                         uint32_t brightness)
3925 {
3926         unsigned min, max;
3927
3928         if (!get_brightness_range(caps, &min, &max))
3929                 return brightness;
3930
3931         // Rescale 0..255 to min..max
3932         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3933                                        AMDGPU_MAX_BL_LEVEL);
3934 }
3935
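
/*
 * Worked example for the PWM path, using the default firmware limits
 * (AMDGPU_DM_DEFAULT_MIN_BACKLIGHT = 12, AMDGPU_DM_DEFAULT_MAX_BACKLIGHT = 255):
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to 3084 + round(62451 * 128 / 255) = 34432.
 */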
3936 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3937                                       uint32_t brightness)
3938 {
3939         unsigned min, max;
3940
3941         if (!get_brightness_range(caps, &min, &max))
3942                 return brightness;
3943
3944         if (brightness < min)
3945                 return 0;
3946         // Rescale min..max to 0..255
3947         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3948                                  max - min);
3949 }
3950
3951 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3952                                          int bl_idx,
3953                                          u32 user_brightness)
3954 {
3955         struct amdgpu_dm_backlight_caps caps;
3956         struct dc_link *link;
3957         u32 brightness;
3958         bool rc;
3959
3960         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3961         caps = dm->backlight_caps[bl_idx];
3962
3963         dm->brightness[bl_idx] = user_brightness;
3964         /* update scratch register */
3965         if (bl_idx == 0)
3966                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3967         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3968         link = (struct dc_link *)dm->backlight_link[bl_idx];
3969
3970         /* Change brightness based on AUX property */
3971         if (caps.aux_support) {
3972                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3973                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3974                 if (!rc)
3975                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3976         } else {
3977                 rc = dc_link_set_backlight_level(link, brightness, 0);
3978                 if (!rc)
3979                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3980         }
3981
3982         if (rc)
3983                 dm->actual_brightness[bl_idx] = user_brightness;
3984 }
3985
3986 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3987 {
3988         struct amdgpu_display_manager *dm = bl_get_data(bd);
3989         int i;
3990
3991         for (i = 0; i < dm->num_of_edps; i++) {
3992                 if (bd == dm->backlight_dev[i])
3993                         break;
3994         }
3995         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3996                 i = 0;
3997         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3998
3999         return 0;
4000 }
4001
4002 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4003                                          int bl_idx)
4004 {
4005         struct amdgpu_dm_backlight_caps caps;
4006         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4007
4008         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4009         caps = dm->backlight_caps[bl_idx];
4010
4011         if (caps.aux_support) {
4012                 u32 avg, peak;
4013                 bool rc;
4014
4015                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4016                 if (!rc)
4017                         return dm->brightness[bl_idx];
4018                 return convert_brightness_to_user(&caps, avg);
4019         } else {
4020                 int ret = dc_link_get_backlight_level(link);
4021
4022                 if (ret == DC_ERROR_UNEXPECTED)
4023                         return dm->brightness[bl_idx];
4024                 return convert_brightness_to_user(&caps, ret);
4025         }
4026 }
4027
4028 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4029 {
4030         struct amdgpu_display_manager *dm = bl_get_data(bd);
4031         int i;
4032
4033         for (i = 0; i < dm->num_of_edps; i++) {
4034                 if (bd == dm->backlight_dev[i])
4035                         break;
4036         }
4037         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4038                 i = 0;
4039         return amdgpu_dm_backlight_get_level(dm, i);
4040 }
4041
4042 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4043         .options = BL_CORE_SUSPENDRESUME,
4044         .get_brightness = amdgpu_dm_backlight_get_brightness,
4045         .update_status  = amdgpu_dm_backlight_update_status,
4046 };
4047
4048 static void
4049 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4050 {
4051         char bl_name[16];
4052         struct backlight_properties props = { 0 };
4053
4054         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4055         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4056
4057         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4058         props.brightness = AMDGPU_MAX_BL_LEVEL;
4059         props.type = BACKLIGHT_RAW;
4060
4061         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4062                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4063
4064         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4065                                                                        adev_to_drm(dm->adev)->dev,
4066                                                                        dm,
4067                                                                        &amdgpu_dm_backlight_ops,
4068                                                                        &props);
4069
4070         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4071                 DRM_ERROR("DM: Backlight registration failed!\n");
4072         else
4073                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4074 }
4075 #endif
4076
4077 static int initialize_plane(struct amdgpu_display_manager *dm,
4078                             struct amdgpu_mode_info *mode_info, int plane_id,
4079                             enum drm_plane_type plane_type,
4080                             const struct dc_plane_cap *plane_cap)
4081 {
4082         struct drm_plane *plane;
4083         unsigned long possible_crtcs;
4084         int ret = 0;
4085
4086         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4087         if (!plane) {
4088                 DRM_ERROR("KMS: Failed to allocate plane\n");
4089                 return -ENOMEM;
4090         }
4091         plane->type = plane_type;
4092
4093         /*
4094          * HACK: IGT tests expect that the primary plane for a CRTC
4095          * can only have one possible CRTC. Only expose support for
4096          * any CRTC when the plane is not going to be used as a primary
4097          * plane for a CRTC - i.e. for overlay or underlay planes.
4098          */
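        /*
         * For illustration (assumed values): with max_streams == 4,
         * plane_id 1 yields possible_crtcs = 1 << 1 = 0x2 (CRTC 1 only),
         * while an overlay plane at plane_id 4 falls through to 0xff
         * (any CRTC).
         */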
4099         possible_crtcs = 1 << plane_id;
4100         if (plane_id >= dm->dc->caps.max_streams)
4101                 possible_crtcs = 0xff;
4102
4103         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4104
4105         if (ret) {
4106                 DRM_ERROR("KMS: Failed to initialize plane\n");
4107                 kfree(plane);
4108                 return ret;
4109         }
4110
4111         if (mode_info)
4112                 mode_info->planes[plane_id] = plane;
4113
4114         return ret;
4115 }
4116
4117
4118 static void register_backlight_device(struct amdgpu_display_manager *dm,
4119                                       struct dc_link *link)
4120 {
4121 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4122         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4123
4124         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4125             link->type != dc_connection_none) {
4126                 /*
4127                  * Even if registration fails, we should continue with
4128                  * DM initialization, because not having a backlight control
4129                  * is better than a black screen.
4130                  */
4131                 if (!dm->backlight_dev[dm->num_of_edps])
4132                         amdgpu_dm_register_backlight_device(dm);
4133
4134                 if (dm->backlight_dev[dm->num_of_edps]) {
4135                         dm->backlight_link[dm->num_of_edps] = link;
4136                         dm->num_of_edps++;
4137                 }
4138         }
4139 #endif
4140 }
4141
4142
4143 /*
4144  * In this architecture, the association
4145  * connector -> encoder -> crtc
4146  * is not really required. The crtc and connector will hold the
4147  * display_index as an abstraction to use with the DAL component.
4148  *
4149  * Returns 0 on success
4150  */
4151 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4152 {
4153         struct amdgpu_display_manager *dm = &adev->dm;
4154         int32_t i;
4155         struct amdgpu_dm_connector *aconnector = NULL;
4156         struct amdgpu_encoder *aencoder = NULL;
4157         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4158         uint32_t link_cnt;
4159         int32_t primary_planes;
4160         enum dc_connection_type new_connection_type = dc_connection_none;
4161         const struct dc_plane_cap *plane;
4162         bool psr_feature_enabled = false;
4163
4164         dm->display_indexes_num = dm->dc->caps.max_streams;
4165         /* Update the actually used number of CRTCs. */
4166         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4167
4168         link_cnt = dm->dc->caps.max_links;
4169         if (amdgpu_dm_mode_config_init(dm->adev)) {
4170                 DRM_ERROR("DM: Failed to initialize mode config\n");
4171                 return -EINVAL;
4172         }
4173
4174         /* There is one primary plane per CRTC */
4175         primary_planes = dm->dc->caps.max_streams;
4176         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4177
4178         /*
4179          * Initialize primary planes, the implicit planes used by legacy IOCTLs.
4180          * Order is reversed to match iteration order in atomic check.
4181          */
4182         for (i = (primary_planes - 1); i >= 0; i--) {
4183                 plane = &dm->dc->caps.planes[i];
4184
4185                 if (initialize_plane(dm, mode_info, i,
4186                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4187                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4188                         goto fail;
4189                 }
4190         }
4191
4192         /*
4193          * Initialize overlay planes, index starting after primary planes.
4194          * These planes have a higher DRM index than the primary planes since
4195          * they should be considered as having a higher z-order.
4196          * Order is reversed to match iteration order in atomic check.
4197          *
4198          * Only support DCN for now, and only expose one so we don't encourage
4199          * userspace to use up all the pipes.
4200          */
4201         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4202                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4203
4204                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4205                         continue;
4206
4207                 if (!plane->blends_with_above || !plane->blends_with_below)
4208                         continue;
4209
4210                 if (!plane->pixel_format_support.argb8888)
4211                         continue;
4212
4213                 if (initialize_plane(dm, NULL, primary_planes + i,
4214                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4215                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4216                         goto fail;
4217                 }
4218
4219                 /* Only create one overlay plane. */
4220                 break;
4221         }
4222
4223         for (i = 0; i < dm->dc->caps.max_streams; i++)
4224                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4225                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4226                         goto fail;
4227                 }
4228
4229         /* Use Outbox interrupt */
4230         switch (adev->ip_versions[DCE_HWIP][0]) {
4231         case IP_VERSION(3, 0, 0):
4232         case IP_VERSION(3, 1, 2):
4233         case IP_VERSION(3, 1, 3):
4234         case IP_VERSION(3, 1, 5):
4235         case IP_VERSION(3, 1, 6):
4236         case IP_VERSION(2, 1, 0):
4237                 if (register_outbox_irq_handlers(dm->adev)) {
4238                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4239                         goto fail;
4240                 }
4241                 break;
4242         default:
4243                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4244                               adev->ip_versions[DCE_HWIP][0]);
4245         }
4246
4247         /* Determine whether to enable PSR support by default. */
4248         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4249                 switch (adev->ip_versions[DCE_HWIP][0]) {
4250                 case IP_VERSION(3, 1, 2):
4251                 case IP_VERSION(3, 1, 3):
4252                 case IP_VERSION(3, 1, 5):
4253                 case IP_VERSION(3, 1, 6):
4254                         psr_feature_enabled = true;
4255                         break;
4256                 default:
4257                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4258                         break;
4259                 }
4260         }
4261
4262         /* Disable vblank IRQs aggressively for power-saving. */
4263         adev_to_drm(adev)->vblank_disable_immediate = true;
4264
4265         /* Loop over all connectors on the board. */
4266         for (i = 0; i < link_cnt; i++) {
4267                 struct dc_link *link = NULL;
4268
4269                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4270                         DRM_ERROR(
4271                                 "KMS: Cannot support more than %d display indexes\n",
4272                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4273                         continue;
4274                 }
4275
4276                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4277                 if (!aconnector)
4278                         goto fail;
4279
4280                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4281                 if (!aencoder)
4282                         goto fail;
4283
4284                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4285                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4286                         goto fail;
4287                 }
4288
4289                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4290                         DRM_ERROR("KMS: Failed to initialize connector\n");
4291                         goto fail;
4292                 }
4293
4294                 link = dc_get_link_at_index(dm->dc, i);
4295
4296                 if (!dc_link_detect_sink(link, &new_connection_type))
4297                         DRM_ERROR("KMS: Failed to detect connector\n");
4298
4299                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4300                         emulated_link_detect(link);
4301                         amdgpu_dm_update_connector_after_detect(aconnector);
4302
4303                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4304                         amdgpu_dm_update_connector_after_detect(aconnector);
4305                         register_backlight_device(dm, link);
4306                         if (dm->num_of_edps)
4307                                 update_connector_ext_caps(aconnector);
4308                         if (psr_feature_enabled)
4309                                 amdgpu_dm_set_psr_caps(link);
4310
4311                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4312                          * PSR is also supported.
4313                          */
4314                         if (link->psr_settings.psr_feature_enabled)
4315                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4316                 }
4317
4319         }
4320
4321         /* Software is initialized. Now we can register interrupt handlers. */
4322         switch (adev->asic_type) {
4323 #if defined(CONFIG_DRM_AMD_DC_SI)
4324         case CHIP_TAHITI:
4325         case CHIP_PITCAIRN:
4326         case CHIP_VERDE:
4327         case CHIP_OLAND:
4328                 if (dce60_register_irq_handlers(dm->adev)) {
4329                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4330                         goto fail;
4331                 }
4332                 break;
4333 #endif
4334         case CHIP_BONAIRE:
4335         case CHIP_HAWAII:
4336         case CHIP_KAVERI:
4337         case CHIP_KABINI:
4338         case CHIP_MULLINS:
4339         case CHIP_TONGA:
4340         case CHIP_FIJI:
4341         case CHIP_CARRIZO:
4342         case CHIP_STONEY:
4343         case CHIP_POLARIS11:
4344         case CHIP_POLARIS10:
4345         case CHIP_POLARIS12:
4346         case CHIP_VEGAM:
4347         case CHIP_VEGA10:
4348         case CHIP_VEGA12:
4349         case CHIP_VEGA20:
4350                 if (dce110_register_irq_handlers(dm->adev)) {
4351                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4352                         goto fail;
4353                 }
4354                 break;
4355         default:
4356                 switch (adev->ip_versions[DCE_HWIP][0]) {
4357                 case IP_VERSION(1, 0, 0):
4358                 case IP_VERSION(1, 0, 1):
4359                 case IP_VERSION(2, 0, 2):
4360                 case IP_VERSION(2, 0, 3):
4361                 case IP_VERSION(2, 0, 0):
4362                 case IP_VERSION(2, 1, 0):
4363                 case IP_VERSION(3, 0, 0):
4364                 case IP_VERSION(3, 0, 2):
4365                 case IP_VERSION(3, 0, 3):
4366                 case IP_VERSION(3, 0, 1):
4367                 case IP_VERSION(3, 1, 2):
4368                 case IP_VERSION(3, 1, 3):
4369                 case IP_VERSION(3, 1, 5):
4370                 case IP_VERSION(3, 1, 6):
4371                         if (dcn10_register_irq_handlers(dm->adev)) {
4372                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4373                                 goto fail;
4374                         }
4375                         break;
4376                 default:
4377                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4378                                         adev->ip_versions[DCE_HWIP][0]);
4379                         goto fail;
4380                 }
4381                 break;
4382         }
4383
4384         return 0;
4385 fail:
4386         kfree(aencoder);
4387         kfree(aconnector);
4388
4389         return -EINVAL;
4390 }
4391
4392 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4393 {
4394         drm_atomic_private_obj_fini(&dm->atomic_obj);
4396 }
4397
4398 /******************************************************************************
4399  * amdgpu_display_funcs functions
4400  *****************************************************************************/
4401
4402 /*
4403  * dm_bandwidth_update - program display watermarks
4404  *
4405  * @adev: amdgpu_device pointer
4406  *
4407  * Calculate and program the display watermarks and line buffer allocation.
4408  */
4409 static void dm_bandwidth_update(struct amdgpu_device *adev)
4410 {
4411         /* TODO: implement later */
4412 }
4413
4414 static const struct amdgpu_display_funcs dm_display_funcs = {
4415         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4416         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4417         .backlight_set_level = NULL, /* never called for DC */
4418         .backlight_get_level = NULL, /* never called for DC */
4419         .hpd_sense = NULL,/* called unconditionally */
4420         .hpd_set_polarity = NULL, /* called unconditionally */
4421         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4422         .page_flip_get_scanoutpos =
4423                 dm_crtc_get_scanoutpos,/* called unconditionally */
4424         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4425         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4426 };
4427
4428 #if defined(CONFIG_DEBUG_KERNEL_DC)
4429
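/*
 * Debug-only hook: writing 0 to the s3_debug sysfs file calls dm_suspend()
 * and writing non-zero calls dm_resume() plus a hotplug event, exercising
 * the S3 suspend/resume path without an actual system sleep.
 */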
4430 static ssize_t s3_debug_store(struct device *device,
4431                               struct device_attribute *attr,
4432                               const char *buf,
4433                               size_t count)
4434 {
4435         int ret;
4436         int s3_state;
4437         struct drm_device *drm_dev = dev_get_drvdata(device);
4438         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4439
4440         ret = kstrtoint(buf, 0, &s3_state);
4441
4442         if (ret == 0) {
4443                 if (s3_state) {
4444                         dm_resume(adev);
4445                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4446                 } else
4447                         dm_suspend(adev);
4448         }
4449
4450         return ret == 0 ? count : 0;
4451 }
4452
4453 DEVICE_ATTR_WO(s3_debug);
4454
4455 #endif
4456
4457 static int dm_early_init(void *handle)
4458 {
4459         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4460
4461         switch (adev->asic_type) {
4462 #if defined(CONFIG_DRM_AMD_DC_SI)
4463         case CHIP_TAHITI:
4464         case CHIP_PITCAIRN:
4465         case CHIP_VERDE:
4466                 adev->mode_info.num_crtc = 6;
4467                 adev->mode_info.num_hpd = 6;
4468                 adev->mode_info.num_dig = 6;
4469                 break;
4470         case CHIP_OLAND:
4471                 adev->mode_info.num_crtc = 2;
4472                 adev->mode_info.num_hpd = 2;
4473                 adev->mode_info.num_dig = 2;
4474                 break;
4475 #endif
4476         case CHIP_BONAIRE:
4477         case CHIP_HAWAII:
4478                 adev->mode_info.num_crtc = 6;
4479                 adev->mode_info.num_hpd = 6;
4480                 adev->mode_info.num_dig = 6;
4481                 break;
4482         case CHIP_KAVERI:
4483                 adev->mode_info.num_crtc = 4;
4484                 adev->mode_info.num_hpd = 6;
4485                 adev->mode_info.num_dig = 7;
4486                 break;
4487         case CHIP_KABINI:
4488         case CHIP_MULLINS:
4489                 adev->mode_info.num_crtc = 2;
4490                 adev->mode_info.num_hpd = 6;
4491                 adev->mode_info.num_dig = 6;
4492                 break;
4493         case CHIP_FIJI:
4494         case CHIP_TONGA:
4495                 adev->mode_info.num_crtc = 6;
4496                 adev->mode_info.num_hpd = 6;
4497                 adev->mode_info.num_dig = 7;
4498                 break;
4499         case CHIP_CARRIZO:
4500                 adev->mode_info.num_crtc = 3;
4501                 adev->mode_info.num_hpd = 6;
4502                 adev->mode_info.num_dig = 9;
4503                 break;
4504         case CHIP_STONEY:
4505                 adev->mode_info.num_crtc = 2;
4506                 adev->mode_info.num_hpd = 6;
4507                 adev->mode_info.num_dig = 9;
4508                 break;
4509         case CHIP_POLARIS11:
4510         case CHIP_POLARIS12:
4511                 adev->mode_info.num_crtc = 5;
4512                 adev->mode_info.num_hpd = 5;
4513                 adev->mode_info.num_dig = 5;
4514                 break;
4515         case CHIP_POLARIS10:
4516         case CHIP_VEGAM:
4517                 adev->mode_info.num_crtc = 6;
4518                 adev->mode_info.num_hpd = 6;
4519                 adev->mode_info.num_dig = 6;
4520                 break;
4521         case CHIP_VEGA10:
4522         case CHIP_VEGA12:
4523         case CHIP_VEGA20:
4524                 adev->mode_info.num_crtc = 6;
4525                 adev->mode_info.num_hpd = 6;
4526                 adev->mode_info.num_dig = 6;
4527                 break;
4528         default:
4529
4530                 switch (adev->ip_versions[DCE_HWIP][0]) {
4531                 case IP_VERSION(2, 0, 2):
4532                 case IP_VERSION(3, 0, 0):
4533                         adev->mode_info.num_crtc = 6;
4534                         adev->mode_info.num_hpd = 6;
4535                         adev->mode_info.num_dig = 6;
4536                         break;
4537                 case IP_VERSION(2, 0, 0):
4538                 case IP_VERSION(3, 0, 2):
4539                         adev->mode_info.num_crtc = 5;
4540                         adev->mode_info.num_hpd = 5;
4541                         adev->mode_info.num_dig = 5;
4542                         break;
4543                 case IP_VERSION(2, 0, 3):
4544                 case IP_VERSION(3, 0, 3):
4545                         adev->mode_info.num_crtc = 2;
4546                         adev->mode_info.num_hpd = 2;
4547                         adev->mode_info.num_dig = 2;
4548                         break;
4549                 case IP_VERSION(1, 0, 0):
4550                 case IP_VERSION(1, 0, 1):
4551                 case IP_VERSION(3, 0, 1):
4552                 case IP_VERSION(2, 1, 0):
4553                 case IP_VERSION(3, 1, 2):
4554                 case IP_VERSION(3, 1, 3):
4555                 case IP_VERSION(3, 1, 5):
4556                 case IP_VERSION(3, 1, 6):
4557                         adev->mode_info.num_crtc = 4;
4558                         adev->mode_info.num_hpd = 4;
4559                         adev->mode_info.num_dig = 4;
4560                         break;
4561                 default:
4562                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4563                                         adev->ip_versions[DCE_HWIP][0]);
4564                         return -EINVAL;
4565                 }
4566                 break;
4567         }
4568
4569         amdgpu_dm_set_irq_funcs(adev);
4570
4571         if (adev->mode_info.funcs == NULL)
4572                 adev->mode_info.funcs = &dm_display_funcs;
4573
4574         /*
4575          * Note: Do NOT change adev->audio_endpt_rreg and
4576          * adev->audio_endpt_wreg because they are initialised in
4577          * amdgpu_device_init()
4578          */
4579 #if defined(CONFIG_DEBUG_KERNEL_DC)
4580         device_create_file(
4581                 adev_to_drm(adev)->dev,
4582                 &dev_attr_s3_debug);
4583 #endif
4584
4585         return 0;
4586 }
4587
4588 static bool modeset_required(struct drm_crtc_state *crtc_state,
4589                              struct dc_stream_state *new_stream,
4590                              struct dc_stream_state *old_stream)
4591 {
4592         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4593 }
4594
4595 static bool modereset_required(struct drm_crtc_state *crtc_state)
4596 {
4597         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4598 }
4599
4600 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4601 {
4602         drm_encoder_cleanup(encoder);
4603         kfree(encoder);
4604 }
4605
4606 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4607         .destroy = amdgpu_dm_encoder_destroy,
4608 };
4609
4610
4611 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4612                                          struct drm_framebuffer *fb,
4613                                          int *min_downscale, int *max_upscale)
4614 {
4615         struct amdgpu_device *adev = drm_to_adev(dev);
4616         struct dc *dc = adev->dm.dc;
4617         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4618         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4619
4620         switch (fb->format->format) {
4621         case DRM_FORMAT_P010:
4622         case DRM_FORMAT_NV12:
4623         case DRM_FORMAT_NV21:
4624                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4625                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4626                 break;
4627
4628         case DRM_FORMAT_XRGB16161616F:
4629         case DRM_FORMAT_ARGB16161616F:
4630         case DRM_FORMAT_XBGR16161616F:
4631         case DRM_FORMAT_ABGR16161616F:
4632                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4633                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4634                 break;
4635
4636         default:
4637                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4638                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4639                 break;
4640         }
4641
4642         /*
4643          * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
4644          * the only usable scaling factor is 1.0 == 1000 units.
4645          */
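        /*
         * As a reference point, the fallback caps used by
         * fill_dc_scaling_info() below, 250 and 16000, correspond to a
         * 0.25x minimum downscale and a 16.0x maximum upscale.
         */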
4646         if (*max_upscale == 1)
4647                 *max_upscale = 1000;
4648
4649         if (*min_downscale == 1)
4650                 *min_downscale = 1000;
4651 }
4652
4653
4654 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4655                                 const struct drm_plane_state *state,
4656                                 struct dc_scaling_info *scaling_info)
4657 {
4658         int scale_w, scale_h, min_downscale, max_upscale;
4659
4660         memset(scaling_info, 0, sizeof(*scaling_info));
4661
4662         /* Source is fixed-point 16.16, but we ignore the fractional part for now... */
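        /*
         * e.g. src_x == 0x18000 encodes 1.5 in 16.16 fixed point and
         * truncates to pixel 1 after the >> 16.
         */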
4663         scaling_info->src_rect.x = state->src_x >> 16;
4664         scaling_info->src_rect.y = state->src_y >> 16;
4665
4666         /*
4667          * For reasons we don't (yet) fully understand, a non-zero
4668          * src_y coordinate into an NV12 buffer can cause a
4669          * system hang on DCN1x.
4670          * To avoid hangs (and maybe be overly cautious),
4671          * let's reject both non-zero src_x and src_y.
4672          *
4673          * We currently know of only one use-case to reproduce a
4674          * scenario with non-zero src_x and src_y for NV12, which
4675          * is to gesture the YouTube Android app into full screen
4676          * on ChromeOS.
4677          */
4678         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4679             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4680             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4681             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4682                 return -EINVAL;
4683
4684         scaling_info->src_rect.width = state->src_w >> 16;
4685         if (scaling_info->src_rect.width == 0)
4686                 return -EINVAL;
4687
4688         scaling_info->src_rect.height = state->src_h >> 16;
4689         if (scaling_info->src_rect.height == 0)
4690                 return -EINVAL;
4691
4692         scaling_info->dst_rect.x = state->crtc_x;
4693         scaling_info->dst_rect.y = state->crtc_y;
4694
4695         if (state->crtc_w == 0)
4696                 return -EINVAL;
4697
4698         scaling_info->dst_rect.width = state->crtc_w;
4699
4700         if (state->crtc_h == 0)
4701                 return -EINVAL;
4702
4703         scaling_info->dst_rect.height = state->crtc_h;
4704
4705         /* DRM doesn't specify clipping on destination output. */
4706         scaling_info->clip_rect = scaling_info->dst_rect;
4707
4708         /* Validate scaling per-format with DC plane caps */
4709         if (state->plane && state->plane->dev && state->fb) {
4710                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4711                                              &min_downscale, &max_upscale);
4712         } else {
4713                 min_downscale = 250;
4714                 max_upscale = 16000;
4715         }
4716
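        /*
         * Scale factors are in 0.001 units: e.g. a 960-wide source rect
         * stretched to a 1920-wide destination gives scale_w == 2000 (2.0x),
         * which must lie within [min_downscale, max_upscale].
         */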
4717         scale_w = scaling_info->dst_rect.width * 1000 /
4718                   scaling_info->src_rect.width;
4719
4720         if (scale_w < min_downscale || scale_w > max_upscale)
4721                 return -EINVAL;
4722
4723         scale_h = scaling_info->dst_rect.height * 1000 /
4724                   scaling_info->src_rect.height;
4725
4726         if (scale_h < min_downscale || scale_h > max_upscale)
4727                 return -EINVAL;
4728
4729         /*
4730          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4731          * assume reasonable defaults based on the format.
4732          */
4733
4734         return 0;
4735 }
4736
4737 static void
4738 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4739                                  uint64_t tiling_flags)
4740 {
4741         /* Fill GFX8 params */
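        /*
         * tiling_flags packs each GFX8 field at a fixed bit offset;
         * AMDGPU_TILING_GET() shifts and masks the named field back out.
         */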
4742         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4743                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4744
4745                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4746                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4747                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4748                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4749                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4750
4751                 /* XXX fix me for VI */
4752                 tiling_info->gfx8.num_banks = num_banks;
4753                 tiling_info->gfx8.array_mode =
4754                                 DC_ARRAY_2D_TILED_THIN1;
4755                 tiling_info->gfx8.tile_split = tile_split;
4756                 tiling_info->gfx8.bank_width = bankw;
4757                 tiling_info->gfx8.bank_height = bankh;
4758                 tiling_info->gfx8.tile_aspect = mtaspect;
4759                 tiling_info->gfx8.tile_mode =
4760                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4761         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4762                         == DC_ARRAY_1D_TILED_THIN1) {
4763                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4764         }
4765
4766         tiling_info->gfx8.pipe_config =
4767                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4768 }
4769
4770 static void
4771 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4772                                   union dc_tiling_info *tiling_info)
4773 {
4774         tiling_info->gfx9.num_pipes =
4775                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4776         tiling_info->gfx9.num_banks =
4777                 adev->gfx.config.gb_addr_config_fields.num_banks;
4778         tiling_info->gfx9.pipe_interleave =
4779                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4780         tiling_info->gfx9.num_shader_engines =
4781                 adev->gfx.config.gb_addr_config_fields.num_se;
4782         tiling_info->gfx9.max_compressed_frags =
4783                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4784         tiling_info->gfx9.num_rb_per_se =
4785                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4786         tiling_info->gfx9.shaderEnable = 1;
4787         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4788                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4789 }
4790
4791 static int
4792 validate_dcc(struct amdgpu_device *adev,
4793              const enum surface_pixel_format format,
4794              const enum dc_rotation_angle rotation,
4795              const union dc_tiling_info *tiling_info,
4796              const struct dc_plane_dcc_param *dcc,
4797              const struct dc_plane_address *address,
4798              const struct plane_size *plane_size)
4799 {
4800         struct dc *dc = adev->dm.dc;
4801         struct dc_dcc_surface_param input;
4802         struct dc_surface_dcc_cap output;
4803
4804         memset(&input, 0, sizeof(input));
4805         memset(&output, 0, sizeof(output));
4806
4807         if (!dcc->enable)
4808                 return 0;
4809
4810         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4811             !dc->cap_funcs.get_dcc_compression_cap)
4812                 return -EINVAL;
4813
4814         input.format = format;
4815         input.surface_size.width = plane_size->surface_size.width;
4816         input.surface_size.height = plane_size->surface_size.height;
4817         input.swizzle_mode = tiling_info->gfx9.swizzle;
4818
4819         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4820                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4821         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4822                 input.scan = SCAN_DIRECTION_VERTICAL;
4823
4824         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4825                 return -EINVAL;
4826
4827         if (!output.capable)
4828                 return -EINVAL;
4829
4830         if (dcc->independent_64b_blks == 0 &&
4831             output.grph.rgb.independent_64b_blks != 0)
4832                 return -EINVAL;
4833
4834         return 0;
4835 }
4836
4837 static bool
4838 modifier_has_dcc(uint64_t modifier)
4839 {
4840         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4841 }
4842
4843 static unsigned
4844 modifier_gfx9_swizzle_mode(uint64_t modifier)
4845 {
4846         if (modifier == DRM_FORMAT_MOD_LINEAR)
4847                 return 0;
4848
4849         return AMD_FMT_MOD_GET(TILE, modifier);
4850 }
4851
4852 static const struct drm_format_info *
4853 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4854 {
4855         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4856 }
4857
4858 static void
4859 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4860                                     union dc_tiling_info *tiling_info,
4861                                     uint64_t modifier)
4862 {
4863         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4864         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4865         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4866         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4867
4868         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4869
4870         if (!IS_AMD_FMT_MOD(modifier))
4871                 return;
4872
4873         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4874         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4875
4876         if (adev->family >= AMDGPU_FAMILY_NV) {
4877                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4878         } else {
4879                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4880
4881                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4882         }
4883 }
4884
4885 enum dm_micro_swizzle {
4886         MICRO_SWIZZLE_Z = 0,
4887         MICRO_SWIZZLE_S = 1,
4888         MICRO_SWIZZLE_D = 2,
4889         MICRO_SWIZZLE_R = 3
4890 };
4891
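/*
 * The low two bits of a GFX9+ swizzle mode select the micro-tile class,
 * which dm_plane_format_mod_supported() extracts below with "& 3".
 */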
4892 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4893                                           uint32_t format,
4894                                           uint64_t modifier)
4895 {
4896         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4897         const struct drm_format_info *info = drm_format_info(format);
4898         int i;
4899
4900         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4901
4902         if (!info)
4903                 return false;
4904
4905         /*
4906          * We always have to allow these modifiers:
4907          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4908          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4909          */
4910         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4911             modifier == DRM_FORMAT_MOD_INVALID) {
4912                 return true;
4913         }
4914
4915         /* Check that the modifier is on the list of the plane's supported modifiers. */
4916         for (i = 0; i < plane->modifier_count; i++) {
4917                 if (modifier == plane->modifiers[i])
4918                         break;
4919         }
4920         if (i == plane->modifier_count)
4921                 return false;
4922
4923         /*
4924          * For D swizzle the canonical modifier depends on the bpp, so check
4925          * it here.
4926          */
4927         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4928             adev->family >= AMDGPU_FAMILY_NV) {
4929                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4930                         return false;
4931         }
4932
4933         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4934             info->cpp[0] < 8)
4935                 return false;
4936
4937         if (modifier_has_dcc(modifier)) {
4938                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4939                 if (info->cpp[0] != 4)
4940                         return false;
4941                 /* We support multi-planar formats, but not when combined with
4942                  * additional DCC metadata planes. */
4943                 if (info->num_planes > 1)
4944                         return false;
4945         }
4946
4947         return true;
4948 }
4949
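/*
 * Append @mod to a dynamically grown modifier array, doubling the backing
 * allocation when it is full (amortized O(1) per append). On allocation
 * failure the array is freed and *mods set to NULL, which callers treat
 * as out of memory.
 */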
4950 static void
4951 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4952 {
4953         if (!*mods)
4954                 return;
4955
4956         if (*cap - *size < 1) {
4957                 uint64_t new_cap = *cap * 2;
4958                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4959
4960                 if (!new_mods) {
4961                         kfree(*mods);
4962                         *mods = NULL;
4963                         return;
4964                 }
4965
4966                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4967                 kfree(*mods);
4968                 *mods = new_mods;
4969                 *cap = new_cap;
4970         }
4971
4972         (*mods)[*size] = mod;
4973         *size += 1;
4974 }
4975
4976 static void
4977 add_gfx9_modifiers(const struct amdgpu_device *adev,
4978                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4979 {
4980         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4981         int pipe_xor_bits = min(8, pipes +
4982                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4983         int bank_xor_bits = min(8 - pipe_xor_bits,
4984                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4985         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4986                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
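        /*
         * pipe_xor_bits and bank_xor_bits are capped so their sum stays at
         * or below 8, presumably to fit the XOR-bit fields of the AMD
         * modifier encoding.
         */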
4987
4989         if (adev->family == AMDGPU_FAMILY_RV) {
4990                 /* Raven2 and later */
4991                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4992
4993                 /*
4994                  * No _D DCC swizzles yet because we only allow 32bpp, which
4995                  * doesn't support _D on DCN
4996                  */
4997
4998                 if (has_constant_encode) {
4999                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5001                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5002                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5004                                     AMD_FMT_MOD_SET(DCC, 1) |
5005                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5006                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5007                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5008                 }
5009
5010                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5011                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5012                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5013                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5014                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5015                             AMD_FMT_MOD_SET(DCC, 1) |
5016                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5017                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5018                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5019
5020                 if (has_constant_encode) {
5021                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5022                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5023                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5024                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5025                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5026                                     AMD_FMT_MOD_SET(DCC, 1) |
5027                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5028                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5029                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5031                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5032                                     AMD_FMT_MOD_SET(RB, rb) |
5033                                     AMD_FMT_MOD_SET(PIPE, pipes));
5034                 }
5035
5036                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5037                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5038                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5039                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5040                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5041                             AMD_FMT_MOD_SET(DCC, 1) |
5042                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5043                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5044                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5045                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5046                             AMD_FMT_MOD_SET(RB, rb) |
5047                             AMD_FMT_MOD_SET(PIPE, pipes));
5048         }
5049
5050         /*
5051          * Only supported for 64bpp on Raven, will be filtered on format in
5052          * dm_plane_format_mod_supported.
5053          */
5054         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5055                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5056                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5057                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5058                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5059
5060         if (adev->family == AMDGPU_FAMILY_RV) {
5061                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5062                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5063                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5064                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5065                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5066         }
5067
5068         /*
5069          * Only supported for 64bpp on Raven, will be filtered on format in
5070          * dm_plane_format_mod_supported.
5071          */
5072         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5073                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5074                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5075
5076         if (adev->family == AMDGPU_FAMILY_RV) {
5077                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5078                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5079                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5080         }
5081 }
5082
5083 static void
5084 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5085                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5086 {
5087         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5088
5089         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5090                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5091                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5092                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5093                     AMD_FMT_MOD_SET(DCC, 1) |
5094                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5095                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5096                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5097
5098         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5099                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5100                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5101                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5102                     AMD_FMT_MOD_SET(DCC, 1) |
5103                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5104                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5105                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5106                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5107
5108         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5110                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5111                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5112
5113         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5115                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5116                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5117
5119         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5120         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5121                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5122                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5123
5124         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5126                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5127 }
5128
5129 static void
5130 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5131                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5132 {
5133         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5134         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5135
5136         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5137                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5138                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5139                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5140                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5141                     AMD_FMT_MOD_SET(DCC, 1) |
5142                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5143                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5144                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5145                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5146
5147         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5148                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5149                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5150                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5151                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5152                     AMD_FMT_MOD_SET(DCC, 1) |
5153                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5154                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5155                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5156
5157         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5159                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5160                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5161                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5162                     AMD_FMT_MOD_SET(DCC, 1) |
5163                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5164                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5165                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5166                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5167                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5168
5169         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5170                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5171                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5172                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5173                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5174                     AMD_FMT_MOD_SET(DCC, 1) |
5175                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5176                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5177                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5178                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5179
5180         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5181                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5182                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5183                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5184                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5185
5186         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5187                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5188                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5189                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5190                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5191
5192         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5193         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5194                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5195                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5196
5197         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5198                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5199                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5200 }
5201
5202 static int
5203 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5204 {
5205         uint64_t size = 0, capacity = 128;
5206         *mods = NULL;
5207
5208         /* We have not hooked up any pre-GFX9 modifiers. */
5209         if (adev->family < AMDGPU_FAMILY_AI)
5210                 return 0;
5211
5212         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5213
5214         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5215                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5216                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5217                 return *mods ? 0 : -ENOMEM;
5218         }
5219
5220         switch (adev->family) {
5221         case AMDGPU_FAMILY_AI:
5222         case AMDGPU_FAMILY_RV:
5223                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5224                 break;
5225         case AMDGPU_FAMILY_NV:
5226         case AMDGPU_FAMILY_VGH:
5227         case AMDGPU_FAMILY_YC:
5228         case AMDGPU_FAMILY_GC_10_3_6:
5229         case AMDGPU_FAMILY_GC_10_3_7:
5230                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5231                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5232                 else
5233                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5234                 break;
5235         }
5236
5237         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5238
5239         /* INVALID marks the end of the list. */
5240         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5241
5242         if (!*mods)
5243                 return -ENOMEM;
5244
5245         return 0;
5246 }
5247
5248 static int
5249 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5250                                           const struct amdgpu_framebuffer *afb,
5251                                           const enum surface_pixel_format format,
5252                                           const enum dc_rotation_angle rotation,
5253                                           const struct plane_size *plane_size,
5254                                           union dc_tiling_info *tiling_info,
5255                                           struct dc_plane_dcc_param *dcc,
5256                                           struct dc_plane_address *address,
5257                                           const bool force_disable_dcc)
5258 {
5259         const uint64_t modifier = afb->base.modifier;
5260         int ret = 0;
5261
5262         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5263         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5264
5265         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5266                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5267                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5268                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5269
5270                 dcc->enable = 1;
5271                 dcc->meta_pitch = afb->base.pitches[1];
5272                 dcc->independent_64b_blks = independent_64b_blks;
5273                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5274                         if (independent_64b_blks && independent_128b_blks)
5275                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5276                         else if (independent_128b_blks)
5277                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5278                         else if (independent_64b_blks && !independent_128b_blks)
5279                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5280                         else
5281                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5282                 } else {
5283                         if (independent_64b_blks)
5284                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5285                         else
5286                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5287                 }
5288
5289                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5290                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5291         }
5292
5293         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5294         if (ret)
5295                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5296
5297         return ret;
5298 }
5299
5300 static int
5301 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5302                              const struct amdgpu_framebuffer *afb,
5303                              const enum surface_pixel_format format,
5304                              const enum dc_rotation_angle rotation,
5305                              const uint64_t tiling_flags,
5306                              union dc_tiling_info *tiling_info,
5307                              struct plane_size *plane_size,
5308                              struct dc_plane_dcc_param *dcc,
5309                              struct dc_plane_address *address,
5310                              bool tmz_surface,
5311                              bool force_disable_dcc)
5312 {
5313         const struct drm_framebuffer *fb = &afb->base;
5314         int ret;
5315
5316         memset(tiling_info, 0, sizeof(*tiling_info));
5317         memset(plane_size, 0, sizeof(*plane_size));
5318         memset(dcc, 0, sizeof(*dcc));
5319         memset(address, 0, sizeof(*address));
5320
5321         address->tmz_surface = tmz_surface;
5322
5323         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5324                 uint64_t addr = afb->address + fb->offsets[0];
5325
5326                 plane_size->surface_size.x = 0;
5327                 plane_size->surface_size.y = 0;
5328                 plane_size->surface_size.width = fb->width;
5329                 plane_size->surface_size.height = fb->height;
5330                 plane_size->surface_pitch =
5331                         fb->pitches[0] / fb->format->cpp[0];
5332
5333                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5334                 address->grph.addr.low_part = lower_32_bits(addr);
5335                 address->grph.addr.high_part = upper_32_bits(addr);
5336         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5337                 uint64_t luma_addr = afb->address + fb->offsets[0];
5338                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5339
5340                 plane_size->surface_size.x = 0;
5341                 plane_size->surface_size.y = 0;
5342                 plane_size->surface_size.width = fb->width;
5343                 plane_size->surface_size.height = fb->height;
5344                 plane_size->surface_pitch =
5345                         fb->pitches[0] / fb->format->cpp[0];
5346
5347                 plane_size->chroma_size.x = 0;
5348                 plane_size->chroma_size.y = 0;
5349                 /* TODO: set these based on surface format */
5350                 plane_size->chroma_size.width = fb->width / 2;
5351                 plane_size->chroma_size.height = fb->height / 2;
5352
5353                 plane_size->chroma_pitch =
5354                         fb->pitches[1] / fb->format->cpp[1];
5355
5356                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5357                 address->video_progressive.luma_addr.low_part =
5358                         lower_32_bits(luma_addr);
5359                 address->video_progressive.luma_addr.high_part =
5360                         upper_32_bits(luma_addr);
5361                 address->video_progressive.chroma_addr.low_part =
5362                         lower_32_bits(chroma_addr);
5363                 address->video_progressive.chroma_addr.high_part =
5364                         upper_32_bits(chroma_addr);
5365         }
5366
5367         if (adev->family >= AMDGPU_FAMILY_AI) {
5368                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5369                                                                 rotation, plane_size,
5370                                                                 tiling_info, dcc,
5371                                                                 address,
5372                                                                 force_disable_dcc);
5373                 if (ret)
5374                         return ret;
5375         } else {
5376                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5377         }
5378
5379         return 0;
5380 }
5381
5382 static void
5383 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5384                                bool *per_pixel_alpha, bool *global_alpha,
5385                                int *global_alpha_value)
5386 {
5387         *per_pixel_alpha = false;
5388         *global_alpha = false;
5389         *global_alpha_value = 0xff;
5390
5391         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5392                 return;
5393
5394         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5395                 static const uint32_t alpha_formats[] = {
5396                         DRM_FORMAT_ARGB8888,
5397                         DRM_FORMAT_RGBA8888,
5398                         DRM_FORMAT_ABGR8888,
5399                 };
5400                 uint32_t format = plane_state->fb->format->format;
5401                 unsigned int i;
5402
5403                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5404                         if (format == alpha_formats[i]) {
5405                                 *per_pixel_alpha = true;
5406                                 break;
5407                         }
5408                 }
5409         }
5410
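        /* DRM plane alpha is 16-bit, 0xffff == fully opaque; DC takes 8 bits, hence the >> 8 below. */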
5411         if (plane_state->alpha < 0xffff) {
5412                 *global_alpha = true;
5413                 *global_alpha_value = plane_state->alpha >> 8;
5414         }
5415 }
5416
5417 static int
5418 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5419                             const enum surface_pixel_format format,
5420                             enum dc_color_space *color_space)
5421 {
5422         bool full_range;
5423
5424         *color_space = COLOR_SPACE_SRGB;
5425
5426         /* DRM color properties only affect non-RGB formats. */
5427         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5428                 return 0;
5429
5430         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5431
5432         switch (plane_state->color_encoding) {
5433         case DRM_COLOR_YCBCR_BT601:
5434                 if (full_range)
5435                         *color_space = COLOR_SPACE_YCBCR601;
5436                 else
5437                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5438                 break;
5439
5440         case DRM_COLOR_YCBCR_BT709:
5441                 if (full_range)
5442                         *color_space = COLOR_SPACE_YCBCR709;
5443                 else
5444                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5445                 break;
5446
5447         case DRM_COLOR_YCBCR_BT2020:
5448                 if (full_range)
5449                         *color_space = COLOR_SPACE_2020_YCBCR;
5450                 else
5451                         return -EINVAL;
5452                 break;
5453
5454         default:
5455                 return -EINVAL;
5456         }
5457
5458         return 0;
5459 }
5460
5461 static int
5462 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5463                             const struct drm_plane_state *plane_state,
5464                             const uint64_t tiling_flags,
5465                             struct dc_plane_info *plane_info,
5466                             struct dc_plane_address *address,
5467                             bool tmz_surface,
5468                             bool force_disable_dcc)
5469 {
5470         const struct drm_framebuffer *fb = plane_state->fb;
5471         const struct amdgpu_framebuffer *afb =
5472                 to_amdgpu_framebuffer(plane_state->fb);
5473         int ret;
5474
5475         memset(plane_info, 0, sizeof(*plane_info));
5476
5477         switch (fb->format->format) {
5478         case DRM_FORMAT_C8:
5479                 plane_info->format =
5480                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5481                 break;
5482         case DRM_FORMAT_RGB565:
5483                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5484                 break;
5485         case DRM_FORMAT_XRGB8888:
5486         case DRM_FORMAT_ARGB8888:
5487                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5488                 break;
5489         case DRM_FORMAT_XRGB2101010:
5490         case DRM_FORMAT_ARGB2101010:
5491                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5492                 break;
5493         case DRM_FORMAT_XBGR2101010:
5494         case DRM_FORMAT_ABGR2101010:
5495                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5496                 break;
5497         case DRM_FORMAT_XBGR8888:
5498         case DRM_FORMAT_ABGR8888:
5499                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5500                 break;
5501         case DRM_FORMAT_NV21:
5502                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5503                 break;
5504         case DRM_FORMAT_NV12:
5505                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5506                 break;
5507         case DRM_FORMAT_P010:
5508                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5509                 break;
5510         case DRM_FORMAT_XRGB16161616F:
5511         case DRM_FORMAT_ARGB16161616F:
5512                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5513                 break;
5514         case DRM_FORMAT_XBGR16161616F:
5515         case DRM_FORMAT_ABGR16161616F:
5516                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5517                 break;
5518         case DRM_FORMAT_XRGB16161616:
5519         case DRM_FORMAT_ARGB16161616:
5520                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5521                 break;
5522         case DRM_FORMAT_XBGR16161616:
5523         case DRM_FORMAT_ABGR16161616:
5524                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5525                 break;
5526         default:
5527                 DRM_ERROR(
5528                         "Unsupported screen format %p4cc\n",
5529                         &fb->format->format);
5530                 return -EINVAL;
5531         }
5532
5533         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5534         case DRM_MODE_ROTATE_0:
5535                 plane_info->rotation = ROTATION_ANGLE_0;
5536                 break;
5537         case DRM_MODE_ROTATE_90:
5538                 plane_info->rotation = ROTATION_ANGLE_90;
5539                 break;
5540         case DRM_MODE_ROTATE_180:
5541                 plane_info->rotation = ROTATION_ANGLE_180;
5542                 break;
5543         case DRM_MODE_ROTATE_270:
5544                 plane_info->rotation = ROTATION_ANGLE_270;
5545                 break;
5546         default:
5547                 plane_info->rotation = ROTATION_ANGLE_0;
5548                 break;
5549         }
5550
5551         plane_info->visible = true;
5552         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5553
5554         plane_info->layer_index = 0;
5555
5556         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5557                                           &plane_info->color_space);
5558         if (ret)
5559                 return ret;
5560
5561         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5562                                            plane_info->rotation, tiling_flags,
5563                                            &plane_info->tiling_info,
5564                                            &plane_info->plane_size,
5565                                            &plane_info->dcc, address, tmz_surface,
5566                                            force_disable_dcc);
5567         if (ret)
5568                 return ret;
5569
5570         fill_blending_from_plane_state(
5571                 plane_state, &plane_info->per_pixel_alpha,
5572                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5573
5574         return 0;
5575 }
5576
5577 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5578                                     struct dc_plane_state *dc_plane_state,
5579                                     struct drm_plane_state *plane_state,
5580                                     struct drm_crtc_state *crtc_state)
5581 {
5582         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5583         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5584         struct dc_scaling_info scaling_info;
5585         struct dc_plane_info plane_info;
5586         int ret;
5587         bool force_disable_dcc = false;
5588
5589         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5590         if (ret)
5591                 return ret;
5592
5593         dc_plane_state->src_rect = scaling_info.src_rect;
5594         dc_plane_state->dst_rect = scaling_info.dst_rect;
5595         dc_plane_state->clip_rect = scaling_info.clip_rect;
5596         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5597
5598         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5599         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5600                                           afb->tiling_flags,
5601                                           &plane_info,
5602                                           &dc_plane_state->address,
5603                                           afb->tmz_surface,
5604                                           force_disable_dcc);
5605         if (ret)
5606                 return ret;
5607
5608         dc_plane_state->format = plane_info.format;
5609         dc_plane_state->color_space = plane_info.color_space;
5611         dc_plane_state->plane_size = plane_info.plane_size;
5612         dc_plane_state->rotation = plane_info.rotation;
5613         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5614         dc_plane_state->stereo_format = plane_info.stereo_format;
5615         dc_plane_state->tiling_info = plane_info.tiling_info;
5616         dc_plane_state->visible = plane_info.visible;
5617         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5618         dc_plane_state->global_alpha = plane_info.global_alpha;
5619         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5620         dc_plane_state->dcc = plane_info.dcc;
5621         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5622         dc_plane_state->flip_int_enabled = true;
5623
5624         /*
5625          * Always set input transfer function, since plane state is refreshed
5626          * every time.
5627          */
5628         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5629         if (ret)
5630                 return ret;
5631
5632         return 0;
5633 }
5634
5635 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5636                                            const struct dm_connector_state *dm_state,
5637                                            struct dc_stream_state *stream)
5638 {
5639         enum amdgpu_rmx_type rmx_type;
5640
5641         struct rect src = { 0 }; /* viewport in composition space */
5642         struct rect dst = { 0 }; /* stream addressable area */
5643
5644         /* no mode. nothing to be done */
5645         if (!mode)
5646                 return;
5647
5648         /* Full screen scaling by default */
5649         src.width = mode->hdisplay;
5650         src.height = mode->vdisplay;
5651         dst.width = stream->timing.h_addressable;
5652         dst.height = stream->timing.v_addressable;
5653
5654         if (dm_state) {
5655                 rmx_type = dm_state->scaling;
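                /*
                 * Aspect-preserving fit: shrink whichever destination axis
                 * would over-stretch. Illustrative numbers (not from the
                 * driver): a 1280x1024 source on a 1920x1080 stream gives
                 * 1280*1080 < 1024*1920, so dst.width = 1280*1080/1024 = 1350,
                 * i.e. a centered 1350x1080 pillarbox.
                 */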
5656                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5657                         if (src.width * dst.height <
5658                                         src.height * dst.width) {
5659                                 /* height needs less upscaling/more downscaling */
5660                                 dst.width = src.width *
5661                                                 dst.height / src.height;
5662                         } else {
5663                                 /* width needs less upscaling/more downscaling */
5664                                 dst.height = src.height *
5665                                                 dst.width / src.width;
5666                         }
5667                 } else if (rmx_type == RMX_CENTER) {
5668                         dst = src;
5669                 }
5670
5671                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5672                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5673
5674                 if (dm_state->underscan_enable) {
5675                         dst.x += dm_state->underscan_hborder / 2;
5676                         dst.y += dm_state->underscan_vborder / 2;
5677                         dst.width -= dm_state->underscan_hborder;
5678                         dst.height -= dm_state->underscan_vborder;
5679                 }
5680         }
5681
5682         stream->src = src;
5683         stream->dst = dst;
5684
5685         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5686                       dst.x, dst.y, dst.width, dst.height);
5687
5688 }
5689
5690 static enum dc_color_depth
5691 convert_color_depth_from_display_info(const struct drm_connector *connector,
5692                                       bool is_y420, int requested_bpc)
5693 {
5694         uint8_t bpc;
5695
5696         if (is_y420) {
5697                 bpc = 8;
5698
5699                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5700                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5701                         bpc = 16;
5702                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5703                         bpc = 12;
5704                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5705                         bpc = 10;
5706         } else {
5707                 bpc = (uint8_t)connector->display_info.bpc;
5708                 /* Assume 8 bpc by default if no bpc is specified. */
5709                 bpc = bpc ? bpc : 8;
5710         }
5711
5712         if (requested_bpc > 0) {
5713                 /*
5714                  * Cap display bpc based on the user requested value.
5715                  *
5716                  * The value of state->max_bpc may not be correctly updated
5717                  * depending on when the connector gets added to the state
5718                  * or if this was called outside of atomic check, so it
5719                  * can't be used directly.
5720                  */
5721                 bpc = min_t(u8, bpc, requested_bpc);
5722
5723                 /* Round down to the nearest even number. */
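                /* e.g. an odd 11 bpc becomes 10; DC only handles even depths. */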
5724                 bpc = bpc - (bpc & 1);
5725         }
5726
5727         switch (bpc) {
5728         case 0:
5729                 /*
5730                  * Temporary workaround: DRM doesn't parse color depth for
5731                  * EDID revisions before 1.4.
5732                  * TODO: Fix edid parsing
5733                  */
5734                 return COLOR_DEPTH_888;
5735         case 6:
5736                 return COLOR_DEPTH_666;
5737         case 8:
5738                 return COLOR_DEPTH_888;
5739         case 10:
5740                 return COLOR_DEPTH_101010;
5741         case 12:
5742                 return COLOR_DEPTH_121212;
5743         case 14:
5744                 return COLOR_DEPTH_141414;
5745         case 16:
5746                 return COLOR_DEPTH_161616;
5747         default:
5748                 return COLOR_DEPTH_UNDEFINED;
5749         }
5750 }
5751
5752 static enum dc_aspect_ratio
5753 get_aspect_ratio(const struct drm_display_mode *mode_in)
5754 {
5755         /* 1-1 mapping, since both enums follow the HDMI spec. */
5756         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5757 }
5758
5759 static enum dc_color_space
5760 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5761 {
5762         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5763
5764         switch (dc_crtc_timing->pixel_encoding) {
5765         case PIXEL_ENCODING_YCBCR422:
5766         case PIXEL_ENCODING_YCBCR444:
5767         case PIXEL_ENCODING_YCBCR420:
5768         {
5769                 /*
5770                  * Per the HDMI spec, 27030 kHz is the separation point
5771                  * between HDTV and SDTV; use YCbCr709 above it and
5772                  * YCbCr601 below it.
5773                  */
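                /* e.g. 480p (27.000 MHz) stays YCbCr601; 1080p60 (148.5 MHz) gets YCbCr709 */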
5774                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5775                         if (dc_crtc_timing->flags.Y_ONLY)
5776                                 color_space =
5777                                         COLOR_SPACE_YCBCR709_LIMITED;
5778                         else
5779                                 color_space = COLOR_SPACE_YCBCR709;
5780                 } else {
5781                         if (dc_crtc_timing->flags.Y_ONLY)
5782                                 color_space =
5783                                         COLOR_SPACE_YCBCR601_LIMITED;
5784                         else
5785                                 color_space = COLOR_SPACE_YCBCR601;
5786                 }
5787
5788         }
5789         break;
5790         case PIXEL_ENCODING_RGB:
5791                 color_space = COLOR_SPACE_SRGB;
5792                 break;
5793
5794         default:
5795                 WARN_ON(1);
5796                 break;
5797         }
5798
5799         return color_space;
5800 }
5801
5802 static bool adjust_colour_depth_from_display_info(
5803         struct dc_crtc_timing *timing_out,
5804         const struct drm_display_info *info)
5805 {
5806         enum dc_color_depth depth = timing_out->display_color_depth;
5807         int normalized_clk;

5808         do {
5809                 normalized_clk = timing_out->pix_clk_100hz / 10;
5810                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5811                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5812                         normalized_clk /= 2;
5813                 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
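                /*
                 * Worked example (illustrative): 4K60 4:4:4 has a 594000 kHz
                 * pixel clock; at 12 bpc it scales to 594000 * 36 / 24 =
                 * 891000 kHz, above HDMI 2.0's 600000 kHz TMDS limit, so the
                 * loop steps the depth down toward 8 bpc.
                 */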
5814                 switch (depth) {
5815                 case COLOR_DEPTH_888:
5816                         break;
5817                 case COLOR_DEPTH_101010:
5818                         normalized_clk = (normalized_clk * 30) / 24;
5819                         break;
5820                 case COLOR_DEPTH_121212:
5821                         normalized_clk = (normalized_clk * 36) / 24;
5822                         break;
5823                 case COLOR_DEPTH_161616:
5824                         normalized_clk = (normalized_clk * 48) / 24;
5825                         break;
5826                 default:
5827                         /* The above depths are the only ones valid for HDMI. */
5828                         return false;
5829                 }
5830                 if (normalized_clk <= info->max_tmds_clock) {
5831                         timing_out->display_color_depth = depth;
5832                         return true;
5833                 }
5834         } while (--depth > COLOR_DEPTH_666);
5835         return false;
5836 }
5837
5838 static void fill_stream_properties_from_drm_display_mode(
5839         struct dc_stream_state *stream,
5840         const struct drm_display_mode *mode_in,
5841         const struct drm_connector *connector,
5842         const struct drm_connector_state *connector_state,
5843         const struct dc_stream_state *old_stream,
5844         int requested_bpc)
5845 {
5846         struct dc_crtc_timing *timing_out = &stream->timing;
5847         const struct drm_display_info *info = &connector->display_info;
5848         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5849         struct hdmi_vendor_infoframe hv_frame;
5850         struct hdmi_avi_infoframe avi_frame;
5851
5852         memset(&hv_frame, 0, sizeof(hv_frame));
5853         memset(&avi_frame, 0, sizeof(avi_frame));
5854
5855         timing_out->h_border_left = 0;
5856         timing_out->h_border_right = 0;
5857         timing_out->v_border_top = 0;
5858         timing_out->v_border_bottom = 0;
5859         /* TODO: un-hardcode */
5860         if (drm_mode_is_420_only(info, mode_in)
5861                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5862                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5863         else if (drm_mode_is_420_also(info, mode_in)
5864                         && aconnector->force_yuv420_output)
5865                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5866         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5867                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5868                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5869         else
5870                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5871
5872         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5873         timing_out->display_color_depth = convert_color_depth_from_display_info(
5874                 connector,
5875                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5876                 requested_bpc);
5877         timing_out->scan_type = SCANNING_TYPE_NODATA;
5878         timing_out->hdmi_vic = 0;
5879
5880         if (old_stream) {
5881                 timing_out->vic = old_stream->timing.vic;
5882                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5883                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5884         } else {
5885                 timing_out->vic = drm_match_cea_mode(mode_in);
5886                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5887                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5888                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5889                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5890         }
5891
5892         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5893                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5894                 timing_out->vic = avi_frame.video_code;
5895                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5896                 timing_out->hdmi_vic = hv_frame.vic;
5897         }
5898
5899         if (is_freesync_video_mode(mode_in, aconnector)) {
5900                 timing_out->h_addressable = mode_in->hdisplay;
5901                 timing_out->h_total = mode_in->htotal;
5902                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5903                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5904                 timing_out->v_total = mode_in->vtotal;
5905                 timing_out->v_addressable = mode_in->vdisplay;
5906                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5907                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5908                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5909         } else {
5910                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5911                 timing_out->h_total = mode_in->crtc_htotal;
5912                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5913                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5914                 timing_out->v_total = mode_in->crtc_vtotal;
5915                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5916                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5917                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5918                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5919         }
5920
5921         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5922
5923         stream->output_color_space = get_output_color_space(timing_out);
5924
5925         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5926         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5927         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5928                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5929                     drm_mode_is_420_also(info, mode_in) &&
5930                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5931                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5932                         adjust_colour_depth_from_display_info(timing_out, info);
5933                 }
5934         }
5935 }
5936
5937 static void fill_audio_info(struct audio_info *audio_info,
5938                             const struct drm_connector *drm_connector,
5939                             const struct dc_sink *dc_sink)
5940 {
5941         int i = 0;
5942         int cea_revision = 0;
5943         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5944
5945         audio_info->manufacture_id = edid_caps->manufacturer_id;
5946         audio_info->product_id = edid_caps->product_id;
5947
5948         cea_revision = drm_connector->display_info.cea_rev;
5949
5950         strscpy(audio_info->display_name,
5951                 edid_caps->display_name,
5952                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5953
5954         if (cea_revision >= 3) {
5955                 audio_info->mode_count = edid_caps->audio_mode_count;
5956
5957                 for (i = 0; i < audio_info->mode_count; ++i) {
5958                         audio_info->modes[i].format_code =
5959                                         (enum audio_format_code)
5960                                         (edid_caps->audio_modes[i].format_code);
5961                         audio_info->modes[i].channel_count =
5962                                         edid_caps->audio_modes[i].channel_count;
5963                         audio_info->modes[i].sample_rates.all =
5964                                         edid_caps->audio_modes[i].sample_rate;
5965                         audio_info->modes[i].sample_size =
5966                                         edid_caps->audio_modes[i].sample_size;
5967                 }
5968         }
5969
5970         audio_info->flags.all = edid_caps->speaker_flags;
5971
5972         /* TODO: We only check for the progressive mode, check for interlace mode too */
5973         if (drm_connector->latency_present[0]) {
5974                 audio_info->video_latency = drm_connector->video_latency[0];
5975                 audio_info->audio_latency = drm_connector->audio_latency[0];
5976         }
5977
5978         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5979
5980 }
5981
5982 static void
5983 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5984                                       struct drm_display_mode *dst_mode)
5985 {
5986         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5987         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5988         dst_mode->crtc_clock = src_mode->crtc_clock;
5989         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5990         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5991         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5992         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5993         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5994         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5995         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5996         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5997         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5998         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5999         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6000 }
6001
6002 static void
6003 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6004                                         const struct drm_display_mode *native_mode,
6005                                         bool scale_enabled)
6006 {
6007         if (scale_enabled) {
6008                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6009         } else if (native_mode->clock == drm_mode->clock &&
6010                         native_mode->htotal == drm_mode->htotal &&
6011                         native_mode->vtotal == drm_mode->vtotal) {
6012                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6013         } else {
6014                 /* no scaling and no amdgpu-inserted mode: nothing to patch */
6015         }
6016 }
6017
6018 static struct dc_sink *
6019 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6020 {
6021         struct dc_sink_init_data sink_init_data = { 0 };
6022         struct dc_sink *sink = NULL;

6023         sink_init_data.link = aconnector->dc_link;
6024         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6025
6026         sink = dc_sink_create(&sink_init_data);
6027         if (!sink) {
6028                 DRM_ERROR("Failed to create sink!\n");
6029                 return NULL;
6030         }
6031         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6032
6033         return sink;
6034 }
6035
6036 static void set_multisync_trigger_params(
6037                 struct dc_stream_state *stream)
6038 {
6039         struct dc_stream_state *master = NULL;
6040
6041         if (stream->triggered_crtc_reset.enabled) {
6042                 master = stream->triggered_crtc_reset.event_source;
6043                 stream->triggered_crtc_reset.event =
6044                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6045                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6046                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6047         }
6048 }
6049
6050 static void set_master_stream(struct dc_stream_state *stream_set[],
6051                               int stream_count)
6052 {
6053         int j, highest_rfr = 0, master_stream = 0;
6054
6055         for (j = 0;  j < stream_count; j++) {
6056                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6057                         int refresh_rate = 0;
6058
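                        /* refresh rate in Hz = pixel clock in Hz / (h_total * v_total) */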
6059                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6060                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6061                         if (refresh_rate > highest_rfr) {
6062                                 highest_rfr = refresh_rate;
6063                                 master_stream = j;
6064                         }
6065                 }
6066         }
6067         for (j = 0;  j < stream_count; j++) {
6068                 if (stream_set[j])
6069                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6070         }
6071 }
6072
6073 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6074 {
6075         int i = 0;
6076         struct dc_stream_state *stream;
6077
6078         if (context->stream_count < 2)
6079                 return;
6080         for (i = 0; i < context->stream_count; i++) {
6081                 if (!context->streams[i])
6082                         continue;
6083                 /*
6084                  * TODO: add a function to read AMD VSDB bits and set
6085                  * crtc_sync_master.multi_sync_enabled flag
6086                  * For now it's set to false
6087                  */
6088         }
6089
6090         set_master_stream(context->streams, context->stream_count);
6091
6092         for (i = 0; i < context->stream_count; i++) {
6093                 stream = context->streams[i];
6094
6095                 if (!stream)
6096                         continue;
6097
6098                 set_multisync_trigger_params(stream);
6099         }
6100 }
6101
6102 #if defined(CONFIG_DRM_AMD_DC_DCN)
6103 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6104                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6105                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6106 {
6107         stream->timing.flags.DSC = 0;
6108         dsc_caps->is_dsc_supported = false;
6109
6110         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6111                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6112                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6113                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6114                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6115                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6116                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6117                                 dsc_caps);
6118         }
6119 }
6120
6121 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6122                                     struct dc_sink *sink, struct dc_stream_state *stream,
6123                                     struct dsc_dec_dpcd_caps *dsc_caps,
6124                                     uint32_t max_dsc_target_bpp_limit_override)
6125 {
6126         const struct dc_link_settings *verified_link_cap = NULL;
6127         uint32_t link_bw_in_kbps;
6128         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6129         struct dc *dc = sink->ctx->dc;
6130         struct dc_dsc_bw_range bw_range = {0};
6131         struct dc_dsc_config dsc_cfg = {0};
6132
6133         verified_link_cap = dc_link_get_link_cap(stream->link);
6134         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
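        /* bpp values here are fixed-point in units of 1/16 bpp, so 8 * 16 == 8.0 bpp */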
6135         edp_min_bpp_x16 = 8 * 16;
6136         edp_max_bpp_x16 = 8 * 16;
6137
6138         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6139                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6140
6141         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6142                 edp_min_bpp_x16 = edp_max_bpp_x16;
6143
6144         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6145                                 dc->debug.dsc_min_slice_height_override,
6146                                 edp_min_bpp_x16, edp_max_bpp_x16,
6147                                 dsc_caps,
6148                                 &stream->timing,
6149                                 &bw_range)) {
6150
6151                 if (bw_range.max_kbps < link_bw_in_kbps) {
6152                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6153                                         dsc_caps,
6154                                         dc->debug.dsc_min_slice_height_override,
6155                                         max_dsc_target_bpp_limit_override,
6156                                         0,
6157                                         &stream->timing,
6158                                         &dsc_cfg)) {
6159                                 stream->timing.dsc_cfg = dsc_cfg;
6160                                 stream->timing.flags.DSC = 1;
6161                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6162                         }
6163                         return;
6164                 }
6165         }
6166
6167         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6168                                 dsc_caps,
6169                                 dc->debug.dsc_min_slice_height_override,
6170                                 max_dsc_target_bpp_limit_override,
6171                                 link_bw_in_kbps,
6172                                 &stream->timing,
6173                                 &dsc_cfg)) {
6174                 stream->timing.dsc_cfg = dsc_cfg;
6175                 stream->timing.flags.DSC = 1;
6176         }
6177 }
6178
6179 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6180                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6181                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6182 {
6183         struct drm_connector *drm_connector = &aconnector->base;
6184         uint32_t link_bandwidth_kbps;
6185         uint32_t max_dsc_target_bpp_limit_override = 0;
6186         struct dc *dc = sink->ctx->dc;
6187         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6188         uint32_t dsc_max_supported_bw_in_kbps;
6189
6190         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6191                                                         dc_link_get_link_cap(aconnector->dc_link));
6192
6193         if (stream->link && stream->link->local_sink)
6194                 max_dsc_target_bpp_limit_override =
6195                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6196
6197         /* Set DSC policy according to dsc_clock_en */
6198         dc_dsc_policy_set_enable_dsc_when_not_needed(
6199                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6200
6201         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6202             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6203
6204                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6205
6206         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6207                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6208                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6209                                                 dsc_caps,
6210                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6211                                                 max_dsc_target_bpp_limit_override,
6212                                                 link_bandwidth_kbps,
6213                                                 &stream->timing,
6214                                                 &stream->timing.dsc_cfg)) {
6215                                 stream->timing.flags.DSC = 1;
6216                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6217                                                                  __func__, drm_connector->name);
6218                         }
6219                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6220                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6221                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6222                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6223
6224                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6225                                         max_supported_bw_in_kbps > 0 &&
6226                                         dsc_max_supported_bw_in_kbps > 0)
6227                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6228                                                 dsc_caps,
6229                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6230                                                 max_dsc_target_bpp_limit_override,
6231                                                 dsc_max_supported_bw_in_kbps,
6232                                                 &stream->timing,
6233                                                 &stream->timing.dsc_cfg)) {
6234                                         stream->timing.flags.DSC = 1;
6235                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6236                                                                          __func__, drm_connector->name);
6237                                 }
6238                 }
6239         }
6240
6241         /* Overwrite the stream flag if DSC is enabled through debugfs */
6242         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6243                 stream->timing.flags.DSC = 1;
6244
6245         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6246                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6247
6248         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6249                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6250
6251         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6252                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6253 }
6254 #endif /* CONFIG_DRM_AMD_DC_DCN */
6255
6256 /**
6257  * DOC: FreeSync Video
6258  *
6259  * When a userspace application wants to play a video, the content follows a
6260  * standard format definition that usually specifies the FPS for that format.
6261  * The list below illustrates some video formats and their expected FPS,
6262  * respectively:
6263  *
6264  * - TV/NTSC (23.976 FPS)
6265  * - Cinema (24 FPS)
6266  * - TV/PAL (25 FPS)
6267  * - TV/NTSC (29.97 FPS)
6268  * - TV/NTSC (30 FPS)
6269  * - Cinema HFR (48 FPS)
6270  * - TV/PAL (50 FPS)
6271  * - Commonly used (60 FPS)
6272  * - Multiples of 24 (48, 72, 96, 120 FPS)
6273  *
6274  * The list of standard video formats is not huge and can be added to the
6275  * connector modeset list beforehand. With that, userspace can leverage
6276  * FreeSync to extend the front porch in order to attain the target refresh
6277  * rate. Such a switch happens seamlessly, without screen blanking or
6278  * reprogramming of the output in any other way. If userspace requests a
6279  * modesetting change compatible with FreeSync modes that only differ in the
6280  * refresh rate, DC will skip the full update and avoid blanking during the
6281  * transition. For example, a video player can switch the mode from 60 Hz to
6282  * 30 Hz for playing TV/NTSC content when it goes full screen, without
6283  * causing any display blink. The same concept applies to any mode setting
6284  * change that only differs in refresh rate.
6285  */
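/*
 * Illustrative numbers (not taken from the driver): a 1080p60 CEA timing has
 * htotal 2200, vtotal 1125 and a 148.5 MHz pixel clock, i.e.
 * 148500000 / (2200 * 1125) = 60 Hz. Stretching only the vertical front
 * porch so that vtotal becomes 1406 gives 148500000 / (2200 * 1406) ~= 48 Hz
 * while the rest of the timing stays untouched, which is what makes the
 * switch seamless.
 */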
6286 static struct drm_display_mode *
6287 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6288                           bool use_probed_modes)
6289 {
6290         struct drm_display_mode *m, *m_pref = NULL;
6291         u16 current_refresh, highest_refresh;
6292         struct list_head *list_head = use_probed_modes ?
6293                                                     &aconnector->base.probed_modes :
6294                                                     &aconnector->base.modes;
6295
6296         if (aconnector->freesync_vid_base.clock != 0)
6297                 return &aconnector->freesync_vid_base;
6298
6299         /* Find the preferred mode */
6300         list_for_each_entry(m, list_head, head) {
6301                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6302                         m_pref = m;
6303                         break;
6304                 }
6305         }
6306
6307         if (!m_pref) {
6308                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
6309                 m_pref = list_first_entry_or_null(
6310                         &aconnector->base.modes, struct drm_display_mode, head);
6311                 if (!m_pref) {
6312                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6313                         return NULL;
6314                 }
6315         }
6316
6317         highest_refresh = drm_mode_vrefresh(m_pref);
6318
6319         /*
6320          * Find the mode with highest refresh rate with same resolution.
6321          * For some monitors, preferred mode is not the mode with highest
6322          * supported refresh rate.
6323          */
6324         list_for_each_entry(m, list_head, head) {
6325                 current_refresh  = drm_mode_vrefresh(m);
6326
6327                 if (m->hdisplay == m_pref->hdisplay &&
6328                     m->vdisplay == m_pref->vdisplay &&
6329                     highest_refresh < current_refresh) {
6330                         highest_refresh = current_refresh;
6331                         m_pref = m;
6332                 }
6333         }
6334
6335         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6336         return m_pref;
6337 }
6338
6339 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6340                                    struct amdgpu_dm_connector *aconnector)
6341 {
6342         struct drm_display_mode *high_mode;
6343         int timing_diff;
6344
6345         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6346         if (!high_mode || !mode)
6347                 return false;
6348
6349         timing_diff = high_mode->vtotal - mode->vtotal;
6350
6351         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6352             high_mode->hdisplay != mode->hdisplay ||
6353             high_mode->vdisplay != mode->vdisplay ||
6354             high_mode->hsync_start != mode->hsync_start ||
6355             high_mode->hsync_end != mode->hsync_end ||
6356             high_mode->htotal != mode->htotal ||
6357             high_mode->hskew != mode->hskew ||
6358             high_mode->vscan != mode->vscan ||
6359             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6360             high_mode->vsync_end - mode->vsync_end != timing_diff)
6361                 return false;
6362         else
6363                 return true;
6364 }
6365
6366 static struct dc_stream_state *
6367 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6368                        const struct drm_display_mode *drm_mode,
6369                        const struct dm_connector_state *dm_state,
6370                        const struct dc_stream_state *old_stream,
6371                        int requested_bpc)
6372 {
6373         struct drm_display_mode *preferred_mode = NULL;
6374         struct drm_connector *drm_connector;
6375         const struct drm_connector_state *con_state =
6376                 dm_state ? &dm_state->base : NULL;
6377         struct dc_stream_state *stream = NULL;
6378         struct drm_display_mode mode = *drm_mode;
6379         struct drm_display_mode saved_mode;
6380         struct drm_display_mode *freesync_mode = NULL;
6381         bool native_mode_found = false;
6382         bool recalculate_timing = false;
6383         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6384         int mode_refresh;
6385         int preferred_refresh = 0;
6386 #if defined(CONFIG_DRM_AMD_DC_DCN)
6387         struct dsc_dec_dpcd_caps dsc_caps;
6388 #endif
6389         struct dc_sink *sink = NULL;
6390
6391         memset(&saved_mode, 0, sizeof(saved_mode));
6392
6393         if (aconnector == NULL) {
6394                 DRM_ERROR("aconnector is NULL!\n");
6395                 return stream;
6396         }
6397
6398         drm_connector = &aconnector->base;
6399
6400         if (!aconnector->dc_sink) {
6401                 sink = create_fake_sink(aconnector);
6402                 if (!sink)
6403                         return stream;
6404         } else {
6405                 sink = aconnector->dc_sink;
6406                 dc_sink_retain(sink);
6407         }
6408
6409         stream = dc_create_stream_for_sink(sink);
6410
6411         if (stream == NULL) {
6412                 DRM_ERROR("Failed to create stream for sink!\n");
6413                 goto finish;
6414         }
6415
6416         stream->dm_stream_context = aconnector;
6417
6418         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6419                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6420
6421         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6422                 /* Search for preferred mode */
6423                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6424                         native_mode_found = true;
6425                         break;
6426                 }
6427         }
6428         if (!native_mode_found)
6429                 preferred_mode = list_first_entry_or_null(
6430                                 &aconnector->base.modes,
6431                                 struct drm_display_mode,
6432                                 head);
6433
6434         mode_refresh = drm_mode_vrefresh(&mode);
6435
6436         if (preferred_mode == NULL) {
6437                 /*
6438                  * This may not be an error: the use case is when we have no
6439                  * usermode calls to reset and set mode upon hotplug. In this
6440                  * case, we call set mode ourselves to restore the previous mode
6441                  * and the mode list may not be filled in yet.
6442                  */
6443                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6444         } else {
6445                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6446                 if (recalculate_timing) {
6447                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6448                         drm_mode_copy(&saved_mode, &mode);
6449                         drm_mode_copy(&mode, freesync_mode);
6450                 } else {
6451                         decide_crtc_timing_for_drm_display_mode(
6452                                 &mode, preferred_mode, scale);
6453
6454                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6455                 }
6456         }
6457
6458         if (recalculate_timing)
6459                 drm_mode_set_crtcinfo(&saved_mode, 0);
6460         else if (!dm_state)
6461                 drm_mode_set_crtcinfo(&mode, 0);
6462
6463         /*
6464          * If scaling is enabled and the refresh rate didn't change,
6465          * we copy the vic and polarities of the old timings.
6466          */
6467         if (!scale || mode_refresh != preferred_refresh)
6468                 fill_stream_properties_from_drm_display_mode(
6469                         stream, &mode, &aconnector->base, con_state, NULL,
6470                         requested_bpc);
6471         else
6472                 fill_stream_properties_from_drm_display_mode(
6473                         stream, &mode, &aconnector->base, con_state, old_stream,
6474                         requested_bpc);
6475
6476 #if defined(CONFIG_DRM_AMD_DC_DCN)
6477         /* SST DSC determination policy */
6478         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6479         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6480                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6481 #endif
6482
6483         update_stream_scaling_settings(&mode, dm_state, stream);
6484
6485         fill_audio_info(
6486                 &stream->audio_info,
6487                 drm_connector,
6488                 sink);
6489
6490         update_stream_signal(stream, sink);
6491
6492         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6493                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6494
6495         if (stream->link->psr_settings.psr_feature_enabled) {
6496                 /*
6497                  * Decide whether the stream supports VSC SDP colorimetry
6498                  * before building the VSC info packet.
6499                  */
6500                 stream->use_vsc_sdp_for_colorimetry = false;
6501                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6502                         stream->use_vsc_sdp_for_colorimetry =
6503                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6504                 } else {
6505                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6506                                 stream->use_vsc_sdp_for_colorimetry = true;
6507                 }
6508                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6509                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6510
6511         }
6512 finish:
6513         dc_sink_release(sink);
6514
6515         return stream;
6516 }
6517
6518 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6519 {
6520         drm_crtc_cleanup(crtc);
6521         kfree(crtc);
6522 }
6523
6524 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6525                                   struct drm_crtc_state *state)
6526 {
6527         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6528
6529         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6530         if (cur->stream)
6531                 dc_stream_release(cur->stream);
6532
6534         __drm_atomic_helper_crtc_destroy_state(state);
6535
6537         kfree(state);
6538 }
6539
6540 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6541 {
6542         struct dm_crtc_state *state;
6543
6544         if (crtc->state)
6545                 dm_crtc_destroy_state(crtc, crtc->state);
6546
6547         state = kzalloc(sizeof(*state), GFP_KERNEL);
6548         if (WARN_ON(!state))
6549                 return;
6550
6551         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6552 }
6553
6554 static struct drm_crtc_state *
6555 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6556 {
6557         struct dm_crtc_state *state, *cur;
6558
6559         if (WARN_ON(!crtc->state))
6560                 return NULL;
6561
6562         cur = to_dm_crtc_state(crtc->state);
6563
6564         state = kzalloc(sizeof(*state), GFP_KERNEL);
6565         if (!state)
6566                 return NULL;
6567
6568         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6569
6570         if (cur->stream) {
6571                 state->stream = cur->stream;
6572                 dc_stream_retain(state->stream);
6573         }
6574
6575         state->active_planes = cur->active_planes;
6576         state->vrr_infopacket = cur->vrr_infopacket;
6577         state->abm_level = cur->abm_level;
6578         state->vrr_supported = cur->vrr_supported;
6579         state->freesync_config = cur->freesync_config;
6580         state->cm_has_degamma = cur->cm_has_degamma;
6581         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6582         state->force_dpms_off = cur->force_dpms_off;
6583         /* TODO: Duplicate dc_stream after the stream object is flattened */
6584
6585         return &state->base;
6586 }
6587
6588 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6589 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6590 {
6591         crtc_debugfs_init(crtc);
6592
6593         return 0;
6594 }
6595 #endif
6596
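/*
 * Toggle the VUPDATE interrupt for this CRTC's OTG instance. DM only
 * needs VUPDATE while variable refresh rate is active, so the callers
 * below enable it alongside VBLANK only in VRR mode.
 */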
6597 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6598 {
6599         enum dc_irq_source irq_source;
6600         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6601         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6602         int rc;
6603
6604         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6605
6606         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6607
6608         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6609                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6610         return rc;
6611 }
6612
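/*
 * Enable/disable the VBLANK interrupt for a CRTC. The VUPDATE interrupt
 * is kept in sync with it (needed only while VRR is active), and the
 * heavier stream work is deferred to vblank_control_workqueue, which is
 * skipped while the GPU is in reset.
 */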
6613 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6614 {
6615         enum dc_irq_source irq_source;
6616         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6617         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6618         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6619         struct amdgpu_display_manager *dm = &adev->dm;
6620         struct vblank_control_work *work;
6621         int rc = 0;
6622
6623         if (enable) {
6624                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6625                 if (amdgpu_dm_vrr_active(acrtc_state))
6626                         rc = dm_set_vupdate_irq(crtc, true);
6627         } else {
6628                 /* vblank irq off -> vupdate irq off */
6629                 rc = dm_set_vupdate_irq(crtc, false);
6630         }
6631
6632         if (rc)
6633                 return rc;
6634
6635         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6636
6637         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6638                 return -EBUSY;
6639
6640         if (amdgpu_in_reset(adev))
6641                 return 0;
6642
6643         if (dm->vblank_control_workqueue) {
6644                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6645                 if (!work)
6646                         return -ENOMEM;
6647
6648                 INIT_WORK(&work->work, vblank_control_worker);
6649                 work->dm = dm;
6650                 work->acrtc = acrtc;
6651                 work->enable = enable;
6652
6653                 if (acrtc_state->stream) {
6654                         dc_stream_retain(acrtc_state->stream);
6655                         work->stream = acrtc_state->stream;
6656                 }
6657
6658                 queue_work(dm->vblank_control_workqueue, &work->work);
6659         }
6660
6661         return 0;
6662 }
6663
6664 static int dm_enable_vblank(struct drm_crtc *crtc)
6665 {
6666         return dm_set_vblank(crtc, true);
6667 }
6668
6669 static void dm_disable_vblank(struct drm_crtc *crtc)
6670 {
6671         dm_set_vblank(crtc, false);
6672 }
6673
6674 /* Implement only the options currently available for the driver */
6675 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6676         .reset = dm_crtc_reset_state,
6677         .destroy = amdgpu_dm_crtc_destroy,
6678         .set_config = drm_atomic_helper_set_config,
6679         .page_flip = drm_atomic_helper_page_flip,
6680         .atomic_duplicate_state = dm_crtc_duplicate_state,
6681         .atomic_destroy_state = dm_crtc_destroy_state,
6682         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6683         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6684         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6685         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6686         .enable_vblank = dm_enable_vblank,
6687         .disable_vblank = dm_disable_vblank,
6688         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6689 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6690         .late_register = amdgpu_dm_crtc_late_register,
6691 #endif
6692 };
6693
6694 static enum drm_connector_status
6695 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6696 {
6697         bool connected;
6698         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6699
6700         /*
6701          * Notes:
6702          * 1. This interface is NOT called in context of HPD irq.
6703          * 2. This interface *is* called in the context of a user-mode ioctl,
6704          *    which makes it a bad place for *any* MST-related activity.
6705          */
6706
6707         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6708             !aconnector->fake_enable)
6709                 connected = (aconnector->dc_sink != NULL);
6710         else
6711                 connected = (aconnector->base.force == DRM_FORCE_ON);
6712
6713         update_subconnector_property(aconnector);
6714
6715         return (connected ? connector_status_connected :
6716                         connector_status_disconnected);
6717 }
6718
6719 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6720                                             struct drm_connector_state *connector_state,
6721                                             struct drm_property *property,
6722                                             uint64_t val)
6723 {
6724         struct drm_device *dev = connector->dev;
6725         struct amdgpu_device *adev = drm_to_adev(dev);
6726         struct dm_connector_state *dm_old_state =
6727                 to_dm_connector_state(connector->state);
6728         struct dm_connector_state *dm_new_state =
6729                 to_dm_connector_state(connector_state);
6730
6731         int ret = -EINVAL;
6732
6733         if (property == dev->mode_config.scaling_mode_property) {
6734                 enum amdgpu_rmx_type rmx_type;
6735
6736                 switch (val) {
6737                 case DRM_MODE_SCALE_CENTER:
6738                         rmx_type = RMX_CENTER;
6739                         break;
6740                 case DRM_MODE_SCALE_ASPECT:
6741                         rmx_type = RMX_ASPECT;
6742                         break;
6743                 case DRM_MODE_SCALE_FULLSCREEN:
6744                         rmx_type = RMX_FULL;
6745                         break;
6746                 case DRM_MODE_SCALE_NONE:
6747                 default:
6748                         rmx_type = RMX_OFF;
6749                         break;
6750                 }
6751
6752                 if (dm_old_state->scaling == rmx_type)
6753                         return 0;
6754
6755                 dm_new_state->scaling = rmx_type;
6756                 ret = 0;
6757         } else if (property == adev->mode_info.underscan_hborder_property) {
6758                 dm_new_state->underscan_hborder = val;
6759                 ret = 0;
6760         } else if (property == adev->mode_info.underscan_vborder_property) {
6761                 dm_new_state->underscan_vborder = val;
6762                 ret = 0;
6763         } else if (property == adev->mode_info.underscan_property) {
6764                 dm_new_state->underscan_enable = val;
6765                 ret = 0;
6766         } else if (property == adev->mode_info.abm_level_property) {
6767                 dm_new_state->abm_level = val;
6768                 ret = 0;
6769         }
6770
6771         return ret;
6772 }
6773
6774 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6775                                             const struct drm_connector_state *state,
6776                                             struct drm_property *property,
6777                                             uint64_t *val)
6778 {
6779         struct drm_device *dev = connector->dev;
6780         struct amdgpu_device *adev = drm_to_adev(dev);
6781         struct dm_connector_state *dm_state =
6782                 to_dm_connector_state(state);
6783         int ret = -EINVAL;
6784
6785         if (property == dev->mode_config.scaling_mode_property) {
6786                 switch (dm_state->scaling) {
6787                 case RMX_CENTER:
6788                         *val = DRM_MODE_SCALE_CENTER;
6789                         break;
6790                 case RMX_ASPECT:
6791                         *val = DRM_MODE_SCALE_ASPECT;
6792                         break;
6793                 case RMX_FULL:
6794                         *val = DRM_MODE_SCALE_FULLSCREEN;
6795                         break;
6796                 case RMX_OFF:
6797                 default:
6798                         *val = DRM_MODE_SCALE_NONE;
6799                         break;
6800                 }
6801                 ret = 0;
6802         } else if (property == adev->mode_info.underscan_hborder_property) {
6803                 *val = dm_state->underscan_hborder;
6804                 ret = 0;
6805         } else if (property == adev->mode_info.underscan_vborder_property) {
6806                 *val = dm_state->underscan_vborder;
6807                 ret = 0;
6808         } else if (property == adev->mode_info.underscan_property) {
6809                 *val = dm_state->underscan_enable;
6810                 ret = 0;
6811         } else if (property == adev->mode_info.abm_level_property) {
6812                 *val = dm_state->abm_level;
6813                 ret = 0;
6814         }
6815
6816         return ret;
6817 }
6818
6819 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6820 {
6821         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6822
6823         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6824 }
6825
6826 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6827 {
6828         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6829         const struct dc_link *link = aconnector->dc_link;
6830         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6831         struct amdgpu_display_manager *dm = &adev->dm;
6832         int i;
6833
6834         /*
6835          * Call only if mst_mgr was initialized before, since it's not done
6836          * for all connector types.
6837          */
6838         if (aconnector->mst_mgr.dev)
6839                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6840
6841 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6842         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6843         for (i = 0; i < dm->num_of_edps; i++) {
6844                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6845                         backlight_device_unregister(dm->backlight_dev[i]);
6846                         dm->backlight_dev[i] = NULL;
6847                 }
6848         }
6849 #endif
6850
6851         if (aconnector->dc_em_sink)
6852                 dc_sink_release(aconnector->dc_em_sink);
6853         aconnector->dc_em_sink = NULL;
6854         if (aconnector->dc_sink)
6855                 dc_sink_release(aconnector->dc_sink);
6856         aconnector->dc_sink = NULL;
6857
6858         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6859         drm_connector_unregister(connector);
6860         drm_connector_cleanup(connector);
6861         if (aconnector->i2c) {
6862                 i2c_del_adapter(&aconnector->i2c->base);
6863                 kfree(aconnector->i2c);
6864         }
6865         kfree(aconnector->dm_dp_aux.aux.name);
6866
6867         kfree(connector);
6868 }
6869
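/*
 * Reset the connector state to driver defaults: scaling off, no
 * underscan, 8 bpc max requested, and, for eDP, the module-level ABM
 * level.
 */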
6870 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6871 {
6872         struct dm_connector_state *state =
6873                 to_dm_connector_state(connector->state);
6874
6875         if (connector->state)
6876                 __drm_atomic_helper_connector_destroy_state(connector->state);
6877
6878         kfree(state);
6879
6880         state = kzalloc(sizeof(*state), GFP_KERNEL);
6881
6882         if (state) {
6883                 state->scaling = RMX_OFF;
6884                 state->underscan_enable = false;
6885                 state->underscan_hborder = 0;
6886                 state->underscan_vborder = 0;
6887                 state->base.max_requested_bpc = 8;
6888                 state->vcpi_slots = 0;
6889                 state->pbn = 0;
6890                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6891                         state->abm_level = amdgpu_dm_abm_level;
6892
6893                 __drm_atomic_helper_connector_reset(connector, &state->base);
6894         }
6895 }
6896
6897 struct drm_connector_state *
6898 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6899 {
6900         struct dm_connector_state *state =
6901                 to_dm_connector_state(connector->state);
6902
6903         struct dm_connector_state *new_state =
6904                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6905
6906         if (!new_state)
6907                 return NULL;
6908
6909         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6910
6911         new_state->freesync_capable = state->freesync_capable;
6912         new_state->abm_level = state->abm_level;
6913         new_state->scaling = state->scaling;
6914         new_state->underscan_enable = state->underscan_enable;
6915         new_state->underscan_hborder = state->underscan_hborder;
6916         new_state->underscan_vborder = state->underscan_vborder;
6917         new_state->vcpi_slots = state->vcpi_slots;
6918         new_state->pbn = state->pbn;
6919         return &new_state->base;
6920 }
6921
6922 static int
6923 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6924 {
6925         struct amdgpu_dm_connector *amdgpu_dm_connector =
6926                 to_amdgpu_dm_connector(connector);
6927         int r;
6928
6929         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6930             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6931                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6932                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6933                 if (r)
6934                         return r;
6935         }
6936
6937 #if defined(CONFIG_DEBUG_FS)
6938         connector_debugfs_init(amdgpu_dm_connector);
6939 #endif
6940
6941         return 0;
6942 }
6943
6944 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6945         .reset = amdgpu_dm_connector_funcs_reset,
6946         .detect = amdgpu_dm_connector_detect,
6947         .fill_modes = drm_helper_probe_single_connector_modes,
6948         .destroy = amdgpu_dm_connector_destroy,
6949         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6950         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6951         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6952         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6953         .late_register = amdgpu_dm_connector_late_register,
6954         .early_unregister = amdgpu_dm_connector_unregister
6955 };
6956
6957 static int get_modes(struct drm_connector *connector)
6958 {
6959         return amdgpu_dm_connector_get_modes(connector);
6960 }
6961
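/*
 * Create an emulated (virtual signal) DC sink from the EDID that was
 * forced on the connector, for use when no physical sink is attached.
 */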
6962 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6963 {
6964         struct dc_sink_init_data init_params = {
6965                         .link = aconnector->dc_link,
6966                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6967         };
6968         struct edid *edid;
6969
6970         if (!aconnector->base.edid_blob_ptr) {
6971                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6972                                 aconnector->base.name);
6973
6974                 aconnector->base.force = DRM_FORCE_OFF;
6975                 aconnector->base.override_edid = false;
6976                 return;
6977         }
6978
6979         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6980
6981         aconnector->edid = edid;
6982
6983         aconnector->dc_em_sink = dc_link_add_remote_sink(
6984                 aconnector->dc_link,
6985                 (uint8_t *)edid,
6986                 (edid->extensions + 1) * EDID_LENGTH,
6987                 &init_params);
6988
6989         if (aconnector->base.force == DRM_FORCE_ON) {
6990                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6991                                       aconnector->dc_link->local_sink :
6992                                       aconnector->dc_em_sink;
6993                 dc_sink_retain(aconnector->dc_sink);
6994         }
6995 }
6996
6997 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6998 {
6999         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7000
7001         /*
7002          * In case of a headless boot with force-on for a DP-managed connector,
7003          * these settings have to be != 0 to get an initial modeset.
7004          */
7005         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7006                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7007                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7008         }
7009
7011         aconnector->base.override_edid = true;
7012         create_eml_sink(aconnector);
7013 }
7014
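/*
 * Create a stream for the requested mode and validate it with DC. If
 * validation fails, the requested bpc is lowered in steps of 2 down to
 * a floor of 6; if the encoder still rejects the stream, one more
 * attempt is made with YCbCr 4:2:0 output forced.
 */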
7015 struct dc_stream_state *
7016 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7017                                 const struct drm_display_mode *drm_mode,
7018                                 const struct dm_connector_state *dm_state,
7019                                 const struct dc_stream_state *old_stream)
7020 {
7021         struct drm_connector *connector = &aconnector->base;
7022         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7023         struct dc_stream_state *stream;
7024         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7025         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7026         enum dc_status dc_result = DC_OK;
7027
7028         do {
7029                 stream = create_stream_for_sink(aconnector, drm_mode,
7030                                                 dm_state, old_stream,
7031                                                 requested_bpc);
7032                 if (stream == NULL) {
7033                         DRM_ERROR("Failed to create stream for sink!\n");
7034                         break;
7035                 }
7036
7037                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7038
7039                 if (dc_result != DC_OK) {
7040                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7041                                       drm_mode->hdisplay,
7042                                       drm_mode->vdisplay,
7043                                       drm_mode->clock,
7044                                       dc_result,
7045                                       dc_status_to_str(dc_result));
7046
7047                         dc_stream_release(stream);
7048                         stream = NULL;
7049                         requested_bpc -= 2; /* lower bpc to retry validation */
7050                 }
7051
7052         } while (stream == NULL && requested_bpc >= 6);
7053
7054         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7055                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7056
7057                 aconnector->force_yuv420_output = true;
7058                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7059                                                 dm_state, old_stream);
7060                 aconnector->force_yuv420_output = false;
7061         }
7062
7063         return stream;
7064 }
7065
7066 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7067                                    struct drm_display_mode *mode)
7068 {
7069         int result = MODE_ERROR;
7070         struct dc_sink *dc_sink;
7071         /* TODO: Unhardcode stream count */
7072         struct dc_stream_state *stream;
7073         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7074
7075         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7076                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7077                 return result;
7078
7079         /*
7080          * Only run this the first time mode_valid is called, to initialize
7081          * EDID mgmt.
7082          */
7083         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7084                 !aconnector->dc_em_sink)
7085                 handle_edid_mgmt(aconnector);
7086
7087         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7088
7089         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7090                                 aconnector->base.force != DRM_FORCE_ON) {
7091                 DRM_ERROR("dc_sink is NULL!\n");
7092                 goto fail;
7093         }
7094
7095         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7096         if (stream) {
7097                 dc_stream_release(stream);
7098                 result = MODE_OK;
7099         }
7100
7101 fail:
7102         /* TODO: error handling */
7103         return result;
7104 }
7105
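/*
 * Pack the connector's HDR static metadata into a DC info packet: the
 * Dynamic Range and Mastering infoframe is sent as-is on HDMI, and
 * wrapped in an SDP header on DP/eDP.
 */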
7106 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7107                                 struct dc_info_packet *out)
7108 {
7109         struct hdmi_drm_infoframe frame;
7110         unsigned char buf[30]; /* 26 + 4 */
7111         ssize_t len;
7112         int ret, i;
7113
7114         memset(out, 0, sizeof(*out));
7115
7116         if (!state->hdr_output_metadata)
7117                 return 0;
7118
7119         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7120         if (ret)
7121                 return ret;
7122
7123         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7124         if (len < 0)
7125                 return (int)len;
7126
7127         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7128         if (len != 30)
7129                 return -EINVAL;
7130
7131         /* Prepare the infopacket for DC. */
7132         switch (state->connector->connector_type) {
7133         case DRM_MODE_CONNECTOR_HDMIA:
7134                 out->hb0 = 0x87; /* type */
7135                 out->hb1 = 0x01; /* version */
7136                 out->hb2 = 0x1A; /* length */
7137                 out->sb[0] = buf[3]; /* checksum */
7138                 i = 1;
7139                 break;
7140
7141         case DRM_MODE_CONNECTOR_DisplayPort:
7142         case DRM_MODE_CONNECTOR_eDP:
7143                 out->hb0 = 0x00; /* sdp id, zero */
7144                 out->hb1 = 0x87; /* type */
7145                 out->hb2 = 0x1D; /* payload len - 1 */
7146                 out->hb3 = (0x13 << 2); /* sdp version */
7147                 out->sb[0] = 0x01; /* version */
7148                 out->sb[1] = 0x1A; /* length */
7149                 i = 2;
7150                 break;
7151
7152         default:
7153                 return -EINVAL;
7154         }
7155
7156         memcpy(&out->sb[i], &buf[4], 26);
7157         out->valid = true;
7158
7159         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7160                        sizeof(out->sb), false);
7161
7162         return 0;
7163 }
7164
7165 static int
7166 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7167                                  struct drm_atomic_state *state)
7168 {
7169         struct drm_connector_state *new_con_state =
7170                 drm_atomic_get_new_connector_state(state, conn);
7171         struct drm_connector_state *old_con_state =
7172                 drm_atomic_get_old_connector_state(state, conn);
7173         struct drm_crtc *crtc = new_con_state->crtc;
7174         struct drm_crtc_state *new_crtc_state;
7175         int ret;
7176
7177         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7178
7179         if (!crtc)
7180                 return 0;
7181
7182         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7183                 struct dc_info_packet hdr_infopacket;
7184
7185                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7186                 if (ret)
7187                         return ret;
7188
7189                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7190                 if (IS_ERR(new_crtc_state))
7191                         return PTR_ERR(new_crtc_state);
7192
7193                 /*
7194                  * DC considers the stream backends changed if the
7195                  * static metadata changes. Forcing the modeset also
7196                  * gives a simple way for userspace to switch from
7197                  * 8bpc to 10bpc when setting the metadata to enter
7198                  * or exit HDR.
7199                  *
7200                  * Changing the static metadata after it's been
7201                  * set is permissible, however. So only force a
7202                  * modeset if we're entering or exiting HDR.
7203                  */
7204                 new_crtc_state->mode_changed =
7205                         !old_con_state->hdr_output_metadata ||
7206                         !new_con_state->hdr_output_metadata;
7207         }
7208
7209         return 0;
7210 }
7211
7212 static const struct drm_connector_helper_funcs
7213 amdgpu_dm_connector_helper_funcs = {
7214         /*
7215          * If hotplugging a second, bigger display in FB console mode, the bigger
7216          * resolution modes will be filtered by drm_mode_validate_size() and be
7217          * missing after the user starts lightdm. So we need to renew the modes
7218          * list in the get_modes callback, not just return the modes count.
7219          */
7220         .get_modes = get_modes,
7221         .mode_valid = amdgpu_dm_connector_mode_valid,
7222         .atomic_check = amdgpu_dm_connector_atomic_check,
7223 };
7224
7225 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7226 {
7227 }
7228
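/*
 * Count the enabled, non-cursor planes on a CRTC. Planes that are not
 * part of the atomic state are assumed enabled, since they previously
 * passed validation.
 */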
7229 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7230 {
7231         struct drm_atomic_state *state = new_crtc_state->state;
7232         struct drm_plane *plane;
7233         int num_active = 0;
7234
7235         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7236                 struct drm_plane_state *new_plane_state;
7237
7238                 /* Cursor planes are "fake". */
7239                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7240                         continue;
7241
7242                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7243
7244                 if (!new_plane_state) {
7245                         /*
7246                          * The plane is enabled on the CRTC and hasn't changed
7247                          * state. This means that it previously passed
7248                          * validation and is therefore enabled.
7249                          */
7250                         num_active += 1;
7251                         continue;
7252                 }
7253
7254                 /* We need a framebuffer to be considered enabled. */
7255                 num_active += (new_plane_state->fb != NULL);
7256         }
7257
7258         return num_active;
7259 }
7260
7261 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7262                                          struct drm_crtc_state *new_crtc_state)
7263 {
7264         struct dm_crtc_state *dm_new_crtc_state =
7265                 to_dm_crtc_state(new_crtc_state);
7266
7267         dm_new_crtc_state->active_planes = 0;
7268
7269         if (!dm_new_crtc_state->stream)
7270                 return;
7271
7272         dm_new_crtc_state->active_planes =
7273                 count_crtc_active_planes(new_crtc_state);
7274 }
7275
7276 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7277                                        struct drm_atomic_state *state)
7278 {
7279         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7280                                                                           crtc);
7281         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7282         struct dc *dc = adev->dm.dc;
7283         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7284         int ret = -EINVAL;
7285
7286         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7287
7288         dm_update_crtc_active_planes(crtc, crtc_state);
7289
7290         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7291                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7292                 return ret;
7293         }
7294
7295         /*
7296          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7297          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7298          * planes are disabled, which is not supported by the hardware. And there is legacy
7299          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7300          */
7301         if (crtc_state->enable &&
7302             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7303                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7304                 return -EINVAL;
7305         }
7306
7307         /* In some use cases, like reset, no stream is attached */
7308         if (!dm_crtc_state->stream)
7309                 return 0;
7310
7311         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7312                 return 0;
7313
7314         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7315         return ret;
7316 }
7317
7318 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7319                                       const struct drm_display_mode *mode,
7320                                       struct drm_display_mode *adjusted_mode)
7321 {
7322         return true;
7323 }
7324
7325 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7326         .disable = dm_crtc_helper_disable,
7327         .atomic_check = dm_crtc_helper_atomic_check,
7328         .mode_fixup = dm_crtc_helper_mode_fixup,
7329         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7330 };
7331
7332 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7333 {
7335 }
7336
7337 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7338 {
7339         switch (display_color_depth) {
7340         case COLOR_DEPTH_666:
7341                 return 6;
7342         case COLOR_DEPTH_888:
7343                 return 8;
7344         case COLOR_DEPTH_101010:
7345                 return 10;
7346         case COLOR_DEPTH_121212:
7347                 return 12;
7348         case COLOR_DEPTH_141414:
7349                 return 14;
7350         case COLOR_DEPTH_161616:
7351                 return 16;
7352         default:
7353                 break;
7354         }
7355         return 0;
7356 }
7357
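/*
 * For MST connectors, compute the payload bandwidth number (PBN) of the
 * adjusted mode and reserve VCPI slots for it in the atomic state.
 * E.g. a 148.5 MHz pixel clock at 24 bpp gives
 * drm_dp_calc_pbn_mode(148500, 24, false), roughly 530 PBN.
 */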
7358 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7359                                           struct drm_crtc_state *crtc_state,
7360                                           struct drm_connector_state *conn_state)
7361 {
7362         struct drm_atomic_state *state = crtc_state->state;
7363         struct drm_connector *connector = conn_state->connector;
7364         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7365         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7366         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7367         struct drm_dp_mst_topology_mgr *mst_mgr;
7368         struct drm_dp_mst_port *mst_port;
7369         enum dc_color_depth color_depth;
7370         int clock, bpp = 0;
7371         bool is_y420 = false;
7372
7373         if (!aconnector->port || !aconnector->dc_sink)
7374                 return 0;
7375
7376         mst_port = aconnector->port;
7377         mst_mgr = &aconnector->mst_port->mst_mgr;
7378
7379         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7380                 return 0;
7381
7382         if (!state->duplicated) {
7383                 int max_bpc = conn_state->max_requested_bpc;
7384                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7385                                 aconnector->force_yuv420_output;
7386                 color_depth = convert_color_depth_from_display_info(connector,
7387                                                                     is_y420,
7388                                                                     max_bpc);
7389                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7390                 clock = adjusted_mode->clock;
7391                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7392         }
7393         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7394                                                                            mst_mgr,
7395                                                                            mst_port,
7396                                                                            dm_new_connector_state->pbn,
7397                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7398         if (dm_new_connector_state->vcpi_slots < 0) {
7399                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7400                 return dm_new_connector_state->vcpi_slots;
7401         }
7402         return 0;
7403 }
7404
7405 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7406         .disable = dm_encoder_helper_disable,
7407         .atomic_check = dm_encoder_helper_atomic_check
7408 };
7409
7410 #if defined(CONFIG_DRM_AMD_DC_DCN)
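/*
 * Translate the per-stream PBN values computed by the MST DSC fairness
 * code into VCPI slot counts, and enable or disable DSC on each MST
 * port accordingly.
 */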
7411 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7412                                             struct dc_state *dc_state,
7413                                             struct dsc_mst_fairness_vars *vars)
7414 {
7415         struct dc_stream_state *stream = NULL;
7416         struct drm_connector *connector;
7417         struct drm_connector_state *new_con_state;
7418         struct amdgpu_dm_connector *aconnector;
7419         struct dm_connector_state *dm_conn_state;
7420         int i, j;
7421         int vcpi, pbn_div, pbn, slot_num = 0;
7422
7423         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7424
7425                 aconnector = to_amdgpu_dm_connector(connector);
7426
7427                 if (!aconnector->port)
7428                         continue;
7429
7430                 if (!new_con_state || !new_con_state->crtc)
7431                         continue;
7432
7433                 dm_conn_state = to_dm_connector_state(new_con_state);
7434
7435                 for (j = 0; j < dc_state->stream_count; j++) {
7436                         stream = dc_state->streams[j];
7437                         if (!stream)
7438                                 continue;
7439
7440                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7441                                 break;
7442
7443                         stream = NULL;
7444                 }
7445
7446                 if (!stream)
7447                         continue;
7448
7449                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7450                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7451                 for (j = 0; j < dc_state->stream_count; j++) {
7452                         if (vars[j].aconnector == aconnector) {
7453                                 pbn = vars[j].pbn;
7454                                 break;
7455                         }
7456                 }
7457
7458                 if (j == dc_state->stream_count)
7459                         continue;
7460
7461                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7462
7463                 if (stream->timing.flags.DSC != 1) {
7464                         dm_conn_state->pbn = pbn;
7465                         dm_conn_state->vcpi_slots = slot_num;
7466
7467                         drm_dp_mst_atomic_enable_dsc(state,
7468                                                      aconnector->port,
7469                                                      dm_conn_state->pbn,
7470                                                      0,
7471                                                      false);
7472                         continue;
7473                 }
7474
7475                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7476                                                     aconnector->port,
7477                                                     pbn, pbn_div,
7478                                                     true);
7479                 if (vcpi < 0)
7480                         return vcpi;
7481
7482                 dm_conn_state->pbn = pbn;
7483                 dm_conn_state->vcpi_slots = vcpi;
7484         }
7485         return 0;
7486 }
7487 #endif
7488
7489 static void dm_drm_plane_reset(struct drm_plane *plane)
7490 {
7491         struct dm_plane_state *amdgpu_state = NULL;
7492
7493         if (plane->state)
7494                 plane->funcs->atomic_destroy_state(plane, plane->state);
7495
7496         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7497         WARN_ON(amdgpu_state == NULL);
7498
7499         if (amdgpu_state)
7500                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7501 }
7502
7503 static struct drm_plane_state *
7504 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7505 {
7506         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7507
7508         old_dm_plane_state = to_dm_plane_state(plane->state);
7509         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7510         if (!dm_plane_state)
7511                 return NULL;
7512
7513         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7514
7515         if (old_dm_plane_state->dc_state) {
7516                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7517                 dc_plane_state_retain(dm_plane_state->dc_state);
7518         }
7519
7520         return &dm_plane_state->base;
7521 }
7522
7523 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7524                                 struct drm_plane_state *state)
7525 {
7526         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7527
7528         if (dm_plane_state->dc_state)
7529                 dc_plane_state_release(dm_plane_state->dc_state);
7530
7531         drm_atomic_helper_plane_destroy_state(plane, state);
7532 }
7533
7534 static const struct drm_plane_funcs dm_plane_funcs = {
7535         .update_plane   = drm_atomic_helper_update_plane,
7536         .disable_plane  = drm_atomic_helper_disable_plane,
7537         .destroy        = drm_primary_helper_destroy,
7538         .reset = dm_drm_plane_reset,
7539         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7540         .atomic_destroy_state = dm_drm_plane_destroy_state,
7541         .format_mod_supported = dm_plane_format_mod_supported,
7542 };
7543
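/*
 * Pin the framebuffer BO into a displayable domain (cursors must be in
 * VRAM), bind it into GART, and record the resulting GPU address. For
 * newly created planes the DC buffer attributes are also prefilled here,
 * since afb->address is not known at atomic check time.
 */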
7544 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7545                                       struct drm_plane_state *new_state)
7546 {
7547         struct amdgpu_framebuffer *afb;
7548         struct drm_gem_object *obj;
7549         struct amdgpu_device *adev;
7550         struct amdgpu_bo *rbo;
7551         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7552         uint32_t domain;
7553         int r;
7554
7555         if (!new_state->fb) {
7556                 DRM_DEBUG_KMS("No FB bound\n");
7557                 return 0;
7558         }
7559
7560         afb = to_amdgpu_framebuffer(new_state->fb);
7561         obj = new_state->fb->obj[0];
7562         rbo = gem_to_amdgpu_bo(obj);
7563         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7564
7565         r = amdgpu_bo_reserve(rbo, true);
7566         if (r) {
7567                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7568                 return r;
7569         }
7570
7571         r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7572         if (r) {
7573                 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7574                 goto error_unlock;
7575         }
7576
7577         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7578                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7579         else
7580                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7581
7582         r = amdgpu_bo_pin(rbo, domain);
7583         if (unlikely(r != 0)) {
7584                 if (r != -ERESTARTSYS)
7585                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7586                 goto error_unlock;
7587         }
7588
7589         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7590         if (unlikely(r != 0)) {
7591                 DRM_ERROR("%p bind failed\n", rbo);
7592                 goto error_unpin;
7593         }
7594
7595         amdgpu_bo_unreserve(rbo);
7596
7597         afb->address = amdgpu_bo_gpu_offset(rbo);
7598
7599         amdgpu_bo_ref(rbo);
7600
7601         /*
7602          * We don't do surface updates on planes that have been newly created,
7603          * but we also don't have the afb->address during atomic check.
7604          *
7605          * Fill in buffer attributes depending on the address here, but only on
7606          * newly created planes since they're not being used by DC yet and this
7607          * won't modify global state.
7608          */
7609         dm_plane_state_old = to_dm_plane_state(plane->state);
7610         dm_plane_state_new = to_dm_plane_state(new_state);
7611
7612         if (dm_plane_state_new->dc_state &&
7613             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7614                 struct dc_plane_state *plane_state =
7615                         dm_plane_state_new->dc_state;
7616                 bool force_disable_dcc = !plane_state->dcc.enable;
7617
7618                 fill_plane_buffer_attributes(
7619                         adev, afb, plane_state->format, plane_state->rotation,
7620                         afb->tiling_flags,
7621                         &plane_state->tiling_info, &plane_state->plane_size,
7622                         &plane_state->dcc, &plane_state->address,
7623                         afb->tmz_surface, force_disable_dcc);
7624         }
7625
7626         return 0;
7627
7628 error_unpin:
7629         amdgpu_bo_unpin(rbo);
7630
7631 error_unlock:
7632         amdgpu_bo_unreserve(rbo);
7633         return r;
7634 }
7635
7636 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7637                                        struct drm_plane_state *old_state)
7638 {
7639         struct amdgpu_bo *rbo;
7640         int r;
7641
7642         if (!old_state->fb)
7643                 return;
7644
7645         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7646         r = amdgpu_bo_reserve(rbo, false);
7647         if (unlikely(r)) {
7648                 DRM_ERROR("failed to reserve rbo before unpin\n");
7649                 return;
7650         }
7651
7652         amdgpu_bo_unpin(rbo);
7653         amdgpu_bo_unreserve(rbo);
7654         amdgpu_bo_unref(&rbo);
7655 }
7656
7657 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7658                                        struct drm_crtc_state *new_crtc_state)
7659 {
7660         struct drm_framebuffer *fb = state->fb;
7661         int min_downscale, max_upscale;
7662         int min_scale = 0;
7663         int max_scale = INT_MAX;
7664
7665         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7666         if (fb && state->crtc) {
7667                 /* Validate viewport to cover the case when only the position changes */
7668                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7669                         int viewport_width = state->crtc_w;
7670                         int viewport_height = state->crtc_h;
7671
7672                         if (state->crtc_x < 0)
7673                                 viewport_width += state->crtc_x;
7674                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7675                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7676
7677                         if (state->crtc_y < 0)
7678                                 viewport_height += state->crtc_y;
7679                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7680                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7681
7682                         if (viewport_width < 0 || viewport_height < 0) {
7683                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7684                                 return -EINVAL;
7685                         } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width is because of pipe-split. */
7686                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7687                                 return -EINVAL;
7688                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7689                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7690                                 return -EINVAL;
7691                         }
7692
7693                 }
7694
7695                 /* Get min/max allowed scaling factors from plane caps. */
7696                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7697                                              &min_downscale, &max_upscale);
7698                 /*
7699                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7700                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7701                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
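                 * E.g. max_upscale == 16000 (16x in dc units) gives
                 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in
                 * 16.16 fixed point.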
7702                  */
7703                 min_scale = (1000 << 16) / max_upscale;
7704                 max_scale = (1000 << 16) / min_downscale;
7705         }
7706
7707         return drm_atomic_helper_check_plane_state(
7708                 state, new_crtc_state, min_scale, max_scale, true, true);
7709 }
7710
7711 static int dm_plane_atomic_check(struct drm_plane *plane,
7712                                  struct drm_atomic_state *state)
7713 {
7714         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7715                                                                                  plane);
7716         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7717         struct dc *dc = adev->dm.dc;
7718         struct dm_plane_state *dm_plane_state;
7719         struct dc_scaling_info scaling_info;
7720         struct drm_crtc_state *new_crtc_state;
7721         int ret;
7722
7723         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7724
7725         dm_plane_state = to_dm_plane_state(new_plane_state);
7726
7727         if (!dm_plane_state->dc_state)
7728                 return 0;
7729
7730         new_crtc_state =
7731                 drm_atomic_get_new_crtc_state(state,
7732                                               new_plane_state->crtc);
7733         if (!new_crtc_state)
7734                 return -EINVAL;
7735
7736         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7737         if (ret)
7738                 return ret;
7739
7740         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7741         if (ret)
7742                 return ret;
7743
7744         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7745                 return 0;
7746
7747         return -EINVAL;
7748 }
7749
7750 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7751                                        struct drm_atomic_state *state)
7752 {
7753         /* Only support async updates on cursor planes. */
7754         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7755                 return -EINVAL;
7756
7757         return 0;
7758 }
7759
7760 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7761                                          struct drm_atomic_state *state)
7762 {
7763         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7764                                                                            plane);
7765         struct drm_plane_state *old_state =
7766                 drm_atomic_get_old_plane_state(state, plane);
7767
7768         trace_amdgpu_dm_atomic_update_cursor(new_state);
7769
7770         swap(plane->state->fb, new_state->fb);
7771
7772         plane->state->src_x = new_state->src_x;
7773         plane->state->src_y = new_state->src_y;
7774         plane->state->src_w = new_state->src_w;
7775         plane->state->src_h = new_state->src_h;
7776         plane->state->crtc_x = new_state->crtc_x;
7777         plane->state->crtc_y = new_state->crtc_y;
7778         plane->state->crtc_w = new_state->crtc_w;
7779         plane->state->crtc_h = new_state->crtc_h;
7780
7781         handle_cursor_update(plane, old_state);
7782 }
7783
7784 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7785         .prepare_fb = dm_plane_helper_prepare_fb,
7786         .cleanup_fb = dm_plane_helper_cleanup_fb,
7787         .atomic_check = dm_plane_atomic_check,
7788         .atomic_async_check = dm_plane_atomic_async_check,
7789         .atomic_async_update = dm_plane_atomic_async_update
7790 };
7791
7792 /*
7793  * TODO: these are currently initialized to rgb formats only.
7794  * For future use cases we should either initialize them dynamically based on
7795  * plane capabilities, or initialize this array to all formats, so internal drm
7796  * check will succeed, and let DC implement proper check
7797  */
7798 static const uint32_t rgb_formats[] = {
7799         DRM_FORMAT_XRGB8888,
7800         DRM_FORMAT_ARGB8888,
7801         DRM_FORMAT_RGBA8888,
7802         DRM_FORMAT_XRGB2101010,
7803         DRM_FORMAT_XBGR2101010,
7804         DRM_FORMAT_ARGB2101010,
7805         DRM_FORMAT_ABGR2101010,
7806         DRM_FORMAT_XRGB16161616,
7807         DRM_FORMAT_XBGR16161616,
7808         DRM_FORMAT_ARGB16161616,
7809         DRM_FORMAT_ABGR16161616,
7810         DRM_FORMAT_XBGR8888,
7811         DRM_FORMAT_ABGR8888,
7812         DRM_FORMAT_RGB565,
7813 };
7814
7815 static const uint32_t overlay_formats[] = {
7816         DRM_FORMAT_XRGB8888,
7817         DRM_FORMAT_ARGB8888,
7818         DRM_FORMAT_RGBA8888,
7819         DRM_FORMAT_XBGR8888,
7820         DRM_FORMAT_ABGR8888,
7821         DRM_FORMAT_RGB565
7822 };
7823
7824 static const u32 cursor_formats[] = {
7825         DRM_FORMAT_ARGB8888
7826 };
7827
7828 static int get_plane_formats(const struct drm_plane *plane,
7829                              const struct dc_plane_cap *plane_cap,
7830                              uint32_t *formats, int max_formats)
7831 {
7832         int i, num_formats = 0;
7833
7834         /*
7835          * TODO: Query support for each group of formats directly from
7836          * DC plane caps. This will require adding more formats to the
7837          * caps list.
7838          */
7839
7840         switch (plane->type) {
7841         case DRM_PLANE_TYPE_PRIMARY:
7842                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7843                         if (num_formats >= max_formats)
7844                                 break;
7845
7846                         formats[num_formats++] = rgb_formats[i];
7847                 }
7848
7849                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7850                         formats[num_formats++] = DRM_FORMAT_NV12;
7851                 if (plane_cap && plane_cap->pixel_format_support.p010)
7852                         formats[num_formats++] = DRM_FORMAT_P010;
7853                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7854                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7855                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7856                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7857                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7858                 }
7859                 break;
7860
7861         case DRM_PLANE_TYPE_OVERLAY:
7862                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7863                         if (num_formats >= max_formats)
7864                                 break;
7865
7866                         formats[num_formats++] = overlay_formats[i];
7867                 }
7868                 break;
7869
7870         case DRM_PLANE_TYPE_CURSOR:
7871                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7872                         if (num_formats >= max_formats)
7873                                 break;
7874
7875                         formats[num_formats++] = cursor_formats[i];
7876                 }
7877                 break;
7878         }
7879
7880         return num_formats;
7881 }
7882
7883 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7884                                 struct drm_plane *plane,
7885                                 unsigned long possible_crtcs,
7886                                 const struct dc_plane_cap *plane_cap)
7887 {
7888         uint32_t formats[32];
7889         int num_formats;
7890         int res = -EPERM;
7891         unsigned int supported_rotations;
7892         uint64_t *modifiers = NULL;
7893
7894         num_formats = get_plane_formats(plane, plane_cap, formats,
7895                                         ARRAY_SIZE(formats));
7896
7897         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7898         if (res)
7899                 return res;
7900
7901         if (modifiers == NULL)
7902                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7903
7904         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7905                                        &dm_plane_funcs, formats, num_formats,
7906                                        modifiers, plane->type, NULL);
7907         kfree(modifiers);
7908         if (res)
7909                 return res;
7910
7911         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7912             plane_cap && plane_cap->per_pixel_alpha) {
7913                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7914                                           BIT(DRM_MODE_BLEND_PREMULTI);
7915
7916                 drm_plane_create_alpha_property(plane);
7917                 drm_plane_create_blend_mode_property(plane, blend_caps);
7918         }
7919
7920         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7921             plane_cap &&
7922             (plane_cap->pixel_format_support.nv12 ||
7923              plane_cap->pixel_format_support.p010)) {
7924                 /* This only affects YUV formats. */
7925                 drm_plane_create_color_properties(
7926                         plane,
7927                         BIT(DRM_COLOR_YCBCR_BT601) |
7928                         BIT(DRM_COLOR_YCBCR_BT709) |
7929                         BIT(DRM_COLOR_YCBCR_BT2020),
7930                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7931                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7932                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7933         }
7934
7935         supported_rotations =
7936                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7937                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7938
7939         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7940             plane->type != DRM_PLANE_TYPE_CURSOR)
7941                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7942                                                    supported_rotations);
7943
7944         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7945
7946         /* Create (reset) the plane state */
7947         if (plane->funcs->reset)
7948                 plane->funcs->reset(plane);
7949
7950         return 0;
7951 }
7952
7953 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7954                                struct drm_plane *plane,
7955                                uint32_t crtc_index)
7956 {
7957         struct amdgpu_crtc *acrtc = NULL;
7958         struct drm_plane *cursor_plane;
        int res = -ENOMEM;
7961
7962         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7963         if (!cursor_plane)
7964                 goto fail;
7965
7966         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

7969         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7970         if (!acrtc)
7971                 goto fail;
7972
7973         res = drm_crtc_init_with_planes(
7974                         dm->ddev,
7975                         &acrtc->base,
7976                         plane,
7977                         cursor_plane,
7978                         &amdgpu_dm_crtc_funcs, NULL);
7979
7980         if (res)
7981                 goto fail;
7982
7983         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7984
7985         /* Create (reset) the plane state */
7986         if (acrtc->base.funcs->reset)
7987                 acrtc->base.funcs->reset(&acrtc->base);
7988
7989         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7990         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7991
7992         acrtc->crtc_id = crtc_index;
7993         acrtc->base.enabled = false;
7994         acrtc->otg_inst = -1;
7995
7996         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7997         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7998                                    true, MAX_COLOR_LUT_ENTRIES);
7999         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8000
8001         return 0;
8002
8003 fail:
8004         kfree(acrtc);
8005         kfree(cursor_plane);
8006         return res;
8007 }
8008
8009
8010 static int to_drm_connector_type(enum signal_type st)
8011 {
8012         switch (st) {
8013         case SIGNAL_TYPE_HDMI_TYPE_A:
8014                 return DRM_MODE_CONNECTOR_HDMIA;
8015         case SIGNAL_TYPE_EDP:
8016                 return DRM_MODE_CONNECTOR_eDP;
8017         case SIGNAL_TYPE_LVDS:
8018                 return DRM_MODE_CONNECTOR_LVDS;
8019         case SIGNAL_TYPE_RGB:
8020                 return DRM_MODE_CONNECTOR_VGA;
8021         case SIGNAL_TYPE_DISPLAY_PORT:
8022         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8023                 return DRM_MODE_CONNECTOR_DisplayPort;
8024         case SIGNAL_TYPE_DVI_DUAL_LINK:
8025         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8026                 return DRM_MODE_CONNECTOR_DVID;
8027         case SIGNAL_TYPE_VIRTUAL:
8028                 return DRM_MODE_CONNECTOR_VIRTUAL;
8029
8030         default:
8031                 return DRM_MODE_CONNECTOR_Unknown;
8032         }
8033 }
8034
8035 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8036 {
8037         struct drm_encoder *encoder;
8038
8039         /* There is only one encoder per connector */
8040         drm_connector_for_each_possible_encoder(connector, encoder)
8041                 return encoder;
8042
8043         return NULL;
8044 }
8045
8046 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8047 {
8048         struct drm_encoder *encoder;
8049         struct amdgpu_encoder *amdgpu_encoder;
8050
8051         encoder = amdgpu_dm_connector_to_encoder(connector);
8052
8053         if (encoder == NULL)
8054                 return;
8055
8056         amdgpu_encoder = to_amdgpu_encoder(encoder);
8057
8058         amdgpu_encoder->native_mode.clock = 0;
8059
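        /*
         * The probed list was sorted by amdgpu_dm_connector_ddc_get_modes(),
         * with preferred modes first and higher resolutions before lower
         * ones, so only the first entry needs to be examined here.
         */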
8060         if (!list_empty(&connector->probed_modes)) {
8061                 struct drm_display_mode *preferred_mode = NULL;
8062
8063                 list_for_each_entry(preferred_mode,
8064                                     &connector->probed_modes,
8065                                     head) {
8066                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8067                                 amdgpu_encoder->native_mode = *preferred_mode;
8068
8069                         break;
                }
        }
8073 }
8074
8075 static struct drm_display_mode *
8076 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8077                              char *name,
8078                              int hdisplay, int vdisplay)
8079 {
8080         struct drm_device *dev = encoder->dev;
8081         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8082         struct drm_display_mode *mode = NULL;
8083         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8084
8085         mode = drm_mode_duplicate(dev, native_mode);
8086
8087         if (mode == NULL)
8088                 return NULL;
8089
8090         mode->hdisplay = hdisplay;
8091         mode->vdisplay = vdisplay;
8092         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8093         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8094
8095         return mode;
}
8098
8099 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8100                                                  struct drm_connector *connector)
8101 {
8102         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8103         struct drm_display_mode *mode = NULL;
8104         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8105         struct amdgpu_dm_connector *amdgpu_dm_connector =
8106                                 to_amdgpu_dm_connector(connector);
8107         int i;
8108         int n;
8109         struct mode_size {
8110                 char name[DRM_DISPLAY_MODE_LEN];
8111                 int w;
8112                 int h;
8113         } common_modes[] = {
8114                 {  "640x480",  640,  480},
8115                 {  "800x600",  800,  600},
8116                 { "1024x768", 1024,  768},
8117                 { "1280x720", 1280,  720},
8118                 { "1280x800", 1280,  800},
8119                 {"1280x1024", 1280, 1024},
8120                 { "1440x900", 1440,  900},
8121                 {"1680x1050", 1680, 1050},
8122                 {"1600x1200", 1600, 1200},
8123                 {"1920x1080", 1920, 1080},
8124                 {"1920x1200", 1920, 1200}
8125         };
8126
8127         n = ARRAY_SIZE(common_modes);
8128
8129         for (i = 0; i < n; i++) {
8130                 struct drm_display_mode *curmode = NULL;
8131                 bool mode_existed = false;
8132
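                /* Skip modes larger than the native mode, and the native
                 * size itself; the native mode is already in the probed list.
                 */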
8133                 if (common_modes[i].w > native_mode->hdisplay ||
8134                     common_modes[i].h > native_mode->vdisplay ||
8135                    (common_modes[i].w == native_mode->hdisplay &&
8136                     common_modes[i].h == native_mode->vdisplay))
8137                         continue;
8138
8139                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8140                         if (common_modes[i].w == curmode->hdisplay &&
8141                             common_modes[i].h == curmode->vdisplay) {
8142                                 mode_existed = true;
8143                                 break;
8144                         }
8145                 }
8146
8147                 if (mode_existed)
8148                         continue;
8149
8150                 mode = amdgpu_dm_create_common_mode(encoder,
8151                                 common_modes[i].name, common_modes[i].w,
8152                                 common_modes[i].h);
8153                 if (!mode)
8154                         continue;
8155
8156                 drm_mode_probed_add(connector, mode);
8157                 amdgpu_dm_connector->num_modes++;
8158         }
8159 }
8160
8161 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8162 {
8163         struct drm_encoder *encoder;
8164         struct amdgpu_encoder *amdgpu_encoder;
8165         const struct drm_display_mode *native_mode;
8166
8167         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8168             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8169                 return;
8170
8171         encoder = amdgpu_dm_connector_to_encoder(connector);
8172         if (!encoder)
8173                 return;
8174
8175         amdgpu_encoder = to_amdgpu_encoder(encoder);
8176
8177         native_mode = &amdgpu_encoder->native_mode;
8178         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8179                 return;
8180
8181         drm_connector_set_panel_orientation_with_quirk(connector,
8182                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8183                                                        native_mode->hdisplay,
8184                                                        native_mode->vdisplay);
8185 }
8186
8187 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8188                                               struct edid *edid)
8189 {
8190         struct amdgpu_dm_connector *amdgpu_dm_connector =
8191                         to_amdgpu_dm_connector(connector);
8192
8193         if (edid) {
8194                 /* empty probed_modes */
8195                 INIT_LIST_HEAD(&connector->probed_modes);
8196                 amdgpu_dm_connector->num_modes =
8197                                 drm_add_edid_modes(connector, edid);
8198
                /* Sort the probed modes before calling
                 * amdgpu_dm_get_native_mode(), since an EDID can contain
                 * more than one preferred mode. A mode later in the probed
                 * list may be preferred at a higher resolution: for example,
                 * 3840x2160 in the base EDID preferred timing and 4096x2160
                 * in a DisplayID extension block.
                 */
8207                 drm_mode_sort(&connector->probed_modes);
8208                 amdgpu_dm_get_native_mode(connector);
8209
8210                 /* Freesync capabilities are reset by calling
8211                  * drm_add_edid_modes() and need to be
8212                  * restored here.
8213                  */
8214                 amdgpu_dm_update_freesync_caps(connector, edid);
8215
8216                 amdgpu_set_panel_orientation(connector);
8217         } else {
8218                 amdgpu_dm_connector->num_modes = 0;
8219         }
8220 }
8221
8222 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8223                               struct drm_display_mode *mode)
8224 {
8225         struct drm_display_mode *m;
8226
        list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8228                 if (drm_mode_equal(m, mode))
8229                         return true;
8230         }
8231
8232         return false;
8233 }
8234
8235 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8236 {
8237         const struct drm_display_mode *m;
8238         struct drm_display_mode *new_mode;
8239         uint i;
8240         uint32_t new_modes_count = 0;
8241
8242         /* Standard FPS values
8243          *
8244          * 23.976       - TV/NTSC
8245          * 24           - Cinema
8246          * 25           - TV/PAL
8247          * 29.97        - TV/NTSC
8248          * 30           - TV/NTSC
8249          * 48           - Cinema HFR
8250          * 50           - TV/PAL
8251          * 60           - Commonly used
8252          * 48,72,96,120 - Multiples of 24
8253          */
8254         static const uint32_t common_rates[] = {
8255                 23976, 24000, 25000, 29970, 30000,
8256                 48000, 50000, 60000, 72000, 96000, 120000
8257         };
8258
8259         /*
8260          * Find mode with highest refresh rate with the same resolution
8261          * as the preferred mode. Some monitors report a preferred mode
8262          * with lower resolution than the highest refresh rate supported.
8263          */
8264
8265         m = get_highest_refresh_rate_mode(aconnector, true);
8266         if (!m)
8267                 return 0;
8268
8269         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8270                 uint64_t target_vtotal, target_vtotal_diff;
8271                 uint64_t num, den;
8272
8273                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8274                         continue;
8275
8276                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8277                     common_rates[i] > aconnector->max_vfreq * 1000)
8278                         continue;
8279
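                /*
                 * vrefresh in mHz is clock (kHz) * 10^6 / (htotal * vtotal),
                 * so the vtotal that hits the target rate follows from
                 * target_vtotal = clock * 10^6 / (rate_mHz * htotal); only
                 * the vertical timing is stretched, while clock and htotal
                 * stay fixed.
                 */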
8280                 num = (unsigned long long)m->clock * 1000 * 1000;
8281                 den = common_rates[i] * (unsigned long long)m->htotal;
8282                 target_vtotal = div_u64(num, den);
8283                 target_vtotal_diff = target_vtotal - m->vtotal;
8284
8285                 /* Check for illegal modes */
8286                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8287                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8288                     m->vtotal + target_vtotal_diff < m->vsync_end)
8289                         continue;
8290
8291                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8292                 if (!new_mode)
8293                         goto out;
8294
8295                 new_mode->vtotal += (u16)target_vtotal_diff;
8296                 new_mode->vsync_start += (u16)target_vtotal_diff;
8297                 new_mode->vsync_end += (u16)target_vtotal_diff;
8298                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8299                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8300
8301                 if (!is_duplicate_mode(aconnector, new_mode)) {
8302                         drm_mode_probed_add(&aconnector->base, new_mode);
8303                         new_modes_count += 1;
                } else {
                        drm_mode_destroy(aconnector->base.dev, new_mode);
                }
8306         }
8307  out:
8308         return new_modes_count;
8309 }
8310
8311 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8312                                                    struct edid *edid)
8313 {
8314         struct amdgpu_dm_connector *amdgpu_dm_connector =
8315                 to_amdgpu_dm_connector(connector);
8316
8317         if (!edid)
8318                 return;
8319
8320         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8321                 amdgpu_dm_connector->num_modes +=
8322                         add_fs_modes(amdgpu_dm_connector);
8323 }
8324
8325 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8326 {
8327         struct amdgpu_dm_connector *amdgpu_dm_connector =
8328                         to_amdgpu_dm_connector(connector);
8329         struct drm_encoder *encoder;
8330         struct edid *edid = amdgpu_dm_connector->edid;
8331
8332         encoder = amdgpu_dm_connector_to_encoder(connector);
8333
8334         if (!drm_edid_is_valid(edid)) {
8335                 amdgpu_dm_connector->num_modes =
8336                                 drm_add_modes_noedid(connector, 640, 480);
8337         } else {
8338                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8339                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8340                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8341         }
8342         amdgpu_dm_fbc_init(connector);
8343
8344         return amdgpu_dm_connector->num_modes;
8345 }
8346
8347 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8348                                      struct amdgpu_dm_connector *aconnector,
8349                                      int connector_type,
8350                                      struct dc_link *link,
8351                                      int link_index)
8352 {
8353         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8354
8355         /*
8356          * Some of the properties below require access to state, like bpc.
8357          * Allocate some default initial connector state with our reset helper.
8358          */
8359         if (aconnector->base.funcs->reset)
8360                 aconnector->base.funcs->reset(&aconnector->base);
8361
8362         aconnector->connector_id = link_index;
8363         aconnector->dc_link = link;
8364         aconnector->base.interlace_allowed = false;
8365         aconnector->base.doublescan_allowed = false;
8366         aconnector->base.stereo_allowed = false;
8367         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8368         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8369         aconnector->audio_inst = -1;
8370         mutex_init(&aconnector->hpd_lock);
8371
        /*
         * Configure HPD support: connector->polled defaults to 0, which
         * means HPD hot plug detection is not supported.
         */
8376         switch (connector_type) {
8377         case DRM_MODE_CONNECTOR_HDMIA:
8378                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
                aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.hdmi_ycbcr420_supported;
8381                 break;
8382         case DRM_MODE_CONNECTOR_DisplayPort:
8383                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8384                 link->link_enc = link_enc_cfg_get_link_enc(link);
8385                 ASSERT(link->link_enc);
                if (link->link_enc)
                        aconnector->base.ycbcr_420_allowed =
                                link->link_enc->features.dp_ycbcr420_supported;
8389                 break;
8390         case DRM_MODE_CONNECTOR_DVID:
8391                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8392                 break;
8393         default:
8394                 break;
8395         }
8396
8397         drm_object_attach_property(&aconnector->base.base,
8398                                 dm->ddev->mode_config.scaling_mode_property,
8399                                 DRM_MODE_SCALE_NONE);
8400
8401         drm_object_attach_property(&aconnector->base.base,
8402                                 adev->mode_info.underscan_property,
8403                                 UNDERSCAN_OFF);
8404         drm_object_attach_property(&aconnector->base.base,
8405                                 adev->mode_info.underscan_hborder_property,
8406                                 0);
8407         drm_object_attach_property(&aconnector->base.base,
8408                                 adev->mode_info.underscan_vborder_property,
8409                                 0);
8410
8411         if (!aconnector->mst_port)
8412                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8413
        /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
8415         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8416         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8417
8418         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8419             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8420                 drm_object_attach_property(&aconnector->base.base,
8421                                 adev->mode_info.abm_level_property, 0);
8422         }
8423
8424         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8425             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8426             connector_type == DRM_MODE_CONNECTOR_eDP) {
8427                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8428
8429                 if (!aconnector->mst_port)
8430                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8431
8432 #ifdef CONFIG_DRM_AMD_DC_HDCP
8433                 if (adev->dm.hdcp_workqueue)
8434                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8435 #endif
8436         }
8437 }
8438
8439 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8440                               struct i2c_msg *msgs, int num)
8441 {
8442         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8443         struct ddc_service *ddc_service = i2c->ddc_service;
8444         struct i2c_command cmd;
8445         int i;
8446         int result = -EIO;
8447
8448         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8449
8450         if (!cmd.payloads)
8451                 return result;
8452
8453         cmd.number_of_payloads = num;
8454         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
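        /* Bus speed in kHz; 100 kHz is standard-mode I2C. */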
8455         cmd.speed = 100;
8456
8457         for (i = 0; i < num; i++) {
8458                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8459                 cmd.payloads[i].address = msgs[i].addr;
8460                 cmd.payloads[i].length = msgs[i].len;
8461                 cmd.payloads[i].data = msgs[i].buf;
8462         }
8463
8464         if (dc_submit_i2c(
8465                         ddc_service->ctx->dc,
8466                         ddc_service->ddc_pin->hw_info.ddc_channel,
8467                         &cmd))
8468                 result = num;
8469
8470         kfree(cmd.payloads);
8471         return result;
8472 }
8473
8474 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8475 {
8476         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8477 }
8478
8479 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8480         .master_xfer = amdgpu_dm_i2c_xfer,
8481         .functionality = amdgpu_dm_i2c_func,
8482 };
8483
8484 static struct amdgpu_i2c_adapter *
8485 create_i2c(struct ddc_service *ddc_service,
8486            int link_index,
8487            int *res)
8488 {
8489         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8490         struct amdgpu_i2c_adapter *i2c;
8491
8492         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8493         if (!i2c)
8494                 return NULL;
8495         i2c->base.owner = THIS_MODULE;
8496         i2c->base.class = I2C_CLASS_DDC;
8497         i2c->base.dev.parent = &adev->pdev->dev;
8498         i2c->base.algo = &amdgpu_dm_i2c_algo;
8499         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8500         i2c_set_adapdata(&i2c->base, i2c);
8501         i2c->ddc_service = ddc_service;
8502         if (i2c->ddc_service->ddc_pin)
8503                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8504
8505         return i2c;
8506 }
8507
8508
8509 /*
8510  * Note: this function assumes that dc_link_detect() was called for the
8511  * dc_link which will be represented by this aconnector.
8512  */
8513 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8514                                     struct amdgpu_dm_connector *aconnector,
8515                                     uint32_t link_index,
8516                                     struct amdgpu_encoder *aencoder)
8517 {
8518         int res = 0;
8519         int connector_type;
8520         struct dc *dc = dm->dc;
8521         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8522         struct amdgpu_i2c_adapter *i2c;
8523
8524         link->priv = aconnector;
8525
8526         DRM_DEBUG_DRIVER("%s()\n", __func__);
8527
8528         i2c = create_i2c(link->ddc, link->link_index, &res);
8529         if (!i2c) {
8530                 DRM_ERROR("Failed to create i2c adapter data\n");
8531                 return -ENOMEM;
8532         }
8533
8534         aconnector->i2c = i2c;
8535         res = i2c_add_adapter(&i2c->base);
8536
8537         if (res) {
8538                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8539                 goto out_free;
8540         }
8541
8542         connector_type = to_drm_connector_type(link->connector_signal);
8543
8544         res = drm_connector_init_with_ddc(
8545                         dm->ddev,
8546                         &aconnector->base,
8547                         &amdgpu_dm_connector_funcs,
8548                         connector_type,
8549                         &i2c->base);
8550
8551         if (res) {
8552                 DRM_ERROR("connector_init failed\n");
8553                 aconnector->connector_id = -1;
8554                 goto out_free;
8555         }
8556
8557         drm_connector_helper_add(
8558                         &aconnector->base,
8559                         &amdgpu_dm_connector_helper_funcs);
8560
8561         amdgpu_dm_connector_init_helper(
8562                 dm,
8563                 aconnector,
8564                 connector_type,
8565                 link,
8566                 link_index);
8567
8568         drm_connector_attach_encoder(
8569                 &aconnector->base, &aencoder->base);
8570
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector_type == DRM_MODE_CONNECTOR_eDP)
8573                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8574
8575 out_free:
8576         if (res) {
8577                 kfree(i2c);
8578                 aconnector->i2c = NULL;
8579         }
8580         return res;
8581 }
8582
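/*
 * Each bit i in the returned mask marks CRTC i as a possible CRTC for the
 * encoder, equivalent to (1 << num_crtc) - 1 for up to six CRTCs: e.g.
 * four CRTCs yield 0xf, and the mask is capped at 0x3f.
 */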
8583 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8584 {
8585         switch (adev->mode_info.num_crtc) {
8586         case 1:
8587                 return 0x1;
8588         case 2:
8589                 return 0x3;
8590         case 3:
8591                 return 0x7;
8592         case 4:
8593                 return 0xf;
8594         case 5:
8595                 return 0x1f;
8596         case 6:
8597         default:
8598                 return 0x3f;
8599         }
8600 }
8601
8602 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8603                                   struct amdgpu_encoder *aencoder,
8604                                   uint32_t link_index)
8605 {
8606         struct amdgpu_device *adev = drm_to_adev(dev);
8607
8608         int res = drm_encoder_init(dev,
8609                                    &aencoder->base,
8610                                    &amdgpu_dm_encoder_funcs,
8611                                    DRM_MODE_ENCODER_TMDS,
8612                                    NULL);
8613
8614         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8615
8616         if (!res)
8617                 aencoder->encoder_id = link_index;
8618         else
8619                 aencoder->encoder_id = -1;
8620
8621         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8622
8623         return res;
8624 }
8625
8626 static void manage_dm_interrupts(struct amdgpu_device *adev,
8627                                  struct amdgpu_crtc *acrtc,
8628                                  bool enable)
8629 {
8630         /*
8631          * We have no guarantee that the frontend index maps to the same
8632          * backend index - some even map to more than one.
8633          *
8634          * TODO: Use a different interrupt or check DC itself for the mapping.
8635          */
8636         int irq_type =
8637                 amdgpu_display_crtc_idx_to_irq_type(
8638                         adev,
8639                         acrtc->crtc_id);
8640
8641         if (enable) {
8642                 drm_crtc_vblank_on(&acrtc->base);
8643                 amdgpu_irq_get(
8644                         adev,
8645                         &adev->pageflip_irq,
8646                         irq_type);
8647 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8648                 amdgpu_irq_get(
8649                         adev,
8650                         &adev->vline0_irq,
8651                         irq_type);
8652 #endif
8653         } else {
8654 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8655                 amdgpu_irq_put(
8656                         adev,
8657                         &adev->vline0_irq,
8658                         irq_type);
8659 #endif
8660                 amdgpu_irq_put(
8661                         adev,
8662                         &adev->pageflip_irq,
8663                         irq_type);
8664                 drm_crtc_vblank_off(&acrtc->base);
8665         }
8666 }
8667
8668 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8669                                       struct amdgpu_crtc *acrtc)
8670 {
8671         int irq_type =
8672                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8673
        /*
         * Read the current IRQ state and force-reapply the setting to
         * the hardware.
         */
8678         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8679 }
8680
8681 static bool
8682 is_scaling_state_different(const struct dm_connector_state *dm_state,
8683                            const struct dm_connector_state *old_dm_state)
8684 {
8685         if (dm_state->scaling != old_dm_state->scaling)
8686                 return true;
8687         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8688                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8689                         return true;
8690         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8691                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8692                         return true;
8693         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8694                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8695                 return true;
8696         return false;
8697 }
8698
8699 #ifdef CONFIG_DRM_AMD_DC_HDCP
8700 static bool is_content_protection_different(struct drm_connector_state *state,
8701                                             const struct drm_connector_state *old_state,
8702                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8703 {
8704         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8705         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8706
8707         /* Handle: Type0/1 change */
8708         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8709             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8710                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8711                 return true;
8712         }
8713
        /* Content protection is being re-enabled; ignore this transition.
         *
         * Handles:     ENABLED -> DESIRED
         */
8718         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8719             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8720                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8721                 return false;
8722         }
8723
        /* S3 resume case: the old state is always UNDESIRED (0) while the
         * restored state is ENABLED.
         *
         * Handles:     UNDESIRED -> ENABLED
         */
8728         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8729             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8730                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8731
8732         /* Stream removed and re-enabled
8733          *
8734          * Can sometimes overlap with the HPD case,
8735          * thus set update_hdcp to false to avoid
8736          * setting HDCP multiple times.
8737          *
8738          * Handles:     DESIRED -> DESIRED (Special case)
8739          */
8740         if (!(old_state->crtc && old_state->crtc->enabled) &&
8741                 state->crtc && state->crtc->enabled &&
8742                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8743                 dm_con_state->update_hdcp = false;
8744                 return true;
8745         }
8746
8747         /* Hot-plug, headless s3, dpms
8748          *
8749          * Only start HDCP if the display is connected/enabled.
8750          * update_hdcp flag will be set to false until the next
8751          * HPD comes in.
8752          *
8753          * Handles:     DESIRED -> DESIRED (Special case)
8754          */
8755         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8756             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8757                 dm_con_state->update_hdcp = false;
8758                 return true;
8759         }
8760
8761         /*
8762          * Handles:     UNDESIRED -> UNDESIRED
8763          *              DESIRED -> DESIRED
8764          *              ENABLED -> ENABLED
8765          */
8766         if (old_state->content_protection == state->content_protection)
8767                 return false;
8768
8769         /*
8770          * Handles:     UNDESIRED -> DESIRED
8771          *              DESIRED -> UNDESIRED
8772          *              ENABLED -> UNDESIRED
8773          */
8774         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8775                 return true;
8776
8777         /*
8778          * Handles:     DESIRED -> ENABLED
8779          */
8780         return false;
8781 }
8782
8783 #endif
8784 static void remove_stream(struct amdgpu_device *adev,
8785                           struct amdgpu_crtc *acrtc,
8786                           struct dc_stream_state *stream)
8787 {
        /* Update-mode case: the stream is going away, so mark the CRTC as
         * disabled and its OTG instance as unassigned.
         */
8789
8790         acrtc->otg_inst = -1;
8791         acrtc->enabled = false;
8792 }
8793
8794 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8795                                struct dc_cursor_position *position)
8796 {
8797         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8798         int x, y;
8799         int xorigin = 0, yorigin = 0;
8800
8801         if (!crtc || !plane->state->fb)
8802                 return 0;
8803
8804         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8805             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8806                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8807                           __func__,
8808                           plane->state->crtc_w,
8809                           plane->state->crtc_h);
8810                 return -EINVAL;
8811         }
8812
8813         x = plane->state->crtc_x;
8814         y = plane->state->crtc_y;
8815
8816         if (x <= -amdgpu_crtc->max_cursor_width ||
8817             y <= -amdgpu_crtc->max_cursor_height)
8818                 return 0;
8819
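        /*
         * If the cursor extends past the top/left edge, clamp the CRTC
         * position to 0 and move the hotspot instead: e.g. crtc_x = -10
         * becomes x = 0 with x_hotspot = 10, so the visible part of the
         * cursor image stays aligned.
         */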
8820         if (x < 0) {
8821                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8822                 x = 0;
8823         }
8824         if (y < 0) {
8825                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8826                 y = 0;
8827         }
8828         position->enable = true;
8829         position->translate_by_source = true;
8830         position->x = x;
8831         position->y = y;
8832         position->x_hotspot = xorigin;
8833         position->y_hotspot = yorigin;
8834
8835         return 0;
8836 }
8837
8838 static void handle_cursor_update(struct drm_plane *plane,
8839                                  struct drm_plane_state *old_plane_state)
8840 {
8841         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8842         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8843         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8844         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8845         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8846         uint64_t address = afb ? afb->address : 0;
8847         struct dc_cursor_position position = {0};
8848         struct dc_cursor_attributes attributes;
8849         int ret;
8850
8851         if (!plane->state->fb && !old_plane_state->fb)
8852                 return;
8853
8854         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8855                       __func__,
8856                       amdgpu_crtc->crtc_id,
8857                       plane->state->crtc_w,
8858                       plane->state->crtc_h);
8859
8860         ret = get_cursor_position(plane, crtc, &position);
8861         if (ret)
8862                 return;
8863
8864         if (!position.enable) {
8865                 /* turn off cursor */
8866                 if (crtc_state && crtc_state->stream) {
8867                         mutex_lock(&adev->dm.dc_lock);
8868                         dc_stream_set_cursor_position(crtc_state->stream,
8869                                                       &position);
8870                         mutex_unlock(&adev->dm.dc_lock);
8871                 }
8872                 return;
8873         }
8874
8875         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8876         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8877
8878         memset(&attributes, 0, sizeof(attributes));
8879         attributes.address.high_part = upper_32_bits(address);
8880         attributes.address.low_part  = lower_32_bits(address);
8881         attributes.width             = plane->state->crtc_w;
8882         attributes.height            = plane->state->crtc_h;
8883         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8884         attributes.rotation_angle    = 0;
8885         attributes.attribute_flags.value = 0;
8886
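        /* Convert the FB byte pitch into the pixel pitch DC expects. */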
8887         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8888
8889         if (crtc_state->stream) {
8890                 mutex_lock(&adev->dm.dc_lock);
8891                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8892                                                          &attributes))
8893                         DRM_ERROR("DC failed to set cursor attributes\n");
8894
8895                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8896                                                    &position))
8897                         DRM_ERROR("DC failed to set cursor position\n");
8898                 mutex_unlock(&adev->dm.dc_lock);
8899         }
8900 }
8901
8902 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8903 {
        assert_spin_locked(&acrtc->base.dev->event_lock);
8906         WARN_ON(acrtc->event);
8907
8908         acrtc->event = acrtc->base.state->event;
8909
8910         /* Set the flip status */
8911         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8912
8913         /* Mark this event as consumed */
8914         acrtc->base.state->event = NULL;
8915
8916         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8917                      acrtc->crtc_id);
8918 }
8919
8920 static void update_freesync_state_on_stream(
8921         struct amdgpu_display_manager *dm,
8922         struct dm_crtc_state *new_crtc_state,
8923         struct dc_stream_state *new_stream,
8924         struct dc_plane_state *surface,
8925         u32 flip_timestamp_in_us)
8926 {
8927         struct mod_vrr_params vrr_params;
8928         struct dc_info_packet vrr_infopacket = {0};
8929         struct amdgpu_device *adev = dm->adev;
8930         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8931         unsigned long flags;
8932         bool pack_sdp_v1_3 = false;
8933
8934         if (!new_stream)
8935                 return;
8936
8937         /*
8938          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8939          * For now it's sufficient to just guard against these conditions.
8940          */
8941
8942         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8943                 return;
8944
8945         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8946         vrr_params = acrtc->dm_irq_params.vrr_params;
8947
8948         if (surface) {
8949                 mod_freesync_handle_preflip(
8950                         dm->freesync_module,
8951                         surface,
8952                         new_stream,
8953                         flip_timestamp_in_us,
8954                         &vrr_params);
8955
8956                 if (adev->family < AMDGPU_FAMILY_AI &&
8957                     amdgpu_dm_vrr_active(new_crtc_state)) {
8958                         mod_freesync_handle_v_update(dm->freesync_module,
8959                                                      new_stream, &vrr_params);
8960
8961                         /* Need to call this before the frame ends. */
8962                         dc_stream_adjust_vmin_vmax(dm->dc,
8963                                                    new_crtc_state->stream,
8964                                                    &vrr_params.adjust);
8965                 }
8966         }
8967
8968         mod_freesync_build_vrr_infopacket(
8969                 dm->freesync_module,
8970                 new_stream,
8971                 &vrr_params,
8972                 PACKET_TYPE_VRR,
8973                 TRANSFER_FUNC_UNKNOWN,
8974                 &vrr_infopacket,
8975                 pack_sdp_v1_3);
8976
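        /* Compare against the values cached in the IRQ params so the commit
         * path only signals a VRR timing/infopacket update when something
         * actually changed.
         */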
8977         new_crtc_state->freesync_timing_changed |=
8978                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8979                         &vrr_params.adjust,
8980                         sizeof(vrr_params.adjust)) != 0);
8981
8982         new_crtc_state->freesync_vrr_info_changed |=
8983                 (memcmp(&new_crtc_state->vrr_infopacket,
8984                         &vrr_infopacket,
8985                         sizeof(vrr_infopacket)) != 0);
8986
8987         acrtc->dm_irq_params.vrr_params = vrr_params;
8988         new_crtc_state->vrr_infopacket = vrr_infopacket;
8989
8990         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8991         new_stream->vrr_infopacket = vrr_infopacket;
8992
8993         if (new_crtc_state->freesync_vrr_info_changed)
8994                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8995                               new_crtc_state->base.crtc->base.id,
8996                               (int)new_crtc_state->base.vrr_enabled,
8997                               (int)vrr_params.state);
8998
8999         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9000 }
9001
9002 static void update_stream_irq_parameters(
9003         struct amdgpu_display_manager *dm,
9004         struct dm_crtc_state *new_crtc_state)
9005 {
9006         struct dc_stream_state *new_stream = new_crtc_state->stream;
9007         struct mod_vrr_params vrr_params;
9008         struct mod_freesync_config config = new_crtc_state->freesync_config;
9009         struct amdgpu_device *adev = dm->adev;
9010         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9011         unsigned long flags;
9012
9013         if (!new_stream)
9014                 return;
9015
9016         /*
9017          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9018          * For now it's sufficient to just guard against these conditions.
9019          */
9020         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9021                 return;
9022
9023         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9024         vrr_params = acrtc->dm_irq_params.vrr_params;
9025
9026         if (new_crtc_state->vrr_supported &&
9027             config.min_refresh_in_uhz &&
9028             config.max_refresh_in_uhz) {
9029                 /*
9030                  * if freesync compatible mode was set, config.state will be set
9031                  * in atomic check
9032                  */
9033                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9034                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9035                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9036                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9037                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9038                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9039                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9040                 } else {
9041                         config.state = new_crtc_state->base.vrr_enabled ?
9042                                                      VRR_STATE_ACTIVE_VARIABLE :
9043                                                      VRR_STATE_INACTIVE;
9044                 }
9045         } else {
9046                 config.state = VRR_STATE_UNSUPPORTED;
9047         }
9048
9049         mod_freesync_build_vrr_params(dm->freesync_module,
9050                                       new_stream,
9051                                       &config, &vrr_params);
9052
9053         new_crtc_state->freesync_timing_changed |=
9054                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9055                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9056
9057         new_crtc_state->freesync_config = config;
9058         /* Copy state for access from DM IRQ handler */
9059         acrtc->dm_irq_params.freesync_config = config;
9060         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9061         acrtc->dm_irq_params.vrr_params = vrr_params;
9062         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9063 }
9064
9065 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9066                                             struct dm_crtc_state *new_state)
9067 {
9068         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9069         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9070
9071         if (!old_vrr_active && new_vrr_active) {
                /* Transition VRR inactive -> active:
                 * While VRR is active, we must not disable vblank irq, as a
                 * reenable after disable would compute bogus vblank/pflip
                 * timestamps if the disable happened inside the display
                 * front porch.
                 *
                 * We also need the vupdate irq for the actual core vblank
                 * handling at the end of vblank.
                 */
9080                 dm_set_vupdate_irq(new_state->base.crtc, true);
9081                 drm_crtc_vblank_get(new_state->base.crtc);
9082                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9083                                  __func__, new_state->base.crtc->base.id);
9084         } else if (old_vrr_active && !new_vrr_active) {
9085                 /* Transition VRR active -> inactive:
9086                  * Allow vblank irq disable again for fixed refresh rate.
9087                  */
9088                 dm_set_vupdate_irq(new_state->base.crtc, false);
9089                 drm_crtc_vblank_put(new_state->base.crtc);
9090                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9091                                  __func__, new_state->base.crtc->base.id);
9092         }
9093 }
9094
9095 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9096 {
9097         struct drm_plane *plane;
9098         struct drm_plane_state *old_plane_state;
9099         int i;
9100
9101         /*
9102          * TODO: Make this per-stream so we don't issue redundant updates for
9103          * commits with multiple streams.
9104          */
9105         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9106                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9107                         handle_cursor_update(plane, old_plane_state);
9108 }
9109
9110 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9111                                     struct dc_state *dc_state,
9112                                     struct drm_device *dev,
9113                                     struct amdgpu_display_manager *dm,
9114                                     struct drm_crtc *pcrtc,
9115                                     bool wait_for_vblank)
9116 {
9117         uint32_t i;
9118         uint64_t timestamp_ns;
9119         struct drm_plane *plane;
9120         struct drm_plane_state *old_plane_state, *new_plane_state;
9121         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9122         struct drm_crtc_state *new_pcrtc_state =
9123                         drm_atomic_get_new_crtc_state(state, pcrtc);
9124         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9125         struct dm_crtc_state *dm_old_crtc_state =
9126                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9127         int planes_count = 0, vpos, hpos;
9128         long r;
9129         unsigned long flags;
9130         struct amdgpu_bo *abo;
9131         uint32_t target_vblank, last_flip_vblank;
9132         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9133         bool pflip_present = false;
9134         struct {
9135                 struct dc_surface_update surface_updates[MAX_SURFACES];
9136                 struct dc_plane_info plane_infos[MAX_SURFACES];
9137                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9138                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9139                 struct dc_stream_update stream_update;
9140         } *bundle;
9141
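        /*
         * The bundle is allocated on the heap; with MAX_SURFACES worth of
         * surface updates, plane infos and flip addresses it would be far
         * too large for the kernel stack.
         */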
9142         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9143
9144         if (!bundle) {
9145                 dm_error("Failed to allocate update bundle\n");
9146                 goto cleanup;
9147         }
9148
9149         /*
9150          * Disable the cursor first if we're disabling all the planes.
9151          * It'll remain on the screen after the planes are re-enabled
9152          * if we don't.
9153          */
9154         if (acrtc_state->active_planes == 0)
9155                 amdgpu_dm_commit_cursors(state);
9156
9157         /* update planes when needed */
9158         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9159                 struct drm_crtc *crtc = new_plane_state->crtc;
9160                 struct drm_crtc_state *new_crtc_state;
9161                 struct drm_framebuffer *fb = new_plane_state->fb;
9162                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9163                 bool plane_needs_flip;
9164                 struct dc_plane_state *dc_plane;
9165                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9166
9167                 /* Cursor plane is handled after stream updates */
9168                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9169                         continue;
9170
9171                 if (!fb || !crtc || pcrtc != crtc)
9172                         continue;
9173
9174                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9175                 if (!new_crtc_state->active)
9176                         continue;
9177
9178                 dc_plane = dm_new_plane_state->dc_state;
9179
9180                 bundle->surface_updates[planes_count].surface = dc_plane;
9181                 if (new_pcrtc_state->color_mgmt_changed) {
9182                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9183                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9184                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9185                 }
9186
9187                 fill_dc_scaling_info(dm->adev, new_plane_state,
9188                                      &bundle->scaling_infos[planes_count]);
9189
9190                 bundle->surface_updates[planes_count].scaling_info =
9191                         &bundle->scaling_infos[planes_count];
9192
9193                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9194
9195                 pflip_present = pflip_present || plane_needs_flip;
9196
9197                 if (!plane_needs_flip) {
9198                         planes_count += 1;
9199                         continue;
9200                 }
9201
9202                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9203
9204                 /*
9205                  * Wait for all fences on this FB. Do limited wait to avoid
9206                  * deadlock during GPU reset when this fence will not signal
9207                  * but we hold reservation lock for the BO.
9208                  */
9209                 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9210                                           DMA_RESV_USAGE_WRITE, false,
9211                                           msecs_to_jiffies(5000));
9212                 if (unlikely(r <= 0))
9213                         DRM_ERROR("Waiting for fences timed out!");
9214
9215                 fill_dc_plane_info_and_addr(
9216                         dm->adev, new_plane_state,
9217                         afb->tiling_flags,
9218                         &bundle->plane_infos[planes_count],
9219                         &bundle->flip_addrs[planes_count].address,
9220                         afb->tmz_surface, false);
9221
9222                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9223                                  new_plane_state->plane->index,
9224                                  bundle->plane_infos[planes_count].dcc.enable);
9225
9226                 bundle->surface_updates[planes_count].plane_info =
9227                         &bundle->plane_infos[planes_count];
9228
                /*
                 * Only allow immediate flips for fast updates that don't
                 * change FB pitch, DCC state, rotation or mirroring.
                 */
9233                 bundle->flip_addrs[planes_count].flip_immediate =
9234                         crtc->state->async_flip &&
9235                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9236
9237                 timestamp_ns = ktime_get_ns();
9238                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9239                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9240                 bundle->surface_updates[planes_count].surface = dc_plane;
9241
9242                 if (!bundle->surface_updates[planes_count].surface) {
9243                         DRM_ERROR("No surface for CRTC: id=%d\n",
9244                                         acrtc_attach->crtc_id);
9245                         continue;
9246                 }
9247
9248                 if (plane == pcrtc->primary)
9249                         update_freesync_state_on_stream(
9250                                 dm,
9251                                 acrtc_state,
9252                                 acrtc_state->stream,
9253                                 dc_plane,
9254                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9255
9256                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9257                                  __func__,
9258                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9259                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9260
9261                 planes_count += 1;
9262
9263         }
9264
9265         if (pflip_present) {
9266                 if (!vrr_active) {
9267                         /* Use old throttling in non-vrr fixed refresh rate mode
9268                          * to keep flip scheduling based on target vblank counts
9269                          * working in a backwards compatible way, e.g., for
9270                          * clients using the GLX_OML_sync_control extension or
9271                          * DRI3/Present extension with defined target_msc.
9272                          */
9273                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9274                 } else {
9276                         /* For variable refresh rate mode only:
9277                          * Get vblank of last completed flip to avoid > 1 vrr
9278                          * flips per video frame by use of throttling, but allow
9279                          * flip programming anywhere in the possibly large
9280                          * variable vrr vblank interval for fine-grained flip
9281                          * timing control and more opportunity to avoid stutter
9282                          * on late submission of flips.
9283                          */
9284                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9285                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9286                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9287                 }
9288
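                     /*
                      * Throttle to the first vblank after the last completed flip.
                      * wait_for_vblank is 0 when some CRTC requested an async flip,
                      * in which case no extra throttling delay is added.
                      */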
9289                 target_vblank = last_flip_vblank + wait_for_vblank;
9290
9291                 /*
9292                  * Wait until we're out of the vertical blank period before the one
9293                  * targeted by the flip
9294                  */
9295                 while ((acrtc_attach->enabled &&
9296                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9297                                                             0, &vpos, &hpos, NULL,
9298                                                             NULL, &pcrtc->hwmode)
9299                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9300                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9301                         (int)(target_vblank -
9302                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9303                         usleep_range(1000, 1100);
9304                 }
9305
9306                 /*
9307                  * Prepare the flip event for the pageflip interrupt to handle.
9308                  *
9309                  * This only works in the case where we've already turned on the
9310                  * appropriate hardware blocks (eg. HUBP) so in the transition case
9311                  * from 0 -> n planes we have to skip a hardware generated event
9312                  * and rely on sending it from software.
9313                  */
9314                 if (acrtc_attach->base.state->event &&
9315                     acrtc_state->active_planes > 0 &&
9316                     !acrtc_state->force_dpms_off) {
9317                         drm_crtc_vblank_get(pcrtc);
9318
9319                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9320
9321                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9322                         prepare_flip_isr(acrtc_attach);
9323
9324                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9325                 }
9326
9327                 if (acrtc_state->stream) {
9328                         if (acrtc_state->freesync_vrr_info_changed)
9329                                 bundle->stream_update.vrr_infopacket =
9330                                         &acrtc_state->stream->vrr_infopacket;
9331                 }
9332         }
9333
9334         /* Update the planes if changed or disable if we don't have any. */
9335         if ((planes_count || acrtc_state->active_planes == 0) &&
9336                 acrtc_state->stream) {
9337                 /*
9338                  * If PSR or idle optimizations are enabled then flush out
9339                  * any pending work before hardware programming.
9340                  */
9341                 if (dm->vblank_control_workqueue)
9342                         flush_workqueue(dm->vblank_control_workqueue);
9343
9344                 bundle->stream_update.stream = acrtc_state->stream;
9345                 if (new_pcrtc_state->mode_changed) {
9346                         bundle->stream_update.src = acrtc_state->stream->src;
9347                         bundle->stream_update.dst = acrtc_state->stream->dst;
9348                 }
9349
9350                 if (new_pcrtc_state->color_mgmt_changed) {
9351                         /*
9352                          * TODO: This isn't fully correct since we've actually
9353                          * already modified the stream in place.
9354                          */
9355                         bundle->stream_update.gamut_remap =
9356                                 &acrtc_state->stream->gamut_remap_matrix;
9357                         bundle->stream_update.output_csc_transform =
9358                                 &acrtc_state->stream->csc_color_matrix;
9359                         bundle->stream_update.out_transfer_func =
9360                                 acrtc_state->stream->out_transfer_func;
9361                 }
9362
9363                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9364                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9365                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9366
9367                 /*
9368                  * If FreeSync state on the stream has changed then we need to
9369                  * re-adjust the min/max bounds now that DC doesn't handle this
9370                  * as part of commit.
9371                  */
9372                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9373                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9374                         dc_stream_adjust_vmin_vmax(
9375                                 dm->dc, acrtc_state->stream,
9376                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9377                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9378                 }
9379                 mutex_lock(&dm->dc_lock);
9380                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9381                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9382                         amdgpu_dm_psr_disable(acrtc_state->stream);
9383
9384                 dc_commit_updates_for_stream(dm->dc,
9385                                              bundle->surface_updates,
9386                                              planes_count,
9387                                              acrtc_state->stream,
9388                                              &bundle->stream_update,
9389                                              dc_state);
9390
9391                 /*
9392                  * Enable or disable the interrupts on the backend.
9393                  *
9394                  * Most pipes are put into power gating when unused.
9395                  *
9396                  * When power gating is enabled on a pipe we lose the
9397                  * interrupt enablement state when power gating is disabled.
9398                  *
9399                  * So we need to update the IRQ control state in hardware
9400                  * whenever the pipe turns on (since it could be previously
9401                  * power gated) or off (since some pipes can't be power gated
9402                  * on some ASICs).
9403                  */
9404                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9405                         dm_update_pflip_irq_state(drm_to_adev(dev),
9406                                                   acrtc_attach);
9407
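                     /*
                      * For full updates, set up PSR on links that support it but
                      * don't have the feature enabled yet.
                      */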
9408                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9409                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9410                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9411                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9412
9413                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9414                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9415                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9416                         struct amdgpu_dm_connector *aconn =
9417                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9418
9419                         if (aconn->psr_skip_count > 0)
9420                                 aconn->psr_skip_count--;
9421
9422                         /* Allow PSR when skip count is 0. */
9423                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9424                 } else {
9425                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9426                 }
9427
9428                 mutex_unlock(&dm->dc_lock);
9429         }
9430
9431         /*
9432          * Update cursor state *after* programming all the planes.
9433          * This avoids redundant programming in the case where we're going
9434          * to be disabling a single plane - those pipes are being disabled.
9435          */
9436         if (acrtc_state->active_planes)
9437                 amdgpu_dm_commit_cursors(state);
9438
9439 cleanup:
9440         kfree(bundle);
9441 }
9442
9443 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9444                                    struct drm_atomic_state *state)
9445 {
9446         struct amdgpu_device *adev = drm_to_adev(dev);
9447         struct amdgpu_dm_connector *aconnector;
9448         struct drm_connector *connector;
9449         struct drm_connector_state *old_con_state, *new_con_state;
9450         struct drm_crtc_state *new_crtc_state;
9451         struct dm_crtc_state *new_dm_crtc_state;
9452         const struct dc_stream_status *status;
9453         int i, inst;
9454
9455         /* Notify audio device removals. */
9456         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9457                 if (old_con_state->crtc != new_con_state->crtc) {
9458                         /* CRTC changes require notification. */
9459                         goto notify;
9460                 }
9461
9462                 if (!new_con_state->crtc)
9463                         continue;
9464
9465                 new_crtc_state = drm_atomic_get_new_crtc_state(
9466                         state, new_con_state->crtc);
9467
9468                 if (!new_crtc_state)
9469                         continue;
9470
9471                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9472                         continue;
9473
9474         notify:
9475                 aconnector = to_amdgpu_dm_connector(connector);
9476
9477                 mutex_lock(&adev->dm.audio_lock);
9478                 inst = aconnector->audio_inst;
9479                 aconnector->audio_inst = -1;
9480                 mutex_unlock(&adev->dm.audio_lock);
9481
9482                 amdgpu_dm_audio_eld_notify(adev, inst);
9483         }
9484
9485         /* Notify audio device additions. */
9486         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9487                 if (!new_con_state->crtc)
9488                         continue;
9489
9490                 new_crtc_state = drm_atomic_get_new_crtc_state(
9491                         state, new_con_state->crtc);
9492
9493                 if (!new_crtc_state)
9494                         continue;
9495
9496                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9497                         continue;
9498
9499                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9500                 if (!new_dm_crtc_state->stream)
9501                         continue;
9502
9503                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9504                 if (!status)
9505                         continue;
9506
9507                 aconnector = to_amdgpu_dm_connector(connector);
9508
9509                 mutex_lock(&adev->dm.audio_lock);
9510                 inst = status->audio_inst;
9511                 aconnector->audio_inst = inst;
9512                 mutex_unlock(&adev->dm.audio_lock);
9513
9514                 amdgpu_dm_audio_eld_notify(adev, inst);
9515         }
9516 }
9517
9518 /**
9519  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9520  * @crtc_state: the DRM CRTC state
9521  * @stream_state: the DC stream state.
9522  *
9523  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9524  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9525  */
9526 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9527                                                 struct dc_stream_state *stream_state)
9528 {
9529         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9530 }
9531
9532 /**
9533  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9534  * @state: The atomic state to commit
9535  *
9536  * This will tell DC to commit the constructed DC state from atomic_check,
9537  * programming the hardware. Any failure here implies a hardware failure, since
9538  * atomic check should have filtered anything non-kosher.
9539  */
9540 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9541 {
9542         struct drm_device *dev = state->dev;
9543         struct amdgpu_device *adev = drm_to_adev(dev);
9544         struct amdgpu_display_manager *dm = &adev->dm;
9545         struct dm_atomic_state *dm_state;
9546         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9547         uint32_t i, j;
9548         struct drm_crtc *crtc;
9549         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9550         unsigned long flags;
9551         bool wait_for_vblank = true;
9552         struct drm_connector *connector;
9553         struct drm_connector_state *old_con_state, *new_con_state;
9554         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9555         int crtc_disable_count = 0;
9556         bool mode_set_reset_required = false;
9557
9558         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9559
9560         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9561
9562         dm_state = dm_atomic_get_new_state(state);
9563         if (dm_state && dm_state->context) {
9564                 dc_state = dm_state->context;
9565         } else {
9566                 /* No state changes, retain current state. */
9567                 dc_state_temp = dc_create_state(dm->dc);
9568                 ASSERT(dc_state_temp);
9569                 dc_state = dc_state_temp;
9570                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9571         }
9572
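             /*
              * First pass: disable interrupts and release the stream for every
              * CRTC that is being turned off or goes through a full modeset,
              * before any hardware programming below.
              */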
9573         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9574                                        new_crtc_state, i) {
9575                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9576
9577                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9578
9579                 if (old_crtc_state->active &&
9580                     (!new_crtc_state->active ||
9581                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9582                         manage_dm_interrupts(adev, acrtc, false);
9583                         dc_stream_release(dm_old_crtc_state->stream);
9584                 }
9585         }
9586
9587         drm_atomic_helper_calc_timestamping_constants(state);
9588
9589         /* update changed items */
9590         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9591                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9592
9593                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9594                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9595
9596                 drm_dbg_state(state->dev,
9597                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9598                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9599                         "connectors_changed:%d\n",
9600                         acrtc->crtc_id,
9601                         new_crtc_state->enable,
9602                         new_crtc_state->active,
9603                         new_crtc_state->planes_changed,
9604                         new_crtc_state->mode_changed,
9605                         new_crtc_state->active_changed,
9606                         new_crtc_state->connectors_changed);
9607
9608                 /* Disable cursor if disabling crtc */
9609                 if (old_crtc_state->active && !new_crtc_state->active) {
9610                         struct dc_cursor_position position;
9611
9612                         memset(&position, 0, sizeof(position));
9613                         mutex_lock(&dm->dc_lock);
9614                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9615                         mutex_unlock(&dm->dc_lock);
9616                 }
9617
9618                 /* Copy all transient state flags into dc state */
9619                 if (dm_new_crtc_state->stream) {
9620                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9621                                                             dm_new_crtc_state->stream);
9622                 }
9623
9624                 /* handles headless hotplug case, updating new_state and
9625                  * aconnector as needed
9626                  */
9627
9628                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9629
9630                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9631
9632                         if (!dm_new_crtc_state->stream) {
9633                                 /*
9634                                  * This could happen because of issues with the
9635                                  * delivery of userspace notifications.
9636                                  * In this case userspace tries to set a mode on
9637                                  * a display which is in fact disconnected;
9638                                  * dc_sink is NULL on the aconnector in this case.
9639                                  * We expect a mode reset to come soon.
9640                                  *
9641                                  * This can also happen when an unplug occurs
9642                                  * during the resume sequence.
9643                                  *
9644                                  * In this case, we want to pretend we still
9645                                  * have a sink to keep the pipe running so that
9646                                  * hw state is consistent with the sw state.
9647                                  */
9648                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9649                                                 __func__, acrtc->base.base.id);
9650                                 continue;
9651                         }
9652
9653                         if (dm_old_crtc_state->stream)
9654                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9655
9656                         pm_runtime_get_noresume(dev->dev);
9657
9658                         acrtc->enabled = true;
9659                         acrtc->hw_mode = new_crtc_state->mode;
9660                         crtc->hwmode = new_crtc_state->mode;
9661                         mode_set_reset_required = true;
9662                 } else if (modereset_required(new_crtc_state)) {
9663                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9664                         /* i.e. reset mode */
9665                         if (dm_old_crtc_state->stream)
9666                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9667
9668                         mode_set_reset_required = true;
9669                 }
9670         } /* for_each_crtc_in_state() */
9671
9672         if (dc_state) {
9673                 /* if there is a mode set or reset, disable eDP PSR */
9674                 if (mode_set_reset_required) {
9675                         if (dm->vblank_control_workqueue)
9676                                 flush_workqueue(dm->vblank_control_workqueue);
9677
9678                         amdgpu_dm_psr_disable_all(dm);
9679                 }
9680
9681                 dm_enable_per_frame_crtc_master_sync(dc_state);
9682                 mutex_lock(&dm->dc_lock);
9683                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9684
9685                 /* Allow idle optimization when vblank count is 0 for display off */
9686                 if (dm->active_vblank_irq_count == 0)
9687                         dc_allow_idle_optimizations(dm->dc, true);
9688                 mutex_unlock(&dm->dc_lock);
9689         }
9690
9691         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9692                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9693
9694                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9695
9696                 if (dm_new_crtc_state->stream != NULL) {
9697                         const struct dc_stream_status *status =
9698                                         dc_stream_get_status(dm_new_crtc_state->stream);
9699
9700                         if (!status)
9701                                 status = dc_stream_get_status_from_state(dc_state,
9702                                                                          dm_new_crtc_state->stream);
9703                         if (!status)
9704                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9705                         else
9706                                 acrtc->otg_inst = status->primary_otg_inst;
9707                 }
9708         }
9709 #ifdef CONFIG_DRM_AMD_DC_HDCP
9710         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9711                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9712                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9713                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9714
9715                 new_crtc_state = NULL;
9716
9717                 if (acrtc)
9718                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9719
9720                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9721
9722                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9723                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9724                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9725                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9726                         dm_new_con_state->update_hdcp = true;
9727                         continue;
9728                 }
9729
9730                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9731                         hdcp_update_display(
9732                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9733                                 new_con_state->hdcp_content_type,
9734                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9735         }
9736 #endif
9737
9738         /* Handle connector state changes */
9739         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9740                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9741                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9742                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9743                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9744                 struct dc_stream_update stream_update;
9745                 struct dc_info_packet hdr_packet;
9746                 struct dc_stream_status *status = NULL;
9747                 bool abm_changed, hdr_changed, scaling_changed;
9748
9749                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9750                 memset(&stream_update, 0, sizeof(stream_update));
9751
9752                 if (acrtc) {
9753                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9754                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9755                 }
9756
9757                 /* Skip any modesets/resets */
9758                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9759                         continue;
9760
9761                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9762                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9763
9764                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9765                                                              dm_old_con_state);
9766
9767                 abm_changed = dm_new_crtc_state->abm_level !=
9768                               dm_old_crtc_state->abm_level;
9769
9770                 hdr_changed =
9771                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9772
9773                 if (!scaling_changed && !abm_changed && !hdr_changed)
9774                         continue;
9775
9776                 stream_update.stream = dm_new_crtc_state->stream;
9777                 if (scaling_changed) {
9778                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9779                                         dm_new_con_state, dm_new_crtc_state->stream);
9780
9781                         stream_update.src = dm_new_crtc_state->stream->src;
9782                         stream_update.dst = dm_new_crtc_state->stream->dst;
9783                 }
9784
9785                 if (abm_changed) {
9786                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9787
9788                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9789                 }
9790
9791                 if (hdr_changed) {
9792                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9793                         stream_update.hdr_static_metadata = &hdr_packet;
9794                 }
9795
9796                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9797
9798                 if (WARN_ON(!status))
9799                         continue;
9800
9801                 WARN_ON(!status->plane_count);
9802
9803                 /*
9804                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9805                  * Here we create an empty update on each plane.
9806                  * To fix this, DC should permit updating only stream properties.
9807                  */
9808                 for (j = 0; j < status->plane_count; j++)
9809                         dummy_updates[j].surface = status->plane_states[0];
9810
9812                 mutex_lock(&dm->dc_lock);
9813                 dc_commit_updates_for_stream(dm->dc,
9814                                              dummy_updates,
9815                                              status->plane_count,
9816                                              dm_new_crtc_state->stream,
9817                                              &stream_update,
9818                                              dc_state);
9819                 mutex_unlock(&dm->dc_lock);
9820         }
9821
9822         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9823         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9824                                       new_crtc_state, i) {
9825                 if (old_crtc_state->active && !new_crtc_state->active)
9826                         crtc_disable_count++;
9827
9828                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9829                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9830
9831                 /* For freesync config update on crtc state and params for irq */
9832                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9833
9834                 /* Handle vrr on->off / off->on transitions */
9835                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9836                                                 dm_new_crtc_state);
9837         }
9838
9839         /*
9840          * Enable interrupts for CRTCs that are newly enabled or went through
9841          * a modeset. This is intentionally deferred until after the front end
9842          * state is modified, so that the OTG is on by the time the IRQ
9843          * handlers run and they don't access stale or invalid state.
9844          */
9845         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9846                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9847 #ifdef CONFIG_DEBUG_FS
9848                 bool configure_crc = false;
9849                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9850 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9851                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9852 #endif
9853                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9854                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9855                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9856 #endif
9857                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9858
9859                 if (new_crtc_state->active &&
9860                     (!old_crtc_state->active ||
9861                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9862                         dc_stream_retain(dm_new_crtc_state->stream);
9863                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9864                         manage_dm_interrupts(adev, acrtc, true);
9865
9866 #ifdef CONFIG_DEBUG_FS
9867                         /*
9868                          * Frontend may have changed so reapply the CRC capture
9869                          * settings for the stream.
9870                          */
9871                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9872
9873                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9874                                 configure_crc = true;
9875 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9876                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9877                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9878                                         acrtc->dm_irq_params.crc_window.update_win = true;
9879                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
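                                             /*
                                              * event_lock is held with IRQs disabled, so use the
                                              * plain lock variants here: spin_unlock_irq() would
                                              * re-enable IRQs while event_lock is still held.
                                              */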
9880                                         spin_lock(&crc_rd_wrk->crc_rd_work_lock);
9881                                         crc_rd_wrk->crtc = crtc;
9882                                         spin_unlock(&crc_rd_wrk->crc_rd_work_lock);
9883                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9884                                 }
9885 #endif
9886                         }
9887
9888                         if (configure_crc)
9889                                 if (amdgpu_dm_crtc_configure_crc_source(
9890                                         crtc, dm_new_crtc_state, cur_crc_src))
9891                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9892 #endif
9893                 }
9894         }
9895
9896         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9897                 if (new_crtc_state->async_flip)
9898                         wait_for_vblank = false;
9899
9900         /* Update planes when needed, per CRTC */
9901         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9902                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9903
9904                 if (dm_new_crtc_state->stream)
9905                         amdgpu_dm_commit_planes(state, dc_state, dev,
9906                                                 dm, crtc, wait_for_vblank);
9907         }
9908
9909         /* Update audio instances for each connector. */
9910         amdgpu_dm_commit_audio(dev, state);
9911
9912 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9913         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9914         /* restore the backlight level */
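             /* (only when the cached target differs from the last applied level) */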
9915         for (i = 0; i < dm->num_of_edps; i++) {
9916                 if (dm->backlight_dev[i] &&
9917                     (dm->actual_brightness[i] != dm->brightness[i]))
9918                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9919         }
9920 #endif
9921         /*
9922          * Send a vblank event for any commit not handled by the flip path above,
9923          * and mark its event consumed for drm_atomic_helper_commit_hw_done()
9924          */
9925         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9926         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9927
9928                 if (new_crtc_state->event)
9929                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9930
9931                 new_crtc_state->event = NULL;
9932         }
9933         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9934
9935         /* Signal HW programming completion */
9936         drm_atomic_helper_commit_hw_done(state);
9937
9938         if (wait_for_vblank)
9939                 drm_atomic_helper_wait_for_flip_done(dev, state);
9940
9941         drm_atomic_helper_cleanup_planes(dev, state);
9942
9943         /* return the stolen vga memory back to VRAM */
9944         if (!adev->mman.keep_stolen_vga_memory)
9945                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9946         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9947
9948         /*
9949          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9950          * so we can put the GPU into runtime suspend if we're not driving any
9951          * displays anymore
9952          */
9953         for (i = 0; i < crtc_disable_count; i++)
9954                 pm_runtime_put_autosuspend(dev->dev);
9955         pm_runtime_mark_last_busy(dev->dev);
9956
9957         if (dc_state_temp)
9958                 dc_release_state(dc_state_temp);
9959 }
9960
9961
9962 static int dm_force_atomic_commit(struct drm_connector *connector)
9963 {
9964         int ret = 0;
9965         struct drm_device *ddev = connector->dev;
9966         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9967         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9968         struct drm_plane *plane = disconnected_acrtc->base.primary;
9969         struct drm_connector_state *conn_state;
9970         struct drm_crtc_state *crtc_state;
9971         struct drm_plane_state *plane_state;
9972
9973         if (!state)
9974                 return -ENOMEM;
9975
9976         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9977
9978         /* Construct an atomic state to restore previous display setting */
9979
9980         /*
9981          * Attach connectors to drm_atomic_state
9982          */
9983         conn_state = drm_atomic_get_connector_state(state, connector);
9984
9985         ret = PTR_ERR_OR_ZERO(conn_state);
9986         if (ret)
9987                 goto out;
9988
9989         /* Attach crtc to drm_atomic_state */
9990         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9991
9992         ret = PTR_ERR_OR_ZERO(crtc_state);
9993         if (ret)
9994                 goto out;
9995
9996         /* force a restore */
9997         crtc_state->mode_changed = true;
9998
9999         /* Attach plane to drm_atomic_state */
10000         plane_state = drm_atomic_get_plane_state(state, plane);
10001
10002         ret = PTR_ERR_OR_ZERO(plane_state);
10003         if (ret)
10004                 goto out;
10005
10006         /* Call commit internally with the state we just constructed */
10007         ret = drm_atomic_commit(state);
10008
10009 out:
10010         drm_atomic_state_put(state);
10011         if (ret)
10012                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10013
10014         return ret;
10015 }
10016
10017 /*
10018  * This function handles all cases when set mode does not come upon hotplug.
10019  * This includes when a display is unplugged and then plugged back into the
10020  * same port, and when running without usermode desktop manager support.
10021  */
10022 void dm_restore_drm_connector_state(struct drm_device *dev,
10023                                     struct drm_connector *connector)
10024 {
10025         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10026         struct amdgpu_crtc *disconnected_acrtc;
10027         struct dm_crtc_state *acrtc_state;
10028
10029         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10030                 return;
10031
10032         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10033         if (!disconnected_acrtc)
10034                 return;
10035
10036         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10037         if (!acrtc_state->stream)
10038                 return;
10039
10040         /*
10041          * If the previous sink is not released and different from the current,
10042          * we deduce we are in a state where we cannot rely on a usermode call
10043          * to turn on the display, so we do it here.
10044          */
10045         if (acrtc_state->stream->sink != aconnector->dc_sink)
10046                 dm_force_atomic_commit(&aconnector->base);
10047 }
10048
10049 /*
10050  * Grabs all modesetting locks to serialize against any blocking commits,
10051  * and waits for completion of all non-blocking commits.
10052  */
10053 static int do_aquire_global_lock(struct drm_device *dev,
10054                                  struct drm_atomic_state *state)
10055 {
10056         struct drm_crtc *crtc;
10057         struct drm_crtc_commit *commit;
10058         long ret;
10059
10060         /*
10061          * Adding all modeset locks to acquire_ctx ensures
10062          * that when the framework releases it, the extra
10063          * locks we take here are released too.
10064          */
10065         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10066         if (ret)
10067                 return ret;
10068
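             /*
              * Grab a reference on the most recent pending commit of each CRTC,
              * if any, so it cannot be freed while we wait on it below.
              */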
10069         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10070                 spin_lock(&crtc->commit_lock);
10071                 commit = list_first_entry_or_null(&crtc->commit_list,
10072                                 struct drm_crtc_commit, commit_entry);
10073                 if (commit)
10074                         drm_crtc_commit_get(commit);
10075                 spin_unlock(&crtc->commit_lock);
10076
10077                 if (!commit)
10078                         continue;
10079
10080                 /*
10081                  * Make sure all pending HW programming completed and
10082                  * page flips done
10083                  */
10084                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10085
10086                 if (ret > 0)
10087                         ret = wait_for_completion_interruptible_timeout(
10088                                         &commit->flip_done, 10*HZ);
10089
10090                 if (ret == 0)
10091                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10092                                   crtc->base.id, crtc->name);
10093
10094                 drm_crtc_commit_put(commit);
10095         }
10096
10097         return ret < 0 ? ret : 0;
10098 }
10099
10100 static void get_freesync_config_for_crtc(
10101         struct dm_crtc_state *new_crtc_state,
10102         struct dm_connector_state *new_con_state)
10103 {
10104         struct mod_freesync_config config = {0};
10105         struct amdgpu_dm_connector *aconnector =
10106                         to_amdgpu_dm_connector(new_con_state->base.connector);
10107         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10108         int vrefresh = drm_mode_vrefresh(mode);
10109         bool fs_vid_mode = false;
10110
10111         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10112                                         vrefresh >= aconnector->min_vfreq &&
10113                                         vrefresh <= aconnector->max_vfreq;
10114
10115         if (new_crtc_state->vrr_supported) {
10116                 new_crtc_state->stream->ignore_msa_timing_param = true;
10117                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10118
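                     /*
                      * mod_freesync expects the range in micro-Hz: e.g. a 48-144 Hz
                      * panel yields min 48000000 uHz and max 144000000 uHz.
                      */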
10119                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10120                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10121                 config.vsif_supported = true;
10122                 config.btr = true;
10123
10124                 if (fs_vid_mode) {
10125                         config.state = VRR_STATE_ACTIVE_FIXED;
10126                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10127                         goto out;
10128                 } else if (new_crtc_state->base.vrr_enabled) {
10129                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10130                 } else {
10131                         config.state = VRR_STATE_INACTIVE;
10132                 }
10133         }
10134 out:
10135         new_crtc_state->freesync_config = config;
10136 }
10137
10138 static void reset_freesync_config_for_crtc(
10139         struct dm_crtc_state *new_crtc_state)
10140 {
10141         new_crtc_state->vrr_supported = false;
10142
10143         memset(&new_crtc_state->vrr_infopacket, 0,
10144                sizeof(new_crtc_state->vrr_infopacket));
10145 }
10146
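     /*
      * Two modes are timing-unchanged for freesync purposes when they differ
      * only in vertical blanking: vtotal, vsync_start and vsync_end may change
      * while the vsync pulse width and every horizontal parameter stay equal,
      * i.e. only the vertical front porch was adjusted (the freesync video case).
      */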
10147 static bool
10148 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10149                                  struct drm_crtc_state *new_crtc_state)
10150 {
10151         const struct drm_display_mode *old_mode, *new_mode;
10152
10153         if (!old_crtc_state || !new_crtc_state)
10154                 return false;
10155
10156         old_mode = &old_crtc_state->mode;
10157         new_mode = &new_crtc_state->mode;
10158
10159         if (old_mode->clock       == new_mode->clock &&
10160             old_mode->hdisplay    == new_mode->hdisplay &&
10161             old_mode->vdisplay    == new_mode->vdisplay &&
10162             old_mode->htotal      == new_mode->htotal &&
10163             old_mode->vtotal      != new_mode->vtotal &&
10164             old_mode->hsync_start == new_mode->hsync_start &&
10165             old_mode->vsync_start != new_mode->vsync_start &&
10166             old_mode->hsync_end   == new_mode->hsync_end &&
10167             old_mode->vsync_end   != new_mode->vsync_end &&
10168             old_mode->hskew       == new_mode->hskew &&
10169             old_mode->vscan       == new_mode->vscan &&
10170             (old_mode->vsync_end - old_mode->vsync_start) ==
10171             (new_mode->vsync_end - new_mode->vsync_start))
10172                 return true;
10173
10174         return false;
10175 }
10176
10177 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10178         uint64_t num, den, res;
10179         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10180
10181         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10182
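             /*
              * mode.clock is in kHz: scale it to Hz (*1000) and then to micro-Hz
              * (*1000000) before dividing by the pixels per frame (htotal * vtotal)
              * to get the fixed refresh rate in uHz.
              */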
10183         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10184         den = (unsigned long long)new_crtc_state->mode.htotal *
10185               (unsigned long long)new_crtc_state->mode.vtotal;
10186
10187         res = div_u64(num, den);
10188         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10189 }
10190
10191 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10192                          struct drm_atomic_state *state,
10193                          struct drm_crtc *crtc,
10194                          struct drm_crtc_state *old_crtc_state,
10195                          struct drm_crtc_state *new_crtc_state,
10196                          bool enable,
10197                          bool *lock_and_validation_needed)
10198 {
10199         struct dm_atomic_state *dm_state = NULL;
10200         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10201         struct dc_stream_state *new_stream;
10202         int ret = 0;
10203
10204         /*
10205          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10206          * update changed items
10207          */
10208         struct amdgpu_crtc *acrtc = NULL;
10209         struct amdgpu_dm_connector *aconnector = NULL;
10210         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10211         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10212
10213         new_stream = NULL;
10214
10215         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10216         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10217         acrtc = to_amdgpu_crtc(crtc);
10218         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10219
10220         /* TODO This hack should go away */
10221         if (aconnector && enable) {
10222                 /* Make sure fake sink is created in plug-in scenario */
10223                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10224                                                             &aconnector->base);
10225                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10226                                                             &aconnector->base);
10227
10228                 if (IS_ERR(drm_new_conn_state)) {
10229                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10230                         goto fail;
10231                 }
10232
10233                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10234                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10235
10236                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10237                         goto skip_modeset;
10238
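                     /*
                      * Create a stream for the requested mode and validate it with
                      * DC; the old stream is passed along as a reference so existing
                      * state can be carried over where applicable.
                      */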
10239                 new_stream = create_validate_stream_for_sink(aconnector,
10240                                                              &new_crtc_state->mode,
10241                                                              dm_new_conn_state,
10242                                                              dm_old_crtc_state->stream);
10243
10244                 /*
10245                  * We can have no stream on ACTION_SET if a display
10246                  * was disconnected during S3; in this case it is not an
10247                  * error, the OS will be updated after detection and
10248                  * will do the right thing on the next atomic commit.
10249                  */
10250
10251                 if (!new_stream) {
10252                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10253                                         __func__, acrtc->base.base.id);
10254                         ret = -ENOMEM;
10255                         goto fail;
10256                 }
10257
10258                 /*
10259                  * TODO: Check VSDB bits to decide whether this should
10260                  * be enabled or not.
10261                  */
10262                 new_stream->triggered_crtc_reset.enabled =
10263                         dm->force_timing_sync;
10264
10265                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10266
10267                 ret = fill_hdr_info_packet(drm_new_conn_state,
10268                                            &new_stream->hdr_static_metadata);
10269                 if (ret)
10270                         goto fail;
10271
10272                 /*
10273                  * If we already removed the old stream from the context
10274                  * (and set the new stream to NULL) then we can't reuse
10275                  * the old stream even if the stream and scaling are unchanged.
10276                  * We'll hit the BUG_ON and black screen.
10277                  *
10278                  * TODO: Refactor this function to allow this check to work
10279                  * in all conditions.
10280                  */
10281                 if (dm_new_crtc_state->stream &&
10282                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10283                         goto skip_modeset;
10284
10285                 if (dm_new_crtc_state->stream &&
10286                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10287                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10288                         new_crtc_state->mode_changed = false;
10289                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10290                                          new_crtc_state->mode_changed);
10291                 }
10292         }
10293
10294         /* mode_changed flag may get updated above, need to check again */
10295         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10296                 goto skip_modeset;
10297
10298         drm_dbg_state(state->dev,
10299                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10300                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10301                 "connectors_changed:%d\n",
10302                 acrtc->crtc_id,
10303                 new_crtc_state->enable,
10304                 new_crtc_state->active,
10305                 new_crtc_state->planes_changed,
10306                 new_crtc_state->mode_changed,
10307                 new_crtc_state->active_changed,
10308                 new_crtc_state->connectors_changed);
10309
10310         /* Remove stream for any changed/disabled CRTC */
10311         if (!enable) {
10312
10313                 if (!dm_old_crtc_state->stream)
10314                         goto skip_modeset;
10315
10316                 if (dm_new_crtc_state->stream &&
10317                     is_timing_unchanged_for_freesync(new_crtc_state,
10318                                                      old_crtc_state)) {
10319                         new_crtc_state->mode_changed = false;
10320                         DRM_DEBUG_DRIVER(
10321                                 "Mode change not required for front porch change, setting mode_changed to %d\n",
10322                                 new_crtc_state->mode_changed);
10324
10325                         set_freesync_fixed_config(dm_new_crtc_state);
10326
10327                         goto skip_modeset;
10328                 } else if (aconnector &&
10329                            is_freesync_video_mode(&new_crtc_state->mode,
10330                                                   aconnector)) {
10331                         struct drm_display_mode *high_mode;
10332
10333                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10334                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10335                                 set_freesync_fixed_config(dm_new_crtc_state);
10336                         }
10337                 }
10338
10339                 ret = dm_atomic_get_state(state, &dm_state);
10340                 if (ret)
10341                         goto fail;
10342
10343                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10344                                 crtc->base.id);
10345
10346                 /* i.e. reset mode */
10347                 if (dc_remove_stream_from_ctx(
10348                                 dm->dc,
10349                                 dm_state->context,
10350                                 dm_old_crtc_state->stream) != DC_OK) {
10351                         ret = -EINVAL;
10352                         goto fail;
10353                 }
10354
10355                 dc_stream_release(dm_old_crtc_state->stream);
10356                 dm_new_crtc_state->stream = NULL;
10357
10358                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10359
10360                 *lock_and_validation_needed = true;
10361
10362         } else { /* Add stream for any updated/enabled CRTC */
10363                 /*
10364                  * Quick fix to prevent a NULL pointer dereference on new_stream
10365                  * when newly added MST connectors are not found in the existing
10366                  * crtc_state in the chained mode. TODO: dig out the root cause.
10367                  */
10368                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10369                         goto skip_modeset;
10370
10371                 if (modereset_required(new_crtc_state))
10372                         goto skip_modeset;
10373
10374                 if (modeset_required(new_crtc_state, new_stream,
10375                                      dm_old_crtc_state->stream)) {
10376
10377                         WARN_ON(dm_new_crtc_state->stream);
10378
10379                         ret = dm_atomic_get_state(state, &dm_state);
10380                         if (ret)
10381                                 goto fail;
10382
10383                         dm_new_crtc_state->stream = new_stream;
10384
10385                         dc_stream_retain(new_stream);
10386
10387                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10388                                          crtc->base.id);
10389
10390                         if (dc_add_stream_to_ctx(
10391                                         dm->dc,
10392                                         dm_state->context,
10393                                         dm_new_crtc_state->stream) != DC_OK) {
10394                                 ret = -EINVAL;
10395                                 goto fail;
10396                         }
10397
10398                         *lock_and_validation_needed = true;
10399                 }
10400         }
10401
10402 skip_modeset:
10403         /* Release extra reference */
10404         if (new_stream)
10405                 dc_stream_release(new_stream);
10406
10407         /*
10408          * We want to do dc stream updates that do not require a
10409          * full modeset below.
10410          */
10411         if (!(enable && aconnector && new_crtc_state->active))
10412                 return 0;
10413         /*
10414          * Given the above conditions, the dc state cannot be NULL because:
10415          * 1. We're in the process of enabling CRTCs (the stream has just been
10416          *    added to the dc context, or was already on the context),
10417          * 2. the CRTC has a valid connector attached, and
10418          * 3. the CRTC is currently active and enabled.
10419          * => The dc stream state currently exists.
10420          */
10421         BUG_ON(dm_new_crtc_state->stream == NULL);
10422
10423         /* Scaling or underscan settings */
10424         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10425                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10426                 update_stream_scaling_settings(
10427                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10428
10429         /* ABM settings */
10430         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10431
10432         /*
10433          * Color management settings. We also update color properties
10434          * when a modeset is needed, to ensure it gets reprogrammed.
10435          */
10436         if (dm_new_crtc_state->base.color_mgmt_changed ||
10437             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10438                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10439                 if (ret)
10440                         goto fail;
10441         }
10442
10443         /* Update Freesync settings. */
10444         get_freesync_config_for_crtc(dm_new_crtc_state,
10445                                      dm_new_conn_state);
10446
10447         return ret;
10448
10449 fail:
10450         if (new_stream)
10451                 dc_stream_release(new_stream);
10452         return ret;
10453 }
10454
10455 static bool should_reset_plane(struct drm_atomic_state *state,
10456                                struct drm_plane *plane,
10457                                struct drm_plane_state *old_plane_state,
10458                                struct drm_plane_state *new_plane_state)
10459 {
10460         struct drm_plane *other;
10461         struct drm_plane_state *old_other_state, *new_other_state;
10462         struct drm_crtc_state *new_crtc_state;
10463         int i;
10464
10465         /*
10466          * TODO: Remove this hack once the checks below are sufficient
10467          * to determine when we need to reset all the planes on the
10468          * stream.
10469          */
10470         if (state->allow_modeset)
10471                 return true;
10472
10473         /* Exit early if we know that we're adding or removing the plane. */
10474         if (old_plane_state->crtc != new_plane_state->crtc)
10475                 return true;
10476
10477         /* old crtc == new_crtc == NULL, plane not in context. */
10478         if (!new_plane_state->crtc)
10479                 return false;
10480
10481         new_crtc_state =
10482                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10483
10484         if (!new_crtc_state)
10485                 return true;
10486
10487         /* CRTC Degamma changes currently require us to recreate planes. */
10488         if (new_crtc_state->color_mgmt_changed)
10489                 return true;
10490
10491         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10492                 return true;
10493
10494         /*
10495          * If there are any new primary or overlay planes being added or
10496          * removed then the z-order can potentially change. To ensure
10497          * correct z-order and pipe acquisition the current DC architecture
10498          * requires us to remove and recreate all existing planes.
10499          *
10500          * TODO: Come up with a more elegant solution for this.
10501          */
10502         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10503                 struct amdgpu_framebuffer *old_afb, *new_afb;
10504                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10505                         continue;
10506
10507                 if (old_other_state->crtc != new_plane_state->crtc &&
10508                     new_other_state->crtc != new_plane_state->crtc)
10509                         continue;
10510
10511                 if (old_other_state->crtc != new_other_state->crtc)
10512                         return true;
10513
10514                 /* Src/dst size and scaling updates. */
10515                 if (old_other_state->src_w != new_other_state->src_w ||
10516                     old_other_state->src_h != new_other_state->src_h ||
10517                     old_other_state->crtc_w != new_other_state->crtc_w ||
10518                     old_other_state->crtc_h != new_other_state->crtc_h)
10519                         return true;
10520
10521                 /* Rotation / mirroring updates. */
10522                 if (old_other_state->rotation != new_other_state->rotation)
10523                         return true;
10524
10525                 /* Blending updates. */
10526                 if (old_other_state->pixel_blend_mode !=
10527                     new_other_state->pixel_blend_mode)
10528                         return true;
10529
10530                 /* Alpha updates. */
10531                 if (old_other_state->alpha != new_other_state->alpha)
10532                         return true;
10533
10534                 /* Colorspace changes. */
10535                 if (old_other_state->color_range != new_other_state->color_range ||
10536                     old_other_state->color_encoding != new_other_state->color_encoding)
10537                         return true;
10538
10539                 /* Framebuffer checks fall at the end. */
10540                 if (!old_other_state->fb || !new_other_state->fb)
10541                         continue;
10542
10543                 /* Pixel format changes can require bandwidth updates. */
10544                 if (old_other_state->fb->format != new_other_state->fb->format)
10545                         return true;
10546
10547                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10548                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10549
10550                 /* Tiling and DCC changes also require bandwidth updates. */
10551                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10552                     old_afb->base.modifier != new_afb->base.modifier)
10553                         return true;
10554         }
10555
10556         return false;
10557 }
10558
10559 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10560                               struct drm_plane_state *new_plane_state,
10561                               struct drm_framebuffer *fb)
10562 {
10563         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10564         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10565         unsigned int pitch;
10566         bool linear;
10567
10568         if (fb->width > new_acrtc->max_cursor_width ||
10569             fb->height > new_acrtc->max_cursor_height) {
10570                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10571                                  fb->width,
10572                                  fb->height);
10573                 return -EINVAL;
10574         }
10575         if (new_plane_state->src_w != fb->width << 16 ||
10576             new_plane_state->src_h != fb->height << 16) {
10577                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10578                 return -EINVAL;
10579         }
10580
10581         /* Pitch in pixels */
10582         pitch = fb->pitches[0] / fb->format->cpp[0];
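              /*
               * For example, a 64x64 ARGB8888 cursor FB has pitches[0] = 256
               * bytes and cpp[0] = 4 bytes per pixel, i.e. a 64-pixel pitch.
               */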
10583
10584         if (fb->width != pitch) {
10585                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10586                                  fb->width, pitch);
10587                 return -EINVAL;
10588         }
10589
10590         switch (pitch) {
10591         case 64:
10592         case 128:
10593         case 256:
10594                 /* FB pitch is supported by cursor plane */
10595                 break;
10596         default:
10597                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10598                 return -EINVAL;
10599         }
10600
10601         /* Core DRM takes care of checking FB modifiers, so we only need to
10602          * check tiling flags when the FB doesn't have a modifier. */
10603         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10604                 if (adev->family < AMDGPU_FAMILY_AI) {
10605                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10606                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10607                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10608                 } else {
10609                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10610                 }
10611                 if (!linear) {
10612                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10613                         return -EINVAL;
10614                 }
10615         }
10616
10617         return 0;
10618 }
10619
10620 static int dm_update_plane_state(struct dc *dc,
10621                                  struct drm_atomic_state *state,
10622                                  struct drm_plane *plane,
10623                                  struct drm_plane_state *old_plane_state,
10624                                  struct drm_plane_state *new_plane_state,
10625                                  bool enable,
10626                                  bool *lock_and_validation_needed)
10627 {
10629         struct dm_atomic_state *dm_state = NULL;
10630         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10631         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10632         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10633         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10634         struct amdgpu_crtc *new_acrtc;
10635         bool needs_reset;
10636         int ret = 0;
10637
10638
10640         old_plane_crtc = old_plane_state->crtc;
10641         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10642         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10643
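              /*
               * Cursor planes never get a dc_plane_state of their own; DC
               * programs the hardware cursor through stream/CRTC updates, so
               * only the cursor FB and position need to be validated here.
               */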
10644         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10645                 if (!enable || !new_plane_crtc ||
10646                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10647                         return 0;
10648
10649                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10650
10651                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10652                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10653                         return -EINVAL;
10654                 }
10655
10656                 if (new_plane_state->fb) {
10657                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10658                                                  new_plane_state->fb);
10659                         if (ret)
10660                                 return ret;
10661                 }
10662
10663                 return 0;
10664         }
10665
10666         needs_reset = should_reset_plane(state, plane, old_plane_state,
10667                                          new_plane_state);
10668
10669         /* Remove any changed/removed planes */
10670         if (!enable) {
10671                 if (!needs_reset)
10672                         return 0;
10673
10674                 if (!old_plane_crtc)
10675                         return 0;
10676
10677                 old_crtc_state = drm_atomic_get_old_crtc_state(
10678                                 state, old_plane_crtc);
10679                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10680
10681                 if (!dm_old_crtc_state->stream)
10682                         return 0;
10683
10684                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10685                                 plane->base.id, old_plane_crtc->base.id);
10686
10687                 ret = dm_atomic_get_state(state, &dm_state);
10688                 if (ret)
10689                         return ret;
10690
10691                 if (!dc_remove_plane_from_context(
10692                                 dc,
10693                                 dm_old_crtc_state->stream,
10694                                 dm_old_plane_state->dc_state,
10695                                 dm_state->context)) {
10697                         return -EINVAL;
10698                 }
10699
10701                 dc_plane_state_release(dm_old_plane_state->dc_state);
10702                 dm_new_plane_state->dc_state = NULL;
10703
10704                 *lock_and_validation_needed = true;
10705
10706         } else { /* Add new planes */
10707                 struct dc_plane_state *dc_new_plane_state;
10708
10709                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10710                         return 0;
10711
10712                 if (!new_plane_crtc)
10713                         return 0;
10714
10715                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10716                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10717
10718                 if (!dm_new_crtc_state->stream)
10719                         return 0;
10720
10721                 if (!needs_reset)
10722                         return 0;
10723
10724                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10725                 if (ret)
10726                         return ret;
10727
10728                 WARN_ON(dm_new_plane_state->dc_state);
10729
10730                 dc_new_plane_state = dc_create_plane_state(dc);
10731                 if (!dc_new_plane_state)
10732                         return -ENOMEM;
10733
10734                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10735                                  plane->base.id, new_plane_crtc->base.id);
10736
10737                 ret = fill_dc_plane_attributes(
10738                         drm_to_adev(new_plane_crtc->dev),
10739                         dc_new_plane_state,
10740                         new_plane_state,
10741                         new_crtc_state);
10742                 if (ret) {
10743                         dc_plane_state_release(dc_new_plane_state);
10744                         return ret;
10745                 }
10746
10747                 ret = dm_atomic_get_state(state, &dm_state);
10748                 if (ret) {
10749                         dc_plane_state_release(dc_new_plane_state);
10750                         return ret;
10751                 }
10752
10753                 /*
10754                  * Any atomic check errors that occur after this will
10755                  * not need a release. The plane state will be attached
10756                  * to the stream, and therefore part of the atomic
10757                  * state. It'll be released when the atomic state is
10758                  * cleaned.
10759                  */
10760                 if (!dc_add_plane_to_context(
10761                                 dc,
10762                                 dm_new_crtc_state->stream,
10763                                 dc_new_plane_state,
10764                                 dm_state->context)) {
10765
10766                         dc_plane_state_release(dc_new_plane_state);
10767                         return -EINVAL;
10768                 }
10769
10770                 dm_new_plane_state->dc_state = dc_new_plane_state;
10771
10772                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10773
10774                 /* Tell DC to do a full surface update every time there
10775                  * is a plane change. Inefficient, but works for now.
10776                  */
10777                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10778
10779                 *lock_and_validation_needed = true;
10780         }
10781
10783         return ret;
10784 }
10785
10786 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10787                                        int *src_w, int *src_h)
10788 {
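              /*
               * src_w/src_h are 16.16 fixed point; shift out the fractional
               * part. A 90 or 270 degree rotation swaps the source width and
               * height as seen by the scanout hardware.
               */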
10789         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10790         case DRM_MODE_ROTATE_90:
10791         case DRM_MODE_ROTATE_270:
10792                 *src_w = plane_state->src_h >> 16;
10793                 *src_h = plane_state->src_w >> 16;
10794                 break;
10795         case DRM_MODE_ROTATE_0:
10796         case DRM_MODE_ROTATE_180:
10797         default:
10798                 *src_w = plane_state->src_w >> 16;
10799                 *src_h = plane_state->src_h >> 16;
10800                 break;
10801         }
10802 }
10803
10804 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10805                                 struct drm_crtc *crtc,
10806                                 struct drm_crtc_state *new_crtc_state)
10807 {
10808         struct drm_plane *cursor = crtc->cursor, *underlying;
10809         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10810         int i;
10811         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10812         int cursor_src_w, cursor_src_h;
10813         int underlying_src_w, underlying_src_h;
10814
10815         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10816          * cursor per pipe but it's going to inherit the scaling and
10817          * positioning from the underlying pipe. Check the cursor plane's
10818          * blending properties match the underlying planes'. */
10819
10820         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10821         if (!new_cursor_state || !new_cursor_state->fb)
10822                 return 0;
10824
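              /*
               * Scale factors are kept as integers in 1/1000th units so the
               * cursor and underlying-plane scaling can be compared with
               * plain integer math.
               */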
10825         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10826         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10827         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10828
10829         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10830                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10831                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10832                         continue;
10833
10834                 /* Ignore disabled planes */
10835                 if (!new_underlying_state->fb)
10836                         continue;
10837
10838                 dm_get_oriented_plane_size(new_underlying_state,
10839                                            &underlying_src_w, &underlying_src_h);
10840                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10841                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10842
10843                 if (cursor_scale_w != underlying_scale_w ||
10844                     cursor_scale_h != underlying_scale_h) {
10845                         drm_dbg_atomic(crtc->dev,
10846                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10847                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10848                         return -EINVAL;
10849                 }
10850
10851                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10852                 if (new_underlying_state->crtc_x <= 0 &&
10853                     new_underlying_state->crtc_y <= 0 &&
10854                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10855                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10856                         break;
10857         }
10858
10859         return 0;
10860 }
10861
10862 #if defined(CONFIG_DRM_AMD_DC_DCN)
10863 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10864 {
10865         struct drm_connector *connector;
10866         struct drm_connector_state *conn_state, *old_conn_state;
10867         struct amdgpu_dm_connector *aconnector = NULL;
10868         int i;
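
              /*
               * Find an MST connector in this state whose (new or old) state
               * uses the given CRTC: changing DSC on one stream can affect
               * every other stream on the same MST topology, so all of their
               * CRTCs have to be pulled into the atomic state.
               */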
10869         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10870                 if (!conn_state->crtc)
10871                         conn_state = old_conn_state;
10872
10873                 if (conn_state->crtc != crtc)
10874                         continue;
10875
10876                 aconnector = to_amdgpu_dm_connector(connector);
10877                 if (!aconnector->port || !aconnector->mst_port)
10878                         aconnector = NULL;
10879                 else
10880                         break;
10881         }
10882
10883         if (!aconnector)
10884                 return 0;
10885
10886         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10887 }
10888 #endif
10889
10890 /**
10891  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10892  * @dev: The DRM device
10893  * @state: The atomic state to commit
10894  *
10895  * Validate that the given atomic state is programmable by DC into hardware.
10896  * This involves constructing a &struct dc_state reflecting the new hardware
10897  * state we wish to commit, then querying DC to see if it is programmable. It's
10898  * important not to modify the existing DC state. Otherwise, atomic_check
10899  * may unexpectedly commit hardware changes.
10900  *
10901  * When validating the DC state, it's important that the right locks are
10902  * acquired. For full updates case which removes/adds/updates streams on one
10903  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10904  * that any such full update commit will wait for completion of any outstanding
10905  * flip using DRMs synchronization events.
10906  *
10907  * Note that DM adds the affected connectors for all CRTCs in state, when that
10908  * might not seem necessary. This is because DC stream creation requires the
10909  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10910  * be possible but non-trivial - a possible TODO item.
10911  *
10912  * Return: -Error code if validation failed.
10913  */
10914 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10915                                   struct drm_atomic_state *state)
10916 {
10917         struct amdgpu_device *adev = drm_to_adev(dev);
10918         struct dm_atomic_state *dm_state = NULL;
10919         struct dc *dc = adev->dm.dc;
10920         struct drm_connector *connector;
10921         struct drm_connector_state *old_con_state, *new_con_state;
10922         struct drm_crtc *crtc;
10923         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10924         struct drm_plane *plane;
10925         struct drm_plane_state *old_plane_state, *new_plane_state;
10926         enum dc_status status;
10927         int ret, i;
10928         bool lock_and_validation_needed = false;
10929         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10930 #if defined(CONFIG_DRM_AMD_DC_DCN)
10931         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10932         struct drm_dp_mst_topology_state *mst_state;
10933         struct drm_dp_mst_topology_mgr *mgr;
10934 #endif
10935
10936         trace_amdgpu_dm_atomic_check_begin(state);
10937
10938         ret = drm_atomic_helper_check_modeset(dev, state);
10939         if (ret) {
10940                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10941                 goto fail;
10942         }
10943
10944         /* Check connector changes */
10945         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10946                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10947                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10948
10949                 /* Skip connectors that are disabled or part of modeset already. */
10950                 if (!old_con_state->crtc && !new_con_state->crtc)
10951                         continue;
10952
10953                 if (!new_con_state->crtc)
10954                         continue;
10955
10956                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10957                 if (IS_ERR(new_crtc_state)) {
10958                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10959                         ret = PTR_ERR(new_crtc_state);
10960                         goto fail;
10961                 }
10962
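                      /*
                       * An ABM (Adaptive Backlight Management) level change
                       * has no DRM mode change of its own; flag it as a
                       * connector change so the CRTC takes the full-update
                       * path and the new level gets programmed.
                       */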
10963                 if (dm_old_con_state->abm_level !=
10964                     dm_new_con_state->abm_level)
10965                         new_crtc_state->connectors_changed = true;
10966         }
10967
10968 #if defined(CONFIG_DRM_AMD_DC_DCN)
10969         if (dc_resource_is_dsc_encoding_supported(dc)) {
10970                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10971                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10972                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10973                                 if (ret) {
10974                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10975                                         goto fail;
10976                                 }
10977                         }
10978                 }
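                      /*
                       * Pre-compute DSC configurations for the affected MST
                       * streams so the per-CRTC and per-plane checks below
                       * see a realistic DSC state; the result is validated
                       * again in the lock-and-validation phase.
                       */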
10979                 pre_validate_dsc(state, &dm_state, vars);
10980         }
10981 #endif
10982         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10983                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10984
10985                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10986                     !new_crtc_state->color_mgmt_changed &&
10987                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10988                     !dm_old_crtc_state->dsc_force_changed)
10989                         continue;
10990
10991                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10992                 if (ret) {
10993                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10994                         goto fail;
10995                 }
10996
10997                 if (!new_crtc_state->enable)
10998                         continue;
10999
11000                 ret = drm_atomic_add_affected_connectors(state, crtc);
11001                 if (ret) {
11002                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11003                         goto fail;
11004                 }
11005
11006                 ret = drm_atomic_add_affected_planes(state, crtc);
11007                 if (ret) {
11008                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11009                         goto fail;
11010                 }
11011
11012                 if (dm_old_crtc_state->dsc_force_changed)
11013                         new_crtc_state->mode_changed = true;
11014         }
11015
11016         /*
11017          * Add all primary and overlay planes on the CRTC to the state
11018          * whenever a plane is enabled to maintain correct z-ordering
11019          * and to enable fast surface updates.
11020          */
11021         drm_for_each_crtc(crtc, dev) {
11022                 bool modified = false;
11023
11024                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11025                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11026                                 continue;
11027
11028                         if (new_plane_state->crtc == crtc ||
11029                             old_plane_state->crtc == crtc) {
11030                                 modified = true;
11031                                 break;
11032                         }
11033                 }
11034
11035                 if (!modified)
11036                         continue;
11037
11038                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11039                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11040                                 continue;
11041
11042                         new_plane_state =
11043                                 drm_atomic_get_plane_state(state, plane);
11044
11045                         if (IS_ERR(new_plane_state)) {
11046                                 ret = PTR_ERR(new_plane_state);
11047                                 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
11048                                 goto fail;
11049                         }
11050                 }
11051         }
11052
11053         /* Remove existing planes if they are modified */
11054         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11055                 ret = dm_update_plane_state(dc, state, plane,
11056                                             old_plane_state,
11057                                             new_plane_state,
11058                                             false,
11059                                             &lock_and_validation_needed);
11060                 if (ret) {
11061                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11062                         goto fail;
11063                 }
11064         }
11065
11066         /* Disable all crtcs which require disable */
11067         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11068                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11069                                            old_crtc_state,
11070                                            new_crtc_state,
11071                                            false,
11072                                            &lock_and_validation_needed);
11073                 if (ret) {
11074                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11075                         goto fail;
11076                 }
11077         }
11078
11079         /* Enable all crtcs which require enable */
11080         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11081                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11082                                            old_crtc_state,
11083                                            new_crtc_state,
11084                                            true,
11085                                            &lock_and_validation_needed);
11086                 if (ret) {
11087                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11088                         goto fail;
11089                 }
11090         }
11091
11092         /* Add new/modified planes */
11093         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11094                 ret = dm_update_plane_state(dc, state, plane,
11095                                             old_plane_state,
11096                                             new_plane_state,
11097                                             true,
11098                                             &lock_and_validation_needed);
11099                 if (ret) {
11100                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11101                         goto fail;
11102                 }
11103         }
11104
11105         /* Run this here since we want to validate the streams we created */
11106         ret = drm_atomic_helper_check_planes(dev, state);
11107         if (ret) {
11108                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11109                 goto fail;
11110         }
11111
11112         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11113                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11114                 if (dm_new_crtc_state->mpo_requested)
11115                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11116         }
11117
11118         /* Check cursor planes scaling */
11119         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11120                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11121                 if (ret) {
11122                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11123                         goto fail;
11124                 }
11125         }
11126
11127         if (state->legacy_cursor_update) {
11128                 /*
11129                  * This is a fast cursor update coming from the plane update
11130                  * helper, check if it can be done asynchronously for better
11131                  * performance.
11132                  */
11133                 state->async_update =
11134                         !drm_atomic_helper_async_check(dev, state);
11135
11136                 /*
11137                  * Skip the remaining global validation if this is an async
11138                  * update. Cursor updates can be done without affecting
11139                  * state or bandwidth calcs and this avoids the performance
11140                  * penalty of locking the private state object and
11141                  * allocating a new dc_state.
11142                  */
11143                 if (state->async_update)
11144                         return 0;
11145         }
11146
11147         /* Check scaling and underscan changes */
11148         /* TODO: Scaling-change validation was removed because a new stream
11149          * cannot be committed into the context w/o causing a full reset.
11150          * Need to decide how to handle this.
11151          */
11152         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11153                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11154                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11155                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11156
11157                 /* Skip any modesets/resets */
11158                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11159                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11160                         continue;
11161
11162                 /* Skip anything that is not a scaling or underscan change */
11163                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11164                         continue;
11165
11166                 lock_and_validation_needed = true;
11167         }
11168
11169 #if defined(CONFIG_DRM_AMD_DC_DCN)
11170         /* set the slot info for each mst_state based on the link encoding format */
11171         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11172                 struct amdgpu_dm_connector *aconnector;
11173                 struct drm_connector *connector;
11174                 struct drm_connector_list_iter iter;
11175                 u8 link_coding_cap;
11176
11177                 if (!mgr->mst_state)
11178                         continue;
11179
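                      /*
                       * Map this manager back to its root connector (matched
                       * via conn_base_id) to find the dc_link; the link
                       * encoding format (8b/10b vs 128b/132b) determines the
                       * usable MST slot count.
                       */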
11180                 drm_connector_list_iter_begin(dev, &iter);
11181                 drm_for_each_connector_iter(connector, &iter) {
11182                         int id = connector->index;
11183
11184                         if (id == mst_state->mgr->conn_base_id) {
11185                                 aconnector = to_amdgpu_dm_connector(connector);
11186                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11187                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11188
11189                                 break;
11190                         }
11191                 }
11192                 drm_connector_list_iter_end(&iter);
11194         }
11195 #endif
11196         /*
11197          * Streams and planes are reset when there are changes that affect
11198          * bandwidth. Anything that affects bandwidth needs to go through
11199          * DC global validation to ensure that the configuration can be applied
11200          * to hardware.
11201          *
11202          * In this case we currently have to stall out here in atomic_check for
11203          * outstanding commits to finish, because our IRQ handlers reference
11204          * DRM state directly - we can end up disabling interrupts too early
11205          * otherwise.
11206          *
11207          * TODO: Remove this stall and drop DM state private objects.
11208          */
11209         if (lock_and_validation_needed) {
11210                 ret = dm_atomic_get_state(state, &dm_state);
11211                 if (ret) {
11212                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11213                         goto fail;
11214                 }
11215
11216                 ret = do_aquire_global_lock(dev, state);
11217                 if (ret) {
11218                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11219                         goto fail;
11220                 }
11221
11222 #if defined(CONFIG_DRM_AMD_DC_DCN)
11223                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11224                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                              ret = -EINVAL;
11225                         goto fail;
11226                 }
11227
11228                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11229                 if (ret) {
11230                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11231                         goto fail;
11232                 }
11233 #endif
11234
11235                 /*
11236                  * Perform validation of MST topology in the state:
11237                  * We need to perform MST atomic check before calling
11238                  * dc_validate_global_state(), or there is a chance
11239                  * to get stuck in an infinite loop and hang eventually.
11240                  */
11241                 ret = drm_dp_mst_atomic_check(state);
11242                 if (ret) {
11243                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11244                         goto fail;
11245                 }
11246                 status = dc_validate_global_state(dc, dm_state->context, true);
11247                 if (status != DC_OK) {
11248                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11249                                        dc_status_to_str(status), status);
11250                         ret = -EINVAL;
11251                         goto fail;
11252                 }
11253         } else {
11254                 /*
11255                  * The commit is a fast update. Fast updates shouldn't change
11256                  * the DC context, affect global validation, and can have their
11257                  * commit work done in parallel with other commits not touching
11258                  * the same resource. If we have a new DC context as part of
11259                  * the DM atomic state from validation we need to free it and
11260                  * retain the existing one instead.
11261                  *
11262                  * Furthermore, since the DM atomic state only contains the DC
11263                  * context and can safely be annulled, we can free the state
11264                  * and clear the associated private object now to free
11265                  * some memory and avoid a possible use-after-free later.
11266                  */
11267
11268                 for (i = 0; i < state->num_private_objs; i++) {
11269                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11270
11271                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11272                                 int j = state->num_private_objs - 1;
11273
11274                                 dm_atomic_destroy_state(obj,
11275                                                 state->private_objs[i].state);
11276
11277                                 /* If i is not at the end of the array then the
11278                                  * last element needs to be moved to where i was
11279                                  * before the array can safely be truncated.
11280                                  */
11281                                 if (i != j)
11282                                         state->private_objs[i] =
11283                                                 state->private_objs[j];
11284
11285                                 state->private_objs[j].ptr = NULL;
11286                                 state->private_objs[j].state = NULL;
11287                                 state->private_objs[j].old_state = NULL;
11288                                 state->private_objs[j].new_state = NULL;
11289
11290                                 state->num_private_objs = j;
11291                                 break;
11292                         }
11293                 }
11294         }
11295
11296         /* Store the overall update type for use later in atomic check. */
11297         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11298                 struct dm_crtc_state *dm_new_crtc_state =
11299                         to_dm_crtc_state(new_crtc_state);
11300
11301                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11302                                                          UPDATE_TYPE_FULL :
11303                                                          UPDATE_TYPE_FAST;
11304         }
11305
11306         /* ret must be 0 (success) at this point */
11307         WARN_ON(ret);
11308
11309         trace_amdgpu_dm_atomic_check_finish(state, ret);
11310
11311         return ret;
11312
11313 fail:
11314         if (ret == -EDEADLK)
11315                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11316         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11317                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11318         else
11319                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11320
11321         trace_amdgpu_dm_atomic_check_finish(state, ret);
11322
11323         return ret;
11324 }
11325
11326 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11327                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11328 {
11329         uint8_t dpcd_data;
11330         bool capable = false;
11331
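              /*
               * DPCD 0x007 (DOWN_STREAM_PORT_COUNT) bit 6 is
               * MSA_TIMING_PAR_IGNORED: the sink can ignore the MSA timing
               * parameters, a prerequisite for DP Adaptive-Sync/FreeSync.
               */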
11332         if (amdgpu_dm_connector->dc_link &&
11333                 dm_helpers_dp_read_dpcd(
11334                                 NULL,
11335                                 amdgpu_dm_connector->dc_link,
11336                                 DP_DOWN_STREAM_PORT_COUNT,
11337                                 &dpcd_data,
11338                                 sizeof(dpcd_data))) {
11339                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
11340         }
11341
11342         return capable;
11343 }
11344
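      /*
       * Hand one chunk of a CEA extension block to DMUB for parsing. The
       * reply is either a per-chunk ACK or, once the final chunk has been
       * processed, the decoded AMD VSDB (FreeSync support and refresh-rate
       * range) if one was found.
       */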
11345 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11346                 unsigned int offset,
11347                 unsigned int total_length,
11348                 uint8_t *data,
11349                 unsigned int length,
11350                 struct amdgpu_hdmi_vsdb_info *vsdb)
11351 {
11352         bool res;
11353         union dmub_rb_cmd cmd;
11354         struct dmub_cmd_send_edid_cea *input;
11355         struct dmub_cmd_edid_cea_output *output;
11356
11357         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11358                 return false;
11359
11360         memset(&cmd, 0, sizeof(cmd));
11361
11362         input = &cmd.edid_cea.data.input;
11363
11364         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11365         cmd.edid_cea.header.sub_type = 0;
11366         cmd.edid_cea.header.payload_bytes =
11367                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11368         input->offset = offset;
11369         input->length = length;
11370         input->cea_total_length = total_length;
11371         memcpy(input->payload, data, length);
11372
11373         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11374         if (!res) {
11375                 DRM_ERROR("EDID CEA parser failed\n");
11376                 return false;
11377         }
11378
11379         output = &cmd.edid_cea.data.output;
11380
11381         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11382                 if (!output->ack.success) {
11383                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11384                                         output->ack.offset);
11385                 }
11386         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11387                 if (!output->amd_vsdb.vsdb_found)
11388                         return false;
11389
11390                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11391                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11392                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11393                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11394         } else {
11395                 DRM_WARN("Unknown EDID CEA parser results\n");
11396                 return false;
11397         }
11398
11399         return true;
11400 }
11401
11402 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11403                 uint8_t *edid_ext, int len,
11404                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11405 {
11406         int i;
11407
11408         /* send extension block to DMCU for parsing */
11409         for (i = 0; i < len; i += 8) {
11410                 bool res;
11411                 int offset;
11412
11413                 /* send 8 bytes at a time */
11414                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11415                         return false;
11416
11417                 if (i + 8 == len) {
11418                         /* the whole EDID block has been sent; expect the parsed result */
11419                         int version, min_rate, max_rate;
11420
11421                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11422                         if (res) {
11423                                 /* amd vsdb found */
11424                                 vsdb_info->freesync_supported = 1;
11425                                 vsdb_info->amd_vsdb_version = version;
11426                                 vsdb_info->min_refresh_rate_hz = min_rate;
11427                                 vsdb_info->max_refresh_rate_hz = max_rate;
11428                                 return true;
11429                         }
11430                         /* not amd vsdb */
11431                         return false;
11432                 }
11433
11434                 /* check for ack */
11435                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11436                 if (!res)
11437                         return false;
11438         }
11439
11440         return false;
11441 }
11442
11443 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11444                 uint8_t *edid_ext, int len,
11445                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11446 {
11447         int i;
11448
11449         /* send extension block to DMUB for parsing */
11450         for (i = 0; i < len; i += 8) {
11451                 /* send 8 bytes at a time */
11452                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11453                         return false;
11454         }
11455
11456         return vsdb_info->freesync_supported;
11457 }
11458
11459 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11460                 uint8_t *edid_ext, int len,
11461                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11462 {
11463         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11464
11465         if (adev->dm.dmub_srv)
11466                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11467         else
11468                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11469 }
11470
11471 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11472                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11473 {
11474         uint8_t *edid_ext = NULL;
11475         int i;
11476         bool valid_vsdb_found = false;
11477
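              /*
               * Open-code the relevant part of drm_find_cea_extension():
               * locate the first CEA extension block, then let the DMCU/DMUB
               * firmware parser search it for the AMD VSDB. On success the
               * index of the parsed extension block is returned.
               */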
11478         /*----- drm_find_cea_extension() -----*/
11479         /* No EDID or EDID extensions */
11480         if (edid == NULL || edid->extensions == 0)
11481                 return -ENODEV;
11482
11483         /* Find CEA extension */
11484         for (i = 0; i < edid->extensions; i++) {
11485                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11486                 if (edid_ext[0] == CEA_EXT)
11487                         break;
11488         }
11489
11490         if (i == edid->extensions)
11491                 return -ENODEV;
11492
11493         /*----- cea_db_offsets() -----*/
11494         if (edid_ext[0] != CEA_EXT)
11495                 return -ENODEV;
11496
11497         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11498
11499         return valid_vsdb_found ? i : -ENODEV;
11500 }
11501
11502 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11503                                         struct edid *edid)
11504 {
11505         int i = 0;
11506         struct detailed_timing *timing;
11507         struct detailed_non_pixel *data;
11508         struct detailed_data_monitor_range *range;
11509         struct amdgpu_dm_connector *amdgpu_dm_connector =
11510                         to_amdgpu_dm_connector(connector);
11511         struct dm_connector_state *dm_con_state = NULL;
11512         struct dc_sink *sink;
11513
11514         struct drm_device *dev = connector->dev;
11515         struct amdgpu_device *adev = drm_to_adev(dev);
11516         bool freesync_capable = false;
11517         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11518
11519         if (!connector->state) {
11520                 DRM_ERROR("%s - Connector has no state\n", __func__);
11521                 goto update;
11522         }
11523
11524         sink = amdgpu_dm_connector->dc_sink ?
11525                 amdgpu_dm_connector->dc_sink :
11526                 amdgpu_dm_connector->dc_em_sink;
11527
11528         if (!edid || !sink) {
11529                 dm_con_state = to_dm_connector_state(connector->state);
11530
11531                 amdgpu_dm_connector->min_vfreq = 0;
11532                 amdgpu_dm_connector->max_vfreq = 0;
11533                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11534                 connector->display_info.monitor_range.min_vfreq = 0;
11535                 connector->display_info.monitor_range.max_vfreq = 0;
11536                 freesync_capable = false;
11537
11538                 goto update;
11539         }
11540
11541         dm_con_state = to_dm_connector_state(connector->state);
11542
11543         if (!adev->dm.freesync_module)
11544                 goto update;
11545
11547         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
11548             sink->sink_signal == SIGNAL_TYPE_EDP) {
11549                 bool edid_check_required = false;
11550
11551                 if (edid) {
11552                         edid_check_required = is_dp_capable_without_timing_msa(
11553                                                 adev->dm.dc,
11554                                                 amdgpu_dm_connector);
11555                 }
11556
11557                 if (edid_check_required && (edid->version > 1 ||
11558                     (edid->version == 1 && edid->revision > 1))) {
11559                         for (i = 0; i < 4; i++) {
11561                                 timing  = &edid->detailed_timings[i];
11562                                 data    = &timing->data.other_data;
11563                                 range   = &data->data.range;
11564                                 /*
11565                                  * Check if monitor has continuous frequency mode
11566                                  */
11567                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11568                                         continue;
11569                                 /*
11570                                  * Only "range limits only" descriptors (flags == 1,
11571                                  * i.e. no additional timing information provided)
11572                                  * are supported. Default GTF, GTF secondary curve
11573                                  * and CVT descriptors are not.
11574                                  */
11575                                 if (range->flags != 1)
11576                                         continue;
11577
11578                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11579                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
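                                      /* EDID stores the max pixel clock in 10 MHz units */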
11580                                 amdgpu_dm_connector->pixel_clock_mhz =
11581                                         range->pixel_clock_mhz * 10;
11582
11583                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11584                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11585
11586                                 break;
11587                         }
11588
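                              /*
                               * Require more than 10 Hz of usable range before
                               * advertising the display as freesync capable.
                               */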
11589                         if (amdgpu_dm_connector->max_vfreq -
11590                             amdgpu_dm_connector->min_vfreq > 10)
11591                                 freesync_capable = true;
11594                 }
11595         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11596                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11597                 if (i >= 0 && vsdb_info.freesync_supported) {
11601                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11602                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11603                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11604                                 freesync_capable = true;
11605
11606                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11607                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11608                 }
11609         }
11610
11611 update:
11612         if (dm_con_state)
11613                 dm_con_state->freesync_capable = freesync_capable;
11614
11615         if (connector->vrr_capable_property)
11616                 drm_connector_set_vrr_capable_property(connector,
11617                                                        freesync_capable);
11618 }
11619
11620 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11621 {
11622         struct amdgpu_device *adev = drm_to_adev(dev);
11623         struct dc *dc = adev->dm.dc;
11624         int i;
11625
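              /*
               * Push the current force_timing_sync setting into every active
               * stream and retrigger the per-frame CRTC master
               * synchronization.
               */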
11626         mutex_lock(&adev->dm.dc_lock);
11627         if (dc->current_state) {
11628                 for (i = 0; i < dc->current_state->stream_count; ++i)
11629                         dc->current_state->streams[i]
11630                                 ->triggered_crtc_reset.enabled =
11631                                 adev->dm.force_timing_sync;
11632
11633                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11634                 dc_trigger_sync(dc, dc->current_state);
11635         }
11636         mutex_unlock(&adev->dm.dc_lock);
11637 }
11638
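/*
 * Register write helper used by DC: writes @value to @address through the
 * CGS interface and logs the access via the amdgpu_dc_wreg tracepoint.
 */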
11639 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11640                        uint32_t value, const char *func_name)
11641 {
11642 #ifdef DM_CHECK_ADDR_0
11643         if (address == 0) {
11644                 DC_ERR("invalid register write; address = 0\n");
11645                 return;
11646         }
11647 #endif
11648         cgs_write_register(ctx->cgs_device, address, value);
11649         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11650 }
11651
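/*
 * Register read helper used by DC: returns the value at @address, read
 * through the CGS interface, and logs the access via the amdgpu_dc_rreg
 * tracepoint.
 */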
11652 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11653                           const char *func_name)
11654 {
11655         uint32_t value;
11656 #ifdef DM_CHECK_ADDR_0
11657         if (address == 0) {
11658                 DC_ERR("invalid register read; address = 0\n");
11659                 return 0;
11660         }
11661 #endif
11662
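        /*
         * While the DMUB offload helper is gathering a register sequence
         * (and is not just collecting burst writes), a synchronous read
         * cannot return real data; flag the misuse and bail out.
         */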
11663         if (ctx->dmub_srv &&
11664             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11665             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11666                 ASSERT(false);
11667                 return 0;
11668         }
11669
11670         value = cgs_read_register(ctx->cgs_device, address);
11671
11672         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11673
11674         return value;
11675 }
11676
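/*
 * Translate a DMUB_ASYNC_TO_SYNC_ACCESS_* status into the caller-visible
 * result: for AUX commands a non-negative return value is the reply length
 * and *operation_result carries an AUX_RET_* code; for SET_CONFIG the return
 * value is 0 on success and *operation_result carries the SET_CONFIG status.
 * -1 is returned for any failure.
 */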
11677 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11678                                                 struct dc_context *ctx,
11679                                                 uint8_t status_type,
11680                                                 uint32_t *operation_result)
11681 {
11682         struct amdgpu_device *adev = ctx->driver_context;
11683         int return_status = -1;
11684         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11685
11686         if (is_cmd_aux) {
11687                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11688                         return_status = p_notify->aux_reply.length;
11689                         *operation_result = p_notify->result;
11690                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11691                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11692                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11693                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11694                 } else {
11695                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11696                 }
11697         } else {
11698                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11699                         return_status = 0;
11700                         *operation_result = p_notify->sc_status;
11701                 } else {
11702                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11703                 }
11704         }
11705
11706         return return_status;
11707 }
11708
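/*
 * Synchronous wrapper around the DMUB-offloaded AUX and SET_CONFIG paths:
 * issue the asynchronous request, wait (up to 10 s) for the
 * dmub_aux_transfer_done notification, then translate the outcome via
 * amdgpu_dm_set_dmub_async_sync_status().
 *
 * Illustrative call (a sketch only, not an actual call site; the payload
 * fields shown are assumed to be filled in by the caller):
 *
 *	struct aux_payload payload = { .address = dpcd_addr, .length = len };
 *	uint32_t op_result;
 *	int reply_len = amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
 *				link_index, &payload, &op_result);
 */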
11709 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11710         unsigned int link_index, void *cmd_payload, void *operation_result)
11711 {
11712         struct amdgpu_device *adev = ctx->driver_context;
11713         int ret = 0;
11714
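        /*
         * Fire off the request. AUX transfers always complete through the
         * dmub_aux_transfer_done wait below; SET_CONFIG may complete inline,
         * in which case dc_process_dmub_set_config_async() returns true and
         * dmub_notify already holds the status, so no wait is needed.
         */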
11715         if (is_cmd_aux) {
11716                 dc_process_dmub_aux_transfer_async(ctx->dc,
11717                         link_index, (struct aux_payload *)cmd_payload);
11718         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11719                                         (struct set_config_cmd_payload *)cmd_payload,
11720                                         adev->dm.dmub_notify)) {
11721                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11722                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11723                                         (uint32_t *)operation_result);
11724         }
11725
11726         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11727         if (ret == 0) {
11728                 DRM_ERROR("timed out waiting for DMUB AUX/SET_CONFIG reply!\n");
11729                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11730                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11731                                 (uint32_t *)operation_result);
11732         }
11733
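        /*
         * The notification has arrived; for an ACKed AUX read, copy the
         * reply payload back into the caller's buffer.
         */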
11734         if (is_cmd_aux) {
11735                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11736                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11737
11738                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11739                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11740                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11741                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11742                                        adev->dm.dmub_notify->aux_reply.length);
11743                         }
11744                 }
11745         }
11746
11747         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11748                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11749                         (uint32_t *)operation_result);
11750 }
11751
11752 /*
11753  * Check whether seamless boot is supported.
11754  *
11755  * So far we only support seamless boot on CHIP_VANGOGH.
11756  * If everything goes well, we may consider expanding
11757  * seamless boot to other ASICs.
11758  */
11759 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11760 {
11761         switch (adev->asic_type) {
11762         case CHIP_VANGOGH:
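                /*
                 * Vangogh is only reported as seamless-boot capable when
                 * the stolen VGA memory is not being kept reserved.
                 */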
11763                 if (!adev->mman.keep_stolen_vga_memory)
11764                         return true;
11765                 break;
11766         default:
11767                 break;
11768         }
11769
11770         return false;
11771 }