drm/amd/display: For vblank_disable_immediate, check PSR is really used
linux-2.6-microblaze.git: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64
65 #include "ivsrcid/ivsrcid_vislands30.h"
66
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93
94 #include "soc15_common.h"
95 #endif
96
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117
118 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120
121 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
126
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
129
130 /**
131  * DOC: overview
132  *
133  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135  * requests into DC requests, and DC responses into DRM responses.
136  *
137  * The root control structure is &struct amdgpu_display_manager.
138  */
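/*
 * Editor's sketch (illustrative only, not driver code): the liaison role
 * described above means a DRM request is translated into DC calls roughly
 * as follows. amdgpu_dm_atomic_commit_tail() is a real entry point declared
 * below; the dc_commit_state() step is the assumed DC-side hand-off.
 */
#if 0
	/* DRM hands dm an atomic state ... */
	amdgpu_dm_atomic_commit_tail(state);
	/* ... which dm translates into dc_stream_state/dc_plane_state
	 * updates and submits to DC, e.g. via dc_commit_state(dc, context).
	 */
#endif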
139
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 {
147         switch (link->dpcd_caps.dongle_type) {
148         case DISPLAY_DONGLE_NONE:
149                 return DRM_MODE_SUBCONNECTOR_Native;
150         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151                 return DRM_MODE_SUBCONNECTOR_VGA;
152         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153         case DISPLAY_DONGLE_DP_DVI_DONGLE:
154                 return DRM_MODE_SUBCONNECTOR_DVID;
155         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157                 return DRM_MODE_SUBCONNECTOR_HDMIA;
158         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159         default:
160                 return DRM_MODE_SUBCONNECTOR_Unknown;
161         }
162 }
163
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 {
166         struct dc_link *link = aconnector->dc_link;
167         struct drm_connector *connector = &aconnector->base;
168         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169
170         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171                 return;
172
173         if (aconnector->dc_sink)
174                 subconnector = get_subconnector_type(link);
175
176         drm_object_property_set_value(&connector->base,
177                         connector->dev->mode_config.dp_subconnector_property,
178                         subconnector);
179 }
180
181 /*
182  * initializes drm_device display related structures, based on the information
183  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
184  * drm_encoder, drm_mode_config
185  *
186  * Returns 0 on success
187  */
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* removes and deallocates the drm structures, created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193                                 struct drm_plane *plane,
194                                 unsigned long possible_crtcs,
195                                 const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197                                struct drm_plane *plane,
198                                uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
201                                     uint32_t link_index,
202                                     struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204                                   struct amdgpu_encoder *aencoder,
205                                   uint32_t link_index);
206
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212                                   struct drm_atomic_state *state);
213
214 static void handle_cursor_update(struct drm_plane *plane,
215                                  struct drm_plane_state *old_plane_state);
216
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
222
223 static bool
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225                                  struct drm_crtc_state *new_crtc_state);
226 /*
227  * dm_vblank_get_counter
228  *
229  * @brief
230  * Get counter for number of vertical blanks
231  *
232  * @param
233  * struct amdgpu_device *adev - [in] desired amdgpu device
234  * int crtc - [in] which CRTC to get the counter from
235  *
236  * @return
237  * Counter for vertical blanks
238  */
239 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
240 {
241         if (crtc >= adev->mode_info.num_crtc)
242                 return 0;
243         else {
244                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
245
246                 if (acrtc->dm_irq_params.stream == NULL) {
247                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
248                                   crtc);
249                         return 0;
250                 }
251
252                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
253         }
254 }
255
256 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257                                   u32 *vbl, u32 *position)
258 {
259         uint32_t v_blank_start, v_blank_end, h_position, v_position;
260
261         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262                 return -EINVAL;
263         else {
264                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
265
266                 if (acrtc->dm_irq_params.stream == NULL) {
267                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268                                   crtc);
269                         return 0;
270                 }
271
272                 /*
273                  * TODO rework base driver to use values directly.
274                  * for now parse it back into reg-format
275                  */
276                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277                                          &v_blank_start,
278                                          &v_blank_end,
279                                          &h_position,
280                                          &v_position);
281
282                 *position = v_position | (h_position << 16);
283                 *vbl = v_blank_start | (v_blank_end << 16);
284         }
285
286         return 0;
287 }
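/*
 * Unpacking sketch (illustrative): consumers of the packed reg-format
 * values returned above can split them back out as below, assuming each
 * value fits in 16 bits.
 */
#if 0
	u32 v_position    = position & 0xffff;
	u32 h_position    = position >> 16;
	u32 v_blank_start = vbl & 0xffff;
	u32 v_blank_end   = vbl >> 16;
#endif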
288
289 static bool dm_is_idle(void *handle)
290 {
291         /* XXX todo */
292         return true;
293 }
294
295 static int dm_wait_for_idle(void *handle)
296 {
297         /* XXX todo */
298         return 0;
299 }
300
301 static bool dm_check_soft_reset(void *handle)
302 {
303         return false;
304 }
305
306 static int dm_soft_reset(void *handle)
307 {
308         /* XXX todo */
309         return 0;
310 }
311
312 static struct amdgpu_crtc *
313 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314                      int otg_inst)
315 {
316         struct drm_device *dev = adev_to_drm(adev);
317         struct drm_crtc *crtc;
318         struct amdgpu_crtc *amdgpu_crtc;
319
320         if (WARN_ON(otg_inst == -1))
321                 return adev->mode_info.crtcs[0];
322
323         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324                 amdgpu_crtc = to_amdgpu_crtc(crtc);
325
326                 if (amdgpu_crtc->otg_inst == otg_inst)
327                         return amdgpu_crtc;
328         }
329
330         return NULL;
331 }
332
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 {
335         return acrtc->dm_irq_params.freesync_config.state ==
336                        VRR_STATE_ACTIVE_VARIABLE ||
337                acrtc->dm_irq_params.freesync_config.state ==
338                        VRR_STATE_ACTIVE_FIXED;
339 }
340
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 {
343         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 }
346
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348                                               struct dm_crtc_state *new_state)
349 {
350         if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
351                 return true;
352         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353                 return true;
354         else
355                 return false;
356 }
357
358 /**
359  * dm_pflip_high_irq() - Handle pageflip interrupt
360  * @interrupt_params: interrupt parameters, used to look up the CRTC
361  *
362  * Handles the pageflip interrupt by notifying all interested parties
363  * that the pageflip has been completed.
364  */
365 static void dm_pflip_high_irq(void *interrupt_params)
366 {
367         struct amdgpu_crtc *amdgpu_crtc;
368         struct common_irq_params *irq_params = interrupt_params;
369         struct amdgpu_device *adev = irq_params->adev;
370         unsigned long flags;
371         struct drm_pending_vblank_event *e;
372         uint32_t vpos, hpos, v_blank_start, v_blank_end;
373         bool vrr_active;
374
375         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376
377         /* IRQ could occur when in initial stage */
378         /* TODO work and BO cleanup */
379         if (amdgpu_crtc == NULL) {
380                 DC_LOG_PFLIP("CRTC is null, returning.\n");
381                 return;
382         }
383
384         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385
386         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
387                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
388                                                  amdgpu_crtc->pflip_status,
389                                                  AMDGPU_FLIP_SUBMITTED,
390                                                  amdgpu_crtc->crtc_id,
391                                                  amdgpu_crtc);
392                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393                 return;
394         }
395
396         /* page flip completed. */
397         e = amdgpu_crtc->event;
398         amdgpu_crtc->event = NULL;
399
400         WARN_ON(!e);
401
402         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403
404         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
405         if (!vrr_active ||
406             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407                                       &v_blank_end, &hpos, &vpos) ||
408             (vpos < v_blank_start)) {
409                 /* Update to correct count and vblank timestamp if racing with
410                  * vblank irq. This also updates to the correct vblank timestamp
411                  * even in VRR mode, as scanout is past the front-porch atm.
412                  */
413                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414
415                 /* Wake up userspace by sending the pageflip event with proper
416                  * count and timestamp of vblank of flip completion.
417                  */
418                 if (e) {
419                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420
421                         /* Event sent, so done with vblank for this flip */
422                         drm_crtc_vblank_put(&amdgpu_crtc->base);
423                 }
424         } else if (e) {
425                 /* VRR active and inside front-porch: vblank count and
426                  * timestamp for pageflip event will only be up to date after
427                  * drm_crtc_handle_vblank() has been executed from late vblank
428                  * irq handler after start of back-porch (vline 0). We queue the
429                  * pageflip event for send-out by drm_crtc_handle_vblank() with
430                  * updated timestamp and count, once it runs after us.
431                  *
432                  * We need to open-code this instead of using the helper
433                  * drm_crtc_arm_vblank_event(), as that helper would
434                  * call drm_crtc_accurate_vblank_count(), which we must
435                  * not call in VRR mode while we are in front-porch!
436                  */
437
438                 /* sequence will be replaced by real count during send-out. */
439                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440                 e->pipe = amdgpu_crtc->crtc_id;
441
442                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443                 e = NULL;
444         }
445
446         /* Keep track of the vblank of this flip for flip throttling. We use
447          * the cooked hw counter, as it is incremented at the start of the
448          * vblank in which the pageflip completed, so last_flip_vblank is the
449          * forbidden count for queueing new pageflips if vsync + VRR is enabled.
450          */
451         amdgpu_crtc->dm_irq_params.last_flip_vblank =
452                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453
454         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456
457         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458                      amdgpu_crtc->crtc_id, amdgpu_crtc,
459                      vrr_active, (int) !e);
460 }
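/*
 * Scanout timeline for the VRR handling above (descriptive):
 *
 *   | active video | front porch (stretched in VRR) | vsync / back porch |
 *                  ^ vpos >= v_blank_start:           ^ vline 0: VUPDATE irq
 *                    queue the event for                sends the queued
 *                    drm_crtc_handle_vblank()           events
 *
 * Outside the front porch (vpos < v_blank_start), or with VRR off, the
 * event is sent immediately with an accurate count and timestamp.
 */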
461
462 static void dm_vupdate_high_irq(void *interrupt_params)
463 {
464         struct common_irq_params *irq_params = interrupt_params;
465         struct amdgpu_device *adev = irq_params->adev;
466         struct amdgpu_crtc *acrtc;
467         struct drm_device *drm_dev;
468         struct drm_vblank_crtc *vblank;
469         ktime_t frame_duration_ns, previous_timestamp;
470         unsigned long flags;
471         int vrr_active;
472
473         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474
475         if (acrtc) {
476                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477                 drm_dev = acrtc->base.dev;
478                 vblank = &drm_dev->vblank[acrtc->base.index];
479                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480                 frame_duration_ns = vblank->time - previous_timestamp;
481
482                 if (frame_duration_ns > 0) {
483                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
484                                                 frame_duration_ns,
485                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
487                 }
488
489                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490                               acrtc->crtc_id,
491                               vrr_active);
492
493                 /* Core vblank handling is done here after the end of front-porch
494                  * in vrr mode, as vblank timestamping only gives valid results
495                  * once scanout is past the front-porch. This also delivers any
496                  * page-flip completion events that were queued to us because a
497                  * pageflip happened inside the front-porch.
498                  */
499                 if (vrr_active) {
500                         drm_crtc_handle_vblank(&acrtc->base);
501
502                         /* BTR processing for pre-DCE12 ASICs */
503                         if (acrtc->dm_irq_params.stream &&
504                             adev->family < AMDGPU_FAMILY_AI) {
505                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506                                 mod_freesync_handle_v_update(
507                                     adev->dm.freesync_module,
508                                     acrtc->dm_irq_params.stream,
509                                     &acrtc->dm_irq_params.vrr_params);
510
511                                 dc_stream_adjust_vmin_vmax(
512                                     adev->dm.dc,
513                                     acrtc->dm_irq_params.stream,
514                                     &acrtc->dm_irq_params.vrr_params.adjust);
515                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516                         }
517                 }
518         }
519 }
520
521 /**
522  * dm_crtc_high_irq() - Handles CRTC interrupt
523  * @interrupt_params: used for determining the CRTC instance
524  *
525  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526  * event handler.
527  */
528 static void dm_crtc_high_irq(void *interrupt_params)
529 {
530         struct common_irq_params *irq_params = interrupt_params;
531         struct amdgpu_device *adev = irq_params->adev;
532         struct amdgpu_crtc *acrtc;
533         unsigned long flags;
534         int vrr_active;
535
536         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537         if (!acrtc)
538                 return;
539
540         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541
542         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543                       vrr_active, acrtc->dm_irq_params.active_planes);
544
545         /*
546          * Core vblank handling at the start of front-porch is only possible
547          * in non-vrr mode, as only then does vblank timestamping give valid
548          * results while inside the front-porch. Otherwise defer it to
549          * dm_vupdate_high_irq after the end of front-porch.
550          */
551         if (!vrr_active)
552                 drm_crtc_handle_vblank(&acrtc->base);
553
554         /*
555          * The following must happen at the start of vblank, for crc
556          * computation and below-the-range btr support in vrr mode.
557          */
558         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559
560         /* BTR updates need to happen before VUPDATE on Vega and above. */
561         if (adev->family < AMDGPU_FAMILY_AI)
562                 return;
563
564         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565
566         if (acrtc->dm_irq_params.stream &&
567             acrtc->dm_irq_params.vrr_params.supported &&
568             acrtc->dm_irq_params.freesync_config.state ==
569                     VRR_STATE_ACTIVE_VARIABLE) {
570                 mod_freesync_handle_v_update(adev->dm.freesync_module,
571                                              acrtc->dm_irq_params.stream,
572                                              &acrtc->dm_irq_params.vrr_params);
573
574                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575                                            &acrtc->dm_irq_params.vrr_params.adjust);
576         }
577
578         /*
579          * If there aren't any active_planes then DCN HUBP may be clock-gated.
580          * In that case, pageflip completion interrupts won't fire and pageflip
581          * completion events won't get delivered. Prevent this by sending
582          * pending pageflip events from here if a flip is still pending.
583          *
584          * If any planes are enabled, use dm_pflip_high_irq() instead, to
585          * avoid race conditions between flip programming and completion,
586          * which could cause too early flip completion events.
587          */
588         if (adev->family >= AMDGPU_FAMILY_RV &&
589             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590             acrtc->dm_irq_params.active_planes == 0) {
591                 if (acrtc->event) {
592                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593                         acrtc->event = NULL;
594                         drm_crtc_vblank_put(&acrtc->base);
595                 }
596                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
597         }
598
599         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 }
601
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
604 /**
605  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606  * DCN generation ASICs
607  * @interrupt_params: interrupt parameters
608  *
609  * Used to set crc window/read out crc value at vertical line 0 position
610  */
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612 {
613         struct common_irq_params *irq_params = interrupt_params;
614         struct amdgpu_device *adev = irq_params->adev;
615         struct amdgpu_crtc *acrtc;
616
617         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618
619         if (!acrtc)
620                 return;
621
622         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 }
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
625
626 /**
627  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
628  * @adev: amdgpu_device pointer
629  * @notify: dmub notification structure
630  *
631  * Dmub AUX or SET_CONFIG command completion processing callback.
632  * Copies the dmub notification to DM, to be read by the AUX-command-issuing
633  * thread, and signals the event to wake up that thread.
634  */
635 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
636                                         struct dmub_notification *notify)
637 {
638         if (adev->dm.dmub_notify)
639                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
640         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
641                 complete(&adev->dm.dmub_aux_transfer_done);
642 }
643
644 /**
645  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
646  * @adev: amdgpu_device pointer
647  * @notify: dmub notification structure
648  *
649  * Dmub Hpd interrupt processing callback. Gets the display index through
650  * the link index and calls the helper to do the processing.
651  */
652 static void dmub_hpd_callback(struct amdgpu_device *adev,
653                               struct dmub_notification *notify)
654 {
655         struct amdgpu_dm_connector *aconnector;
656         struct amdgpu_dm_connector *hpd_aconnector = NULL;
657         struct drm_connector *connector;
658         struct drm_connector_list_iter iter;
659         struct dc_link *link;
660         uint8_t link_index = 0;
661         struct drm_device *dev;
662
663         if (adev == NULL)
664                 return;
665
666         if (notify == NULL) {
667                 DRM_ERROR("DMUB HPD callback notification was NULL");
668                 return;
669         }
670
671         if (notify->link_index >= adev->dm.dc->link_count) {
672                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
673                 return;
674         }
675
676         link_index = notify->link_index;
677         link = adev->dm.dc->links[link_index];
678         dev = adev->dm.ddev;
679
680         drm_connector_list_iter_begin(dev, &iter);
681         drm_for_each_connector_iter(connector, &iter) {
682                 aconnector = to_amdgpu_dm_connector(connector);
683                 if (link && aconnector->dc_link == link) {
684                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
685                         hpd_aconnector = aconnector;
686                         break;
687                 }
688         }
689         drm_connector_list_iter_end(&iter);
690
691         if (hpd_aconnector) {
692                 if (notify->type == DMUB_NOTIFICATION_HPD)
693                         handle_hpd_irq_helper(hpd_aconnector);
694                 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
695                         handle_hpd_rx_irq(hpd_aconnector);
696         }
697 }
698
699 /**
700  * register_dmub_notify_callback - Sets callback for DMUB notify
701  * @adev: amdgpu_device pointer
702  * @type: Type of dmub notification
703  * @callback: Dmub interrupt callback function
704  * @dmub_int_thread_offload: offload indicator
705  *
706  * API to register a dmub callback handler for a dmub notification.
707  * Also sets an indicator for whether the callback processing is to be
708  * offloaded to the dmub interrupt handling thread.
709  * Return: true if successfully registered, false on invalid type or NULL callback
710  */
711 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
712                                           enum dmub_notification_type type,
713                                           dmub_notify_interrupt_callback_t callback,
714                                           bool dmub_int_thread_offload)
715 {
716         if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
717                 return false;
718
719         adev->dm.dmub_callback[type] = callback;
720         adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
721
722         return true;
723 }
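/*
 * Usage sketch (illustrative; assumed to mirror how amdgpu_dm_init() wires
 * these up): the last argument requests offload of the callback to the
 * delayed HPD work queue instead of running it in interrupt context.
 */
#if 0
	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
				      dmub_aux_setconfig_callback, false);
	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
				      dmub_hpd_callback, true);
#endif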
724
725 static void dm_handle_hpd_work(struct work_struct *work)
726 {
727         struct dmub_hpd_work *dmub_hpd_wrk;
728
729         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
730
731         if (!dmub_hpd_wrk->dmub_notify) {
732                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
733                 return;
734         }
735
736         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
737                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
738                 dmub_hpd_wrk->dmub_notify);
739         }
740
741         kfree(dmub_hpd_wrk->dmub_notify);
742         kfree(dmub_hpd_wrk);
743
744 }
745
746 #define DMUB_TRACE_MAX_READ 64
747 /**
748  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
749  * @interrupt_params: used for determining the Outbox instance
750  *
751  * Handles the Outbox interrupt by dispatching DMUB notifications and
752  * draining the DMUB trace buffer.
753  */
754 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
755 {
756         struct dmub_notification notify;
757         struct common_irq_params *irq_params = interrupt_params;
758         struct amdgpu_device *adev = irq_params->adev;
759         struct amdgpu_display_manager *dm = &adev->dm;
760         struct dmcub_trace_buf_entry entry = { 0 };
761         uint32_t count = 0;
762         struct dmub_hpd_work *dmub_hpd_wrk;
763         struct dc_link *plink = NULL;
764
765         if (dc_enable_dmub_notifications(adev->dm.dc) &&
766                 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
767
768                 do {
769                         dc_stat_get_dmub_notification(adev->dm.dc, &notify);
770                         if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
771                                 DRM_ERROR("DM: notify type %d invalid!", notify.type);
772                                 continue;
773                         }
774                         if (!dm->dmub_callback[notify.type]) {
775                                 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
776                                 continue;
777                         }
778                         if (dm->dmub_thread_offload[notify.type]) {
779                                 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
780                                 if (!dmub_hpd_wrk) {
781                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk");
782                                         return;
783                                 }
784                                 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
785                                 if (!dmub_hpd_wrk->dmub_notify) {
786                                         kfree(dmub_hpd_wrk);
787                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
788                                         return;
789                                 }
790                                 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
791                                 memcpy(dmub_hpd_wrk->dmub_notify, &notify,
792                                        sizeof(struct dmub_notification));
793                                 dmub_hpd_wrk->adev = adev;
794                                 if (notify.type == DMUB_NOTIFICATION_HPD) {
795                                         plink = adev->dm.dc->links[notify.link_index];
796                                         if (plink) {
797                                                 plink->hpd_status =
798                                                         notify.hpd_status == DP_HPD_PLUG;
799                                         }
800                                 }
801                                 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
802                         } else {
803                                 dm->dmub_callback[notify.type](adev, &notify);
804                         }
805                 } while (notify.pending_notification);
806         }
807
808
809         do {
810                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
811                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
812                                                         entry.param0, entry.param1);
813
814                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
815                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
816                 } else {
817                         break;
818                 }
819                 count++;
820
821         } while (count <= DMUB_TRACE_MAX_READ);
822
823         if (count > DMUB_TRACE_MAX_READ)
824                 DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
825 }
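/*
 * Dispatch summary for the handler above (descriptive): each pending DMUB
 * notification is validated against dm->dmub_callback[], then either run
 * inline or, when dm->dmub_thread_offload[type] is set, copied into a
 * dmub_hpd_work item and queued on dm.delayed_hpd_wq so the callback runs
 * in process context. Afterwards, up to DMUB_TRACE_MAX_READ outbox0 trace
 * entries are drained and forwarded to the tracing infrastructure.
 */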
826 #endif /* CONFIG_DRM_AMD_DC_DCN */
827
828 static int dm_set_clockgating_state(void *handle,
829                   enum amd_clockgating_state state)
830 {
831         return 0;
832 }
833
834 static int dm_set_powergating_state(void *handle,
835                   enum amd_powergating_state state)
836 {
837         return 0;
838 }
839
840 /* Prototypes of private functions */
841 static int dm_early_init(void *handle);
842
843 /* Allocate memory for FBC compressed data  */
844 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
845 {
846         struct drm_device *dev = connector->dev;
847         struct amdgpu_device *adev = drm_to_adev(dev);
848         struct dm_compressor_info *compressor = &adev->dm.compressor;
849         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
850         struct drm_display_mode *mode;
851         unsigned long max_size = 0;
852
853         if (adev->dm.dc->fbc_compressor == NULL)
854                 return;
855
856         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
857                 return;
858
859         if (compressor->bo_ptr)
860                 return;
861
862
863         list_for_each_entry(mode, &connector->modes, head) {
864                 if (max_size < mode->htotal * mode->vtotal)
865                         max_size = mode->htotal * mode->vtotal;
866         }
867
868         if (max_size) {
869                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
870                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
871                             &compressor->gpu_addr, &compressor->cpu_addr);
872
873                 if (r) {
874                         DRM_ERROR("DM: Failed to initialize FBC\n");
875                 } else {
876                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
877                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
878                 }
879
880         }
881
882 }
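/*
 * Sizing example for the allocation above (illustrative): a 3840x2160 mode
 * with htotal x vtotal of roughly 4400 x 2250 gives max_size = 9,900,000,
 * and the "* 4" allocates 4 bytes per pixel, i.e. about 38 MiB of GTT for
 * the compressor buffer.
 */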
883
884 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
885                                           int pipe, bool *enabled,
886                                           unsigned char *buf, int max_bytes)
887 {
888         struct drm_device *dev = dev_get_drvdata(kdev);
889         struct amdgpu_device *adev = drm_to_adev(dev);
890         struct drm_connector *connector;
891         struct drm_connector_list_iter conn_iter;
892         struct amdgpu_dm_connector *aconnector;
893         int ret = 0;
894
895         *enabled = false;
896
897         mutex_lock(&adev->dm.audio_lock);
898
899         drm_connector_list_iter_begin(dev, &conn_iter);
900         drm_for_each_connector_iter(connector, &conn_iter) {
901                 aconnector = to_amdgpu_dm_connector(connector);
902                 if (aconnector->audio_inst != port)
903                         continue;
904
905                 *enabled = true;
906                 ret = drm_eld_size(connector->eld);
907                 memcpy(buf, connector->eld, min(max_bytes, ret));
908
909                 break;
910         }
911         drm_connector_list_iter_end(&conn_iter);
912
913         mutex_unlock(&adev->dm.audio_lock);
914
915         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
916
917         return ret;
918 }
919
920 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
921         .get_eld = amdgpu_dm_audio_component_get_eld,
922 };
923
924 static int amdgpu_dm_audio_component_bind(struct device *kdev,
925                                        struct device *hda_kdev, void *data)
926 {
927         struct drm_device *dev = dev_get_drvdata(kdev);
928         struct amdgpu_device *adev = drm_to_adev(dev);
929         struct drm_audio_component *acomp = data;
930
931         acomp->ops = &amdgpu_dm_audio_component_ops;
932         acomp->dev = kdev;
933         adev->dm.audio_component = acomp;
934
935         return 0;
936 }
937
938 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
939                                           struct device *hda_kdev, void *data)
940 {
941         struct drm_device *dev = dev_get_drvdata(kdev);
942         struct amdgpu_device *adev = drm_to_adev(dev);
943         struct drm_audio_component *acomp = data;
944
945         acomp->ops = NULL;
946         acomp->dev = NULL;
947         adev->dm.audio_component = NULL;
948 }
949
950 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
951         .bind   = amdgpu_dm_audio_component_bind,
952         .unbind = amdgpu_dm_audio_component_unbind,
953 };
954
955 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
956 {
957         int i, ret;
958
959         if (!amdgpu_audio)
960                 return 0;
961
962         adev->mode_info.audio.enabled = true;
963
964         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
965
966         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
967                 adev->mode_info.audio.pin[i].channels = -1;
968                 adev->mode_info.audio.pin[i].rate = -1;
969                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
970                 adev->mode_info.audio.pin[i].status_bits = 0;
971                 adev->mode_info.audio.pin[i].category_code = 0;
972                 adev->mode_info.audio.pin[i].connected = false;
973                 adev->mode_info.audio.pin[i].id =
974                         adev->dm.dc->res_pool->audios[i]->inst;
975                 adev->mode_info.audio.pin[i].offset = 0;
976         }
977
978         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
979         if (ret < 0)
980                 return ret;
981
982         adev->dm.audio_registered = true;
983
984         return 0;
985 }
986
987 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
988 {
989         if (!amdgpu_audio)
990                 return;
991
992         if (!adev->mode_info.audio.enabled)
993                 return;
994
995         if (adev->dm.audio_registered) {
996                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
997                 adev->dm.audio_registered = false;
998         }
999
1000         /* TODO: Disable audio? */
1001
1002         adev->mode_info.audio.enabled = false;
1003 }
1004
1005 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1006 {
1007         struct drm_audio_component *acomp = adev->dm.audio_component;
1008
1009         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1010                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1011
1012                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1013                                                  pin, -1);
1014         }
1015 }
1016
1017 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1018 {
1019         const struct dmcub_firmware_header_v1_0 *hdr;
1020         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1021         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1022         const struct firmware *dmub_fw = adev->dm.dmub_fw;
1023         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1024         struct abm *abm = adev->dm.dc->res_pool->abm;
1025         struct dmub_srv_hw_params hw_params;
1026         enum dmub_status status;
1027         const unsigned char *fw_inst_const, *fw_bss_data;
1028         uint32_t i, fw_inst_const_size, fw_bss_data_size;
1029         bool has_hw_support;
1030
1031         if (!dmub_srv)
1032                 /* DMUB isn't supported on the ASIC. */
1033                 return 0;
1034
1035         if (!fb_info) {
1036                 DRM_ERROR("No framebuffer info for DMUB service.\n");
1037                 return -EINVAL;
1038         }
1039
1040         if (!dmub_fw) {
1041                 /* Firmware required for DMUB support. */
1042                 DRM_ERROR("No firmware provided for DMUB.\n");
1043                 return -EINVAL;
1044         }
1045
1046         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1047         if (status != DMUB_STATUS_OK) {
1048                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1049                 return -EINVAL;
1050         }
1051
1052         if (!has_hw_support) {
1053                 DRM_INFO("DMUB unsupported on ASIC\n");
1054                 return 0;
1055         }
1056
1057         /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1058         status = dmub_srv_hw_reset(dmub_srv);
1059         if (status != DMUB_STATUS_OK)
1060                 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1061
1062         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1063
1064         fw_inst_const = dmub_fw->data +
1065                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1066                         PSP_HEADER_BYTES;
1067
1068         fw_bss_data = dmub_fw->data +
1069                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1070                       le32_to_cpu(hdr->inst_const_bytes);
1071
1072         /* Copy firmware and bios info into FB memory. */
1073         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1074                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1075
1076         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1077
1078         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1079          * amdgpu_ucode_init_single_fw will load dmub firmware
1080          * fw_inst_const part to cw0; otherwise, the firmware back door load
1081          * will be done by dm_dmub_hw_init
1082          */
1083         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1084                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1085                                 fw_inst_const_size);
1086         }
1087
1088         if (fw_bss_data_size)
1089                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1090                        fw_bss_data, fw_bss_data_size);
1091
1092         /* Copy firmware bios info into FB memory. */
1093         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1094                adev->bios_size);
1095
1096         /* Reset regions that need to be reset. */
1097         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1098                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1099
1100         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1101                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1102
1103         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1104                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1105
1106         /* Initialize hardware. */
1107         memset(&hw_params, 0, sizeof(hw_params));
1108         hw_params.fb_base = adev->gmc.fb_start;
1109         hw_params.fb_offset = adev->gmc.aper_base;
1110
1111         /* backdoor load firmware and trigger dmub running */
1112         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1113                 hw_params.load_inst_const = true;
1114
1115         if (dmcu)
1116                 hw_params.psp_version = dmcu->psp_version;
1117
1118         for (i = 0; i < fb_info->num_fb; ++i)
1119                 hw_params.fb[i] = &fb_info->fb[i];
1120
1121         switch (adev->ip_versions[DCE_HWIP][0]) {
1122         case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1123                 hw_params.dpia_supported = true;
1124 #if defined(CONFIG_DRM_AMD_DC_DCN)
1125                 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1126 #endif
1127                 break;
1128         default:
1129                 break;
1130         }
1131
1132         status = dmub_srv_hw_init(dmub_srv, &hw_params);
1133         if (status != DMUB_STATUS_OK) {
1134                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1135                 return -EINVAL;
1136         }
1137
1138         /* Wait for firmware load to finish. */
1139         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1140         if (status != DMUB_STATUS_OK)
1141                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1142
1143         /* Init DMCU and ABM if available. */
1144         if (dmcu && abm) {
1145                 dmcu->funcs->dmcu_init(dmcu);
1146                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1147         }
1148
1149         if (!adev->dm.dc->ctx->dmub_srv)
1150                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1151         if (!adev->dm.dc->ctx->dmub_srv) {
1152                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1153                 return -ENOMEM;
1154         }
1155
1156         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1157                  adev->dm.dmcub_fw_version);
1158
1159         return 0;
1160 }
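/*
 * Layout of the DMUB firmware blob as parsed above (illustrative):
 *
 *   ucode_array_offset_bytes
 *   v
 *   [ PSP header (0x100) | inst_const payload | PSP footer (0x100) | bss/data ]
 *                        ^ fw_inst_const                           ^ fw_bss_data
 *
 * hence fw_inst_const_size = inst_const_bytes - PSP_HEADER_BYTES -
 * PSP_FOOTER_BYTES, and fw_bss_data starts inst_const_bytes past the
 * ucode array offset.
 */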
1161
1162 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1163 {
1164         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1165         enum dmub_status status;
1166         bool init;
1167
1168         if (!dmub_srv) {
1169                 /* DMUB isn't supported on the ASIC. */
1170                 return;
1171         }
1172
1173         status = dmub_srv_is_hw_init(dmub_srv, &init);
1174         if (status != DMUB_STATUS_OK)
1175                 DRM_WARN("DMUB hardware init check failed: %d\n", status);
1176
1177         if (status == DMUB_STATUS_OK && init) {
1178                 /* Wait for firmware load to finish. */
1179                 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1180                 if (status != DMUB_STATUS_OK)
1181                         DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1182         } else {
1183                 /* Perform the full hardware initialization. */
1184                 dm_dmub_hw_init(adev);
1185         }
1186 }
1187
1188 #if defined(CONFIG_DRM_AMD_DC_DCN)
1189 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1190 {
1191         uint64_t pt_base;
1192         uint32_t logical_addr_low;
1193         uint32_t logical_addr_high;
1194         uint32_t agp_base, agp_bot, agp_top;
1195         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1196
1197         memset(pa_config, 0, sizeof(*pa_config));
1198
1199         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1200         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1201
1202         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1203                 /*
1204                  * Raven2 has a HW issue that prevents it from using vram
1205                  * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. Work around it by
1206                  * increasing the system aperture high address (add 1) to
1207                  * get rid of the VM fault and hardware hang.
1208                  */
1209                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1210         else
1211                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1212
1213         agp_base = 0;
1214         agp_bot = adev->gmc.agp_start >> 24;
1215         agp_top = adev->gmc.agp_end >> 24;
1216
1217
1218         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1219         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1220         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1221         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1222         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1223         page_table_base.low_part = lower_32_bits(pt_base);
1224
1225         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1226         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1227
1228         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1229         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1230         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1231
1232         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1233         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1234         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1235
1236         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1237         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1238         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1239
1240         pa_config->is_hvm_enabled = 0;
1241
1242 }
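/*
 * Granularity notes for the math above (illustrative): system aperture
 * addresses are programmed in 256KB units (>> 18 / << 18), AGP in 16MB
 * units (>> 24 / << 24), and the GART page table addresses are split into
 * a 4-bit high part (bits 47:44) plus the low 32 bits of the 4KB-aligned
 * address (>> 12). For example, fb_start = 0x0000008000000000 yields
 * logical_addr_low = 0x200000.
 */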
1243 #endif
1244 #if defined(CONFIG_DRM_AMD_DC_DCN)
1245 static void vblank_control_worker(struct work_struct *work)
1246 {
1247         struct vblank_control_work *vblank_work =
1248                 container_of(work, struct vblank_control_work, work);
1249         struct amdgpu_display_manager *dm = vblank_work->dm;
1250
1251         mutex_lock(&dm->dc_lock);
1252
1253         if (vblank_work->enable)
1254                 dm->active_vblank_irq_count++;
1255         else if (dm->active_vblank_irq_count)
1256                 dm->active_vblank_irq_count--;
1257
1258         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1259
1260         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1261
1262         /* Control PSR based on vblank requirements from OS */
1263         if (vblank_work->stream && vblank_work->stream->link) {
1264                 if (vblank_work->enable) {
1265                         if (vblank_work->stream->link->psr_settings.psr_allow_active)
1266                                 amdgpu_dm_psr_disable(vblank_work->stream);
1267                 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1268                            !vblank_work->stream->link->psr_settings.psr_allow_active &&
1269                            vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1270                         amdgpu_dm_psr_enable(vblank_work->stream);
1271                 }
1272         }
1273
1274         mutex_unlock(&dm->dc_lock);
1275
1276         dc_stream_release(vblank_work->stream);
1277
1278         kfree(vblank_work);
1279 }
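/*
 * PSR decision summary for the worker above (descriptive): when the OS
 * enables vblank irqs, any active PSR is disabled so vblank timestamps stay
 * accurate; when vblank irqs are disabled, PSR is only (re-)enabled if the
 * link supports it, it is not already active, and the crtc's
 * allow_psr_entry flag is set.
 */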
1280
1281 #endif
1282
1283 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1284 {
1285         struct hpd_rx_irq_offload_work *offload_work;
1286         struct amdgpu_dm_connector *aconnector;
1287         struct dc_link *dc_link;
1288         struct amdgpu_device *adev;
1289         enum dc_connection_type new_connection_type = dc_connection_none;
1290         unsigned long flags;
1291
1292         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1293         aconnector = offload_work->offload_wq->aconnector;
1294
1295         if (!aconnector) {
1296                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1297                 goto skip;
1298         }
1299
1300         adev = drm_to_adev(aconnector->base.dev);
1301         dc_link = aconnector->dc_link;
1302
1303         mutex_lock(&aconnector->hpd_lock);
1304         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1305                 DRM_ERROR("KMS: Failed to detect connector\n");
1306         mutex_unlock(&aconnector->hpd_lock);
1307
1308         if (new_connection_type == dc_connection_none)
1309                 goto skip;
1310
1311         if (amdgpu_in_reset(adev))
1312                 goto skip;
1313
1314         mutex_lock(&adev->dm.dc_lock);
1315         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1316                 dc_link_dp_handle_automated_test(dc_link);
1317         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1318                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1319                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1320                 dc_link_dp_handle_link_loss(dc_link);
1321                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1322                 offload_work->offload_wq->is_handling_link_loss = false;
1323                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1324         }
1325         mutex_unlock(&adev->dm.dc_lock);
1326
1327 skip:
1328         kfree(offload_work);
1329
1330 }
1331
1332 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1333 {
1334         int max_caps = dc->caps.max_links;
1335         int i = 0;
1336         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1337
1338         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1339
1340         if (!hpd_rx_offload_wq)
1341                 return NULL;
1342
1343
1344         for (i = 0; i < max_caps; i++) {
1345                 hpd_rx_offload_wq[i].wq =
1346                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1347
1348                 if (hpd_rx_offload_wq[i].wq == NULL) {
1349                         DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1350                         goto out_err;
1351                 }
1352
1353                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1354         }
1355
        return hpd_rx_offload_wq;

out_err:
        /* Unwind: destroy the workqueues created so far and free the array. */
        while (--i >= 0)
                destroy_workqueue(hpd_rx_offload_wq[i].wq);
        kfree(hpd_rx_offload_wq);
        return NULL;
}
1358
1359 struct amdgpu_stutter_quirk {
1360         u16 chip_vendor;
1361         u16 chip_device;
1362         u16 subsys_vendor;
1363         u16 subsys_device;
1364         u8 revision;
1365 };
1366
1367 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1368         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1369         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1370         { 0, 0, 0, 0, 0 },
1371 };
1372
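/* Match @pdev's PCI IDs and revision against the stutter quirk list above. */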
1373 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1374 {
1375         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1376
1377         while (p && p->chip_device != 0) {
1378                 if (pdev->vendor == p->chip_vendor &&
1379                     pdev->device == p->chip_device &&
1380                     pdev->subsystem_vendor == p->subsys_vendor &&
1381                     pdev->subsystem_device == p->subsys_device &&
1382                     pdev->revision == p->revision) {
1383                         return true;
1384                 }
1385                 ++p;
1386         }
1387         return false;
1388 }
1389
1390 static int amdgpu_dm_init(struct amdgpu_device *adev)
1391 {
1392         struct dc_init_data init_data;
1393 #ifdef CONFIG_DRM_AMD_DC_HDCP
1394         struct dc_callback_init init_params;
1395 #endif
1396         int r;
1397
1398         adev->dm.ddev = adev_to_drm(adev);
1399         adev->dm.adev = adev;
1400
1401         /* Zero all the fields */
1402         memset(&init_data, 0, sizeof(init_data));
1403 #ifdef CONFIG_DRM_AMD_DC_HDCP
1404         memset(&init_params, 0, sizeof(init_params));
1405 #endif
1406
1407         mutex_init(&adev->dm.dc_lock);
1408         mutex_init(&adev->dm.audio_lock);
1409 #if defined(CONFIG_DRM_AMD_DC_DCN)
1410         spin_lock_init(&adev->dm.vblank_lock);
1411 #endif
1412
1413         if (amdgpu_dm_irq_init(adev)) {
1414                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1415                 goto error;
1416         }
1417
1418         init_data.asic_id.chip_family = adev->family;
1419
1420         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1421         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1422         init_data.asic_id.chip_id = adev->pdev->device;
1423
1424         init_data.asic_id.vram_width = adev->gmc.vram_width;
1425         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1426         init_data.asic_id.atombios_base_address =
1427                 adev->mode_info.atom_context->bios;
1428
1429         init_data.driver = adev;
1430
1431         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1432
1433         if (!adev->dm.cgs_device) {
1434                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1435                 goto error;
1436         }
1437
1438         init_data.cgs_device = adev->dm.cgs_device;
1439
1440         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1441
1442         switch (adev->asic_type) {
1443         case CHIP_CARRIZO:
1444         case CHIP_STONEY:
1445                 init_data.flags.gpu_vm_support = true;
1446                 break;
1447         default:
1448                 switch (adev->ip_versions[DCE_HWIP][0]) {
1449                 case IP_VERSION(2, 1, 0):
1450                         init_data.flags.gpu_vm_support = true;
1451                         switch (adev->dm.dmcub_fw_version) {
1452                         case 0: /* development */
1453                         case 0x1: /* linux-firmware.git hash 6d9f399 */
1454                         case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1455                                 init_data.flags.disable_dmcu = false;
1456                                 break;
1457                         default:
1458                                 init_data.flags.disable_dmcu = true;
1459                         }
1460                         break;
1461                 case IP_VERSION(1, 0, 0):
1462                 case IP_VERSION(1, 0, 1):
1463                 case IP_VERSION(3, 0, 1):
1464                 case IP_VERSION(3, 1, 2):
1465                 case IP_VERSION(3, 1, 3):
1466                         init_data.flags.gpu_vm_support = true;
1467                         break;
1468                 case IP_VERSION(2, 0, 3):
1469                         init_data.flags.disable_dmcu = true;
1470                         break;
1471                 default:
1472                         break;
1473                 }
1474                 break;
1475         }
1476
1477         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1478                 init_data.flags.fbc_support = true;
1479
1480         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1481                 init_data.flags.multi_mon_pp_mclk_switch = true;
1482
1483         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1484                 init_data.flags.disable_fractional_pwm = true;
1485
1486         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1487                 init_data.flags.edp_no_power_sequencing = true;
1488
1489 #ifdef CONFIG_DRM_AMD_DC_DCN
1490         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1491                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1492         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1493                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1494 #endif
1495
1496         init_data.flags.seamless_boot_edp_requested = false;
1497
1498         if (check_seamless_boot_capability(adev)) {
1499                 init_data.flags.seamless_boot_edp_requested = true;
1500                 init_data.flags.allow_seamless_boot_optimization = true;
1501                 DRM_INFO("Seamless boot condition check passed\n");
1502         }
1503
1504         INIT_LIST_HEAD(&adev->dm.da_list);
1505         /* Display Core create. */
1506         adev->dm.dc = dc_create(&init_data);
1507
1508         if (adev->dm.dc) {
1509                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1510         } else {
1511                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1512                 goto error;
1513         }
1514
1515         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1516                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1517                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1518         }
1519
1520         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1521                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1522         if (dm_should_disable_stutter(adev->pdev))
1523                 adev->dm.dc->debug.disable_stutter = true;
1524
1525         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1526                 adev->dm.dc->debug.disable_stutter = true;
1527
1528         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1529                 adev->dm.dc->debug.disable_dsc = true;
1530                 adev->dm.dc->debug.disable_dsc_edp = true;
1531         }
1532
1533         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1534                 adev->dm.dc->debug.disable_clock_gate = true;
1535
1536         r = dm_dmub_hw_init(adev);
1537         if (r) {
1538                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1539                 goto error;
1540         }
1541
1542         dc_hardware_init(adev->dm.dc);
1543
1544         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1545         if (!adev->dm.hpd_rx_offload_wq) {
1546                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1547                 goto error;
1548         }
1549
1550 #if defined(CONFIG_DRM_AMD_DC_DCN)
1551         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1552                 struct dc_phy_addr_space_config pa_config;
1553
1554                 mmhub_read_system_context(adev, &pa_config);
1555
1556                 // Call the DC init_memory func
1557                 dc_setup_system_context(adev->dm.dc, &pa_config);
1558         }
1559 #endif
1560
1561         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1562         if (!adev->dm.freesync_module) {
1563                 DRM_ERROR(
1564                 "amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);
        }
1568
1569         amdgpu_dm_init_color_mod();
1570
1571 #if defined(CONFIG_DRM_AMD_DC_DCN)
1572         if (adev->dm.dc->caps.max_links > 0) {
1573                 adev->dm.vblank_control_workqueue =
1574                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1575                 if (!adev->dm.vblank_control_workqueue)
1576                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1577         }
1578 #endif
1579
1580 #ifdef CONFIG_DRM_AMD_DC_HDCP
1581         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1582                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1583
1584                 if (!adev->dm.hdcp_workqueue)
1585                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1586                 else
1587                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1588
1589                 dc_init_callbacks(adev->dm.dc, &init_params);
1590         }
1591 #endif
1592 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1593         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1594 #endif
1595         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1596                 init_completion(&adev->dm.dmub_aux_transfer_done);
1597                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1598                 if (!adev->dm.dmub_notify) {
1599                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1600                         goto error;
1601                 }
1602
1603                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1604                 if (!adev->dm.delayed_hpd_wq) {
1605                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1606                         goto error;
1607                 }
1608
1609                 amdgpu_dm_outbox_init(adev);
1610 #if defined(CONFIG_DRM_AMD_DC_DCN)
1611                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1612                         dmub_aux_setconfig_callback, false)) {
1613                         DRM_ERROR("amdgpu: failed to register DMUB AUX callback\n");
1614                         goto error;
1615                 }
1616                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1617                         DRM_ERROR("amdgpu: failed to register DMUB HPD callback\n");
1618                         goto error;
1619                 }
1620                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1621                         DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback\n");
1622                         goto error;
1623                 }
1624 #endif /* CONFIG_DRM_AMD_DC_DCN */
1625         }
1626
1627         if (amdgpu_dm_initialize_drm_device(adev)) {
1628                 DRM_ERROR(
1629                 "amdgpu: failed to initialize sw for display support.\n");
1630                 goto error;
1631         }
1632
1633         /* create fake encoders for MST */
1634         dm_dp_create_fake_mst_encoders(adev);
1635
1636         /* TODO: Add_display_info? */
1637
1638         /* TODO use dynamic cursor width */
1639         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1640         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1641
1642         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1643                 DRM_ERROR(
1644                 "amdgpu: failed to initialize vblank support.\n");
1645                 goto error;
1646         }
1647
1648
1649         DRM_DEBUG_DRIVER("KMS initialized.\n");
1650
1651         return 0;
1652 error:
1653         amdgpu_dm_fini(adev);
1654
1655         return -EINVAL;
1656 }
1657
1658 static int amdgpu_dm_early_fini(void *handle)
1659 {
1660         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1661
1662         amdgpu_dm_audio_fini(adev);
1663
1664         return 0;
1665 }
1666
1667 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1668 {
1669         int i;
1670
1671 #if defined(CONFIG_DRM_AMD_DC_DCN)
1672         if (adev->dm.vblank_control_workqueue) {
1673                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1674                 adev->dm.vblank_control_workqueue = NULL;
1675         }
1676 #endif
1677
1678         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1679                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1680         }
1681
1682         amdgpu_dm_destroy_drm_device(&adev->dm);
1683
1684 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1685         if (adev->dm.crc_rd_wrk) {
1686                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1687                 kfree(adev->dm.crc_rd_wrk);
1688                 adev->dm.crc_rd_wrk = NULL;
1689         }
1690 #endif
1691 #ifdef CONFIG_DRM_AMD_DC_HDCP
1692         if (adev->dm.hdcp_workqueue) {
1693                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1694                 adev->dm.hdcp_workqueue = NULL;
1695         }
1696
1697         if (adev->dm.dc)
1698                 dc_deinit_callbacks(adev->dm.dc);
1699 #endif
1700
        /* dc may be NULL here if dc_create() failed on the init error path. */
        if (adev->dm.dc)
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

        if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
                kfree(adev->dm.dmub_notify);
                adev->dm.dmub_notify = NULL;
                destroy_workqueue(adev->dm.delayed_hpd_wq);
                adev->dm.delayed_hpd_wq = NULL;
        }
1709
1710         if (adev->dm.dmub_bo)
1711                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1712                                       &adev->dm.dmub_bo_gpu_addr,
1713                                       &adev->dm.dmub_bo_cpu_addr);
1714
1715         if (adev->dm.hpd_rx_offload_wq) {
1716                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1717                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1718                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1719                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1720                         }
1721                 }
1722
1723                 kfree(adev->dm.hpd_rx_offload_wq);
1724                 adev->dm.hpd_rx_offload_wq = NULL;
1725         }
1726
1727         /* DC Destroy TODO: Replace destroy DAL */
1728         if (adev->dm.dc)
1729                 dc_destroy(&adev->dm.dc);
1730         /*
1731          * TODO: pageflip, vblank interrupt
1732          *
1733          * amdgpu_dm_irq_fini(adev);
1734          */
1735
1736         if (adev->dm.cgs_device) {
1737                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1738                 adev->dm.cgs_device = NULL;
1739         }
1740         if (adev->dm.freesync_module) {
1741                 mod_freesync_destroy(adev->dm.freesync_module);
1742                 adev->dm.freesync_module = NULL;
1743         }
1744
1745         mutex_destroy(&adev->dm.audio_lock);
1746         mutex_destroy(&adev->dm.dc_lock);
1749 }
1750
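/*
 * Pick the DMCU firmware for the current ASIC, fetch and validate it, and
 * register the eRAM and interrupt-vector blobs for PSP loading.
 */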
1751 static int load_dmcu_fw(struct amdgpu_device *adev)
1752 {
1753         const char *fw_name_dmcu = NULL;
1754         int r;
1755         const struct dmcu_firmware_header_v1_0 *hdr;
1756
1757         switch (adev->asic_type) {
1758 #if defined(CONFIG_DRM_AMD_DC_SI)
1759         case CHIP_TAHITI:
1760         case CHIP_PITCAIRN:
1761         case CHIP_VERDE:
1762         case CHIP_OLAND:
1763 #endif
1764         case CHIP_BONAIRE:
1765         case CHIP_HAWAII:
1766         case CHIP_KAVERI:
1767         case CHIP_KABINI:
1768         case CHIP_MULLINS:
1769         case CHIP_TONGA:
1770         case CHIP_FIJI:
1771         case CHIP_CARRIZO:
1772         case CHIP_STONEY:
1773         case CHIP_POLARIS11:
1774         case CHIP_POLARIS10:
1775         case CHIP_POLARIS12:
1776         case CHIP_VEGAM:
1777         case CHIP_VEGA10:
1778         case CHIP_VEGA12:
1779         case CHIP_VEGA20:
1780                 return 0;
1781         case CHIP_NAVI12:
1782                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1783                 break;
1784         case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
1791                 break;
1792         default:
1793                 switch (adev->ip_versions[DCE_HWIP][0]) {
1794                 case IP_VERSION(2, 0, 2):
1795                 case IP_VERSION(2, 0, 3):
1796                 case IP_VERSION(2, 0, 0):
1797                 case IP_VERSION(2, 1, 0):
1798                 case IP_VERSION(3, 0, 0):
1799                 case IP_VERSION(3, 0, 2):
1800                 case IP_VERSION(3, 0, 3):
1801                 case IP_VERSION(3, 0, 1):
1802                 case IP_VERSION(3, 1, 2):
1803                 case IP_VERSION(3, 1, 3):
1804                         return 0;
1805                 default:
1806                         break;
1807                 }
1808                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1809                 return -EINVAL;
1810         }
1811
1812         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1813                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1814                 return 0;
1815         }
1816
1817         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1818         if (r == -ENOENT) {
1819                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1820                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1821                 adev->dm.fw_dmcu = NULL;
1822                 return 0;
1823         }
1824         if (r) {
1825                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1826                         fw_name_dmcu);
1827                 return r;
1828         }
1829
1830         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1831         if (r) {
1832                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1833                         fw_name_dmcu);
1834                 release_firmware(adev->dm.fw_dmcu);
1835                 adev->dm.fw_dmcu = NULL;
1836                 return r;
1837         }
1838
1839         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1840         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1841         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1842         adev->firmware.fw_size +=
1843                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1844
1845         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1846         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1847         adev->firmware.fw_size +=
1848                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1849
1850         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1851
1852         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1853
1854         return 0;
1855 }
1856
1857 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1858 {
1859         struct amdgpu_device *adev = ctx;
1860
1861         return dm_read_reg(adev->dm.dc->ctx, address);
1862 }
1863
1864 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1865                                      uint32_t value)
1866 {
1867         struct amdgpu_device *adev = ctx;
1868
1869         dm_write_reg(adev->dm.dc->ctx, address, value);
1870 }
1871
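/*
 * Fetch the DMUB firmware, create the DMUB service and compute the
 * framebuffer regions it needs; the hardware itself is brought up later
 * via dm_dmub_hw_init().
 */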
1872 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1873 {
1874         struct dmub_srv_create_params create_params;
1875         struct dmub_srv_region_params region_params;
1876         struct dmub_srv_region_info region_info;
1877         struct dmub_srv_fb_params fb_params;
1878         struct dmub_srv_fb_info *fb_info;
1879         struct dmub_srv *dmub_srv;
1880         const struct dmcub_firmware_header_v1_0 *hdr;
1881         const char *fw_name_dmub;
1882         enum dmub_asic dmub_asic;
1883         enum dmub_status status;
1884         int r;
1885
1886         switch (adev->ip_versions[DCE_HWIP][0]) {
1887         case IP_VERSION(2, 1, 0):
1888                 dmub_asic = DMUB_ASIC_DCN21;
1889                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1890                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1891                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1892                 break;
1893         case IP_VERSION(3, 0, 0):
1894                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1895                         dmub_asic = DMUB_ASIC_DCN30;
1896                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1897                 } else {
1898                         dmub_asic = DMUB_ASIC_DCN30;
1899                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1900                 }
1901                 break;
1902         case IP_VERSION(3, 0, 1):
1903                 dmub_asic = DMUB_ASIC_DCN301;
1904                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1905                 break;
1906         case IP_VERSION(3, 0, 2):
1907                 dmub_asic = DMUB_ASIC_DCN302;
1908                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1909                 break;
1910         case IP_VERSION(3, 0, 3):
1911                 dmub_asic = DMUB_ASIC_DCN303;
1912                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1913                 break;
1914         case IP_VERSION(3, 1, 2):
1915         case IP_VERSION(3, 1, 3):
1916                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1917                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1918                 break;
1919
1920         default:
1921                 /* ASIC doesn't support DMUB. */
1922                 return 0;
1923         }
1924
1925         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1926         if (r) {
1927                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1928                 return 0;
1929         }
1930
1931         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1932         if (r) {
1933                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1934                 return 0;
1935         }
1936
1937         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1938         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1939
1940         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1941                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1942                         AMDGPU_UCODE_ID_DMCUB;
1943                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1944                         adev->dm.dmub_fw;
1945                 adev->firmware.fw_size +=
1946                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1947
1948                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1949                          adev->dm.dmcub_fw_version);
1950         }
1951
1952
1953         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1954         dmub_srv = adev->dm.dmub_srv;
1955
1956         if (!dmub_srv) {
1957                 DRM_ERROR("Failed to allocate DMUB service!\n");
1958                 return -ENOMEM;
1959         }
1960
1961         memset(&create_params, 0, sizeof(create_params));
1962         create_params.user_ctx = adev;
1963         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1964         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1965         create_params.asic = dmub_asic;
1966
1967         /* Create the DMUB service. */
1968         status = dmub_srv_create(dmub_srv, &create_params);
1969         if (status != DMUB_STATUS_OK) {
1970                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1971                 return -EINVAL;
1972         }
1973
1974         /* Calculate the size of all the regions for the DMUB service. */
1975         memset(&region_params, 0, sizeof(region_params));
1976
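        /*
         * The signed firmware image wraps the instruction constants in a PSP
         * header and footer; those bytes are not part of the DMUB regions.
         */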
1977         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1978                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1979         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1980         region_params.vbios_size = adev->bios_size;
1981         region_params.fw_bss_data = region_params.bss_data_size ?
1982                 adev->dm.dmub_fw->data +
1983                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1984                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1985         region_params.fw_inst_const =
1986                 adev->dm.dmub_fw->data +
1987                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1988                 PSP_HEADER_BYTES;
1989
1990         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1991                                            &region_info);
1992
1993         if (status != DMUB_STATUS_OK) {
1994                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1995                 return -EINVAL;
1996         }
1997
1998         /*
1999          * Allocate a framebuffer based on the total size of all the regions.
2000          * TODO: Move this into GART.
2001          */
2002         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2003                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2004                                     &adev->dm.dmub_bo_gpu_addr,
2005                                     &adev->dm.dmub_bo_cpu_addr);
2006         if (r)
2007                 return r;
2008
2009         /* Rebase the regions on the framebuffer address. */
2010         memset(&fb_params, 0, sizeof(fb_params));
2011         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2012         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2013         fb_params.region_info = &region_info;
2014
2015         adev->dm.dmub_fb_info =
2016                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2017         fb_info = adev->dm.dmub_fb_info;
2018
2019         if (!fb_info) {
2020                 DRM_ERROR(
2021                         "Failed to allocate framebuffer info for DMUB service!\n");
2022                 return -ENOMEM;
2023         }
2024
2025         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2026         if (status != DMUB_STATUS_OK) {
2027                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2028                 return -EINVAL;
2029         }
2030
2031         return 0;
2032 }
2033
2034 static int dm_sw_init(void *handle)
2035 {
2036         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2037         int r;
2038
2039         r = dm_dmub_sw_init(adev);
2040         if (r)
2041                 return r;
2042
2043         return load_dmcu_fw(adev);
2044 }
2045
2046 static int dm_sw_fini(void *handle)
2047 {
2048         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2049
2050         kfree(adev->dm.dmub_fb_info);
2051         adev->dm.dmub_fb_info = NULL;
2052
2053         if (adev->dm.dmub_srv) {
2054                 dmub_srv_destroy(adev->dm.dmub_srv);
2055                 adev->dm.dmub_srv = NULL;
2056         }
2057
2058         release_firmware(adev->dm.dmub_fw);
2059         adev->dm.dmub_fw = NULL;
2060
2061         release_firmware(adev->dm.fw_dmcu);
2062         adev->dm.fw_dmcu = NULL;
2063
2064         return 0;
2065 }
2066
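/*
 * Enable MST topology management for every connector whose DC link was
 * detected as an MST branch device.
 */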
2067 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2068 {
2069         struct amdgpu_dm_connector *aconnector;
2070         struct drm_connector *connector;
2071         struct drm_connector_list_iter iter;
2072         int ret = 0;
2073
2074         drm_connector_list_iter_begin(dev, &iter);
2075         drm_for_each_connector_iter(connector, &iter) {
2076                 aconnector = to_amdgpu_dm_connector(connector);
2077                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2078                     aconnector->mst_mgr.aux) {
2079                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2080                                          aconnector,
2081                                          aconnector->base.base.id);
2082
2083                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2084                         if (ret < 0) {
2085                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2086                                 aconnector->dc_link->type =
2087                                         dc_connection_single;
2088                                 break;
2089                         }
2090                 }
2091         }
2092         drm_connector_list_iter_end(&iter);
2093
2094         return ret;
2095 }
2096
2097 static int dm_late_init(void *handle)
2098 {
2099         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2100
2101         struct dmcu_iram_parameters params;
2102         unsigned int linear_lut[16];
2103         int i;
2104         struct dmcu *dmcu = NULL;
2105
2106         dmcu = adev->dm.dc->res_pool->dmcu;
2107
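        /* Build a 16-point linear backlight LUT covering 0..0xFFFF. */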
2108         for (i = 0; i < 16; i++)
2109                 linear_lut[i] = 0xFFFF * i / 15;
2110
2111         params.set = 0;
2112         params.backlight_ramping_override = false;
2113         params.backlight_ramping_start = 0xCCCC;
2114         params.backlight_ramping_reduction = 0xCCCCCCCC;
2115         params.backlight_lut_array_size = 16;
2116         params.backlight_lut_array = linear_lut;
2117
2118         /* Min backlight level after ABM reduction; don't allow below 1%:
2119          * 0xFFFF * 0.01 = 0x28F
2120          */
2121         params.min_abm_backlight = 0x28F;
2122         /* In the case where abm is implemented on dmcub,
2123          * dmcu object will be null.
2124          * ABM 2.4 and up are implemented on dmcub.
2125          */
2126         if (dmcu) {
2127                 if (!dmcu_load_iram(dmcu, params))
2128                         return -EINVAL;
2129         } else if (adev->dm.dc->ctx->dmub_srv) {
2130                 struct dc_link *edp_links[MAX_NUM_EDP];
2131                 int edp_num;
2132
2133                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2134                 for (i = 0; i < edp_num; i++) {
2135                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2136                                 return -EINVAL;
2137                 }
2138         }
2139
2140         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2141 }
2142
2143 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2144 {
2145         struct amdgpu_dm_connector *aconnector;
2146         struct drm_connector *connector;
2147         struct drm_connector_list_iter iter;
2148         struct drm_dp_mst_topology_mgr *mgr;
2149         int ret;
2150         bool need_hotplug = false;
2151
2152         drm_connector_list_iter_begin(dev, &iter);
2153         drm_for_each_connector_iter(connector, &iter) {
2154                 aconnector = to_amdgpu_dm_connector(connector);
2155                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2156                     aconnector->mst_port)
2157                         continue;
2158
2159                 mgr = &aconnector->mst_mgr;
2160
2161                 if (suspend) {
2162                         drm_dp_mst_topology_mgr_suspend(mgr);
2163                 } else {
2164                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2165                         if (ret < 0) {
2166                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2167                                 need_hotplug = true;
2168                         }
2169                 }
2170         }
2171         drm_connector_list_iter_end(&iter);
2172
2173         if (need_hotplug)
2174                 drm_kms_helper_hotplug_event(dev);
2175 }
2176
2177 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2178 {
2179         int ret = 0;
2180
2181         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2182          * depends on the Windows driver dc implementation.
2183          * For Navi1x, the clock settings of the dcn watermarks are fixed and
2184          * should be passed to smu during boot up and resume from s3.
2185          * Boot up: dc calculates the dcn watermark clock settings within
2186          * dc_create, dcn20_resource_construct, and then calls the pplib
2187          * functions below to pass the settings to smu:
2188          * smu_set_watermarks_for_clock_ranges
2189          * smu_set_watermarks_table
2190          * navi10_set_watermarks_table
2191          * smu_write_watermarks_table
2192          *
2193          * For Renoir, the clock settings of the dcn watermarks are also fixed
2194          * values. dc implements a different flow for the Windows driver:
2195          * dc_hardware_init / dc_set_power_state
2196          * dcn10_init_hw
2197          * notify_wm_ranges
2198          * set_wm_ranges
2199          * -- Linux
2200          * smu_set_watermarks_for_clock_ranges
2201          * renoir_set_watermarks_table
2202          * smu_write_watermarks_table
2203          *
2204          * For Linux,
2205          * dc_hardware_init -> amdgpu_dm_init
2206          * dc_set_power_state --> dm_resume
2207          *
2208          * Therefore, this function applies to navi10/12/14 but not to Renoir.
2209          */
2211         switch (adev->ip_versions[DCE_HWIP][0]) {
2212         case IP_VERSION(2, 0, 2):
2213         case IP_VERSION(2, 0, 0):
2214                 break;
2215         default:
2216                 return 0;
2217         }
2218
2219         ret = amdgpu_dpm_write_watermarks_table(adev);
2220         if (ret) {
2221                 DRM_ERROR("Failed to update WMTABLE!\n");
2222                 return ret;
2223         }
2224
2225         return 0;
2226 }
2227
2228 /**
2229  * dm_hw_init() - Initialize DC device
2230  * @handle: The base driver device containing the amdgpu_dm device.
2231  *
2232  * Initialize the &struct amdgpu_display_manager device. This involves calling
2233  * the initializers of each DM component, then populating the struct with them.
2234  *
2235  * Although the function implies hardware initialization, both hardware and
2236  * software are initialized here. Splitting them out to their relevant init
2237  * hooks is a future TODO item.
2238  *
2239  * Some notable things that are initialized here:
2240  *
2241  * - Display Core, both software and hardware
2242  * - DC modules that we need (freesync and color management)
2243  * - DRM software states
2244  * - Interrupt sources and handlers
2245  * - Vblank support
2246  * - Debug FS entries, if enabled
2247  */
2248 static int dm_hw_init(void *handle)
2249 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* Create DAL display manager */
        r = amdgpu_dm_init(adev);
        if (r)
                return r;
        amdgpu_dm_hpd_init(adev);

        return 0;
2256 }
2257
2258 /**
2259  * dm_hw_fini() - Teardown DC device
2260  * @handle: The base driver device containing the amdgpu_dm device.
2261  *
2262  * Teardown components within &struct amdgpu_display_manager that require
2263  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2264  * were loaded. Also flush IRQ workqueues and disable them.
2265  */
2266 static int dm_hw_fini(void *handle)
2267 {
2268         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2269
2270         amdgpu_dm_hpd_fini(adev);
2271
2272         amdgpu_dm_irq_fini(adev);
2273         amdgpu_dm_fini(adev);
2274         return 0;
2275 }
2276
2277
2278 static int dm_enable_vblank(struct drm_crtc *crtc);
2279 static void dm_disable_vblank(struct drm_crtc *crtc);
2280
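/*
 * Toggle the pflip and vblank interrupts for every CRTC that has active
 * planes in @state; used to quiesce and restore interrupts around GPU reset.
 */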
2281 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2282                                  struct dc_state *state, bool enable)
2283 {
2284         enum dc_irq_source irq_source;
2285         struct amdgpu_crtc *acrtc;
2286         int rc = -EBUSY;
2287         int i = 0;
2288
2289         for (i = 0; i < state->stream_count; i++) {
2290                 acrtc = get_crtc_by_otg_inst(
2291                                 adev, state->stream_status[i].primary_otg_inst);
2292
2293                 if (acrtc && state->stream_status[i].plane_count != 0) {
2294                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2295                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2296                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2297                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2298                         if (rc)
2299                                 DRM_WARN("Failed to %s pflip interrupts\n",
2300                                          enable ? "enable" : "disable");
2301
2302                         if (enable) {
2303                                 rc = dm_enable_vblank(&acrtc->base);
2304                                 if (rc)
2305                                         DRM_WARN("Failed to enable vblank interrupts\n");
2306                         } else {
2307                                 dm_disable_vblank(&acrtc->base);
2308                         }
2309
2310                 }
2311         }
2312
2313 }
2314
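/*
 * Commit a copy of the current state with all planes and streams removed,
 * leaving dc running with zero streams; used while entering GPU reset.
 */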
2315 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2316 {
2317         struct dc_state *context = NULL;
2318         enum dc_status res = DC_ERROR_UNEXPECTED;
2319         int i;
2320         struct dc_stream_state *del_streams[MAX_PIPES];
2321         int del_streams_count = 0;
2322
2323         memset(del_streams, 0, sizeof(del_streams));
2324
2325         context = dc_create_state(dc);
2326         if (context == NULL)
2327                 goto context_alloc_fail;
2328
2329         dc_resource_state_copy_construct_current(dc, context);
2330
2331         /* First remove from context all streams */
2332         for (i = 0; i < context->stream_count; i++) {
2333                 struct dc_stream_state *stream = context->streams[i];
2334
2335                 del_streams[del_streams_count++] = stream;
2336         }
2337
2338         /* Remove all planes for removed streams and then remove the streams */
2339         for (i = 0; i < del_streams_count; i++) {
2340                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2341                         res = DC_FAIL_DETACH_SURFACES;
2342                         goto fail;
2343                 }
2344
2345                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2346                 if (res != DC_OK)
2347                         goto fail;
2348         }
2349
2350         res = dc_commit_state(dc, context);
2351
2352 fail:
2353         dc_release_state(context);
2354
2355 context_alloc_fail:
2356         return res;
2357 }
2358
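/* Flush any HPD RX IRQ offload work that is still pending before suspend. */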
2359 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2360 {
2361         int i;
2362
2363         if (dm->hpd_rx_offload_wq) {
2364                 for (i = 0; i < dm->dc->caps.max_links; i++)
2365                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2366         }
2367 }
2368
2369 static int dm_suspend(void *handle)
2370 {
2371         struct amdgpu_device *adev = handle;
2372         struct amdgpu_display_manager *dm = &adev->dm;
2373         int ret = 0;
2374
2375         if (amdgpu_in_reset(adev)) {
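                /*
                 * Hold dc_lock across the whole GPU reset; the matching
                 * unlock happens in dm_resume() once the cached state has
                 * been re-committed.
                 */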
2376                 mutex_lock(&dm->dc_lock);
2377
2378 #if defined(CONFIG_DRM_AMD_DC_DCN)
2379                 dc_allow_idle_optimizations(adev->dm.dc, false);
2380 #endif
2381
2382                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2383
2384                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2385
2386                 amdgpu_dm_commit_zero_streams(dm->dc);
2387
2388                 amdgpu_dm_irq_suspend(adev);
2389
2390                 hpd_rx_irq_work_suspend(dm);
2391
2392                 return ret;
2393         }
2394
2395         WARN_ON(adev->dm.cached_state);
2396         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2397
2398         s3_handle_mst(adev_to_drm(adev), true);
2399
2400         amdgpu_dm_irq_suspend(adev);
2401
2402         hpd_rx_irq_work_suspend(dm);
2403
2404         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2405
2406         return 0;
2407 }
2408
2409 struct amdgpu_dm_connector *
2410 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2411                                              struct drm_crtc *crtc)
2412 {
2413         uint32_t i;
2414         struct drm_connector_state *new_con_state;
2415         struct drm_connector *connector;
2416         struct drm_crtc *crtc_from_state;
2417
2418         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2419                 crtc_from_state = new_con_state->crtc;
2420
2421                 if (crtc_from_state == crtc)
2422                         return to_amdgpu_dm_connector(connector);
2423         }
2424
2425         return NULL;
2426 }
2427
2428 static void emulated_link_detect(struct dc_link *link)
2429 {
2430         struct dc_sink_init_data sink_init_data = { 0 };
2431         struct display_sink_capability sink_caps = { 0 };
2432         enum dc_edid_status edid_status;
2433         struct dc_context *dc_ctx = link->ctx;
2434         struct dc_sink *sink = NULL;
2435         struct dc_sink *prev_sink = NULL;
2436
2437         link->type = dc_connection_none;
2438         prev_sink = link->local_sink;
2439
2440         if (prev_sink)
2441                 dc_sink_release(prev_sink);
2442
2443         switch (link->connector_signal) {
2444         case SIGNAL_TYPE_HDMI_TYPE_A: {
2445                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2446                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2447                 break;
2448         }
2449
2450         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2451                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2452                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2453                 break;
2454         }
2455
2456         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2457                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2458                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2459                 break;
2460         }
2461
2462         case SIGNAL_TYPE_LVDS: {
2463                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2464                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2465                 break;
2466         }
2467
2468         case SIGNAL_TYPE_EDP: {
2469                 sink_caps.transaction_type =
2470                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2471                 sink_caps.signal = SIGNAL_TYPE_EDP;
2472                 break;
2473         }
2474
2475         case SIGNAL_TYPE_DISPLAY_PORT: {
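                /*
                 * An emulated DP sink is reported as a virtual signal so
                 * that dc does not treat it as a real, trainable DP link.
                 */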
2476                 sink_caps.transaction_type =
2477                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2478                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2479                 break;
2480         }
2481
2482         default:
2483                 DC_ERROR("Invalid connector type! signal:%d\n",
2484                         link->connector_signal);
2485                 return;
2486         }
2487
2488         sink_init_data.link = link;
2489         sink_init_data.sink_signal = sink_caps.signal;
2490
2491         sink = dc_sink_create(&sink_init_data);
2492         if (!sink) {
2493                 DC_ERROR("Failed to create sink!\n");
2494                 return;
2495         }
2496
2497         /* dc_sink_create returns a new reference */
2498         link->local_sink = sink;
2499
2500         edid_status = dm_helpers_read_local_edid(
2501                         link->ctx,
2502                         link,
2503                         sink);
2504
2505         if (edid_status != EDID_OK)
2506                 DC_ERROR("Failed to read EDID\n");
2507
2508 }
2509
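/*
 * Re-commit every stream in @dc_state with full surface updates; used to
 * restore the cached display state after a GPU reset.
 */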
2510 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2511                                      struct amdgpu_display_manager *dm)
2512 {
2513         struct {
2514                 struct dc_surface_update surface_updates[MAX_SURFACES];
2515                 struct dc_plane_info plane_infos[MAX_SURFACES];
2516                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2517                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2518                 struct dc_stream_update stream_update;
2519         } *bundle;
2520         int k, m;
2521
2522         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2523
2524         if (!bundle) {
2525                 dm_error("Failed to allocate update bundle\n");
2526                 goto cleanup;
2527         }
2528
2529         for (k = 0; k < dc_state->stream_count; k++) {
2530                 bundle->stream_update.stream = dc_state->streams[k];
2531
2532                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2533                         bundle->surface_updates[m].surface =
2534                                 dc_state->stream_status[k].plane_states[m];
2535                         bundle->surface_updates[m].surface->force_full_update =
2536                                 true;
2537                 }
2538                 dc_commit_updates_for_stream(
2539                         dm->dc, bundle->surface_updates,
2540                         dc_state->stream_status[k].plane_count,
2541                         dc_state->streams[k], &bundle->stream_update, dc_state);
2542         }
2543
2544 cleanup:
2545         kfree(bundle);
2546
2547         return;
2548 }
2549
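/* Force the stream currently on @link to DPMS off via a minimal stream update. */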
2550 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2551 {
2552         struct dc_stream_state *stream_state;
2553         struct amdgpu_dm_connector *aconnector = link->priv;
2554         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2555         struct dc_stream_update stream_update;
2556         bool dpms_off = true;
2557
2558         memset(&stream_update, 0, sizeof(stream_update));
2559         stream_update.dpms_off = &dpms_off;
2560
2561         mutex_lock(&adev->dm.dc_lock);
2562         stream_state = dc_stream_find_from_link(link);
2563
2564         if (stream_state == NULL) {
2565                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2566                 mutex_unlock(&adev->dm.dc_lock);
2567                 return;
2568         }
2569
2570         stream_update.stream = stream_state;
2571         acrtc_state->force_dpms_off = true;
2572         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2573                                      stream_state, &stream_update,
2574                                      stream_state->ctx->dc->current_state);
2575         mutex_unlock(&adev->dm.dc_lock);
2576 }
2577
2578 static int dm_resume(void *handle)
2579 {
2580         struct amdgpu_device *adev = handle;
2581         struct drm_device *ddev = adev_to_drm(adev);
2582         struct amdgpu_display_manager *dm = &adev->dm;
2583         struct amdgpu_dm_connector *aconnector;
2584         struct drm_connector *connector;
2585         struct drm_connector_list_iter iter;
2586         struct drm_crtc *crtc;
2587         struct drm_crtc_state *new_crtc_state;
2588         struct dm_crtc_state *dm_new_crtc_state;
2589         struct drm_plane *plane;
2590         struct drm_plane_state *new_plane_state;
2591         struct dm_plane_state *dm_new_plane_state;
2592         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2593         enum dc_connection_type new_connection_type = dc_connection_none;
2594         struct dc_state *dc_state;
2595         int i, r, j;
2596
2597         if (amdgpu_in_reset(adev)) {
2598                 dc_state = dm->cached_dc_state;
2599
2600                 /*
2601                  * The dc->current_state is backed up into dm->cached_dc_state
2602                  * before we commit 0 streams.
2603                  *
2604                  * DC will clear link encoder assignments on the real state
2605                  * but the changes won't propagate over to the copy we made
2606                  * before the 0 streams commit.
2607                  *
2608                  * DC expects that link encoder assignments are *not* valid
2609                  * when committing a state, so as a workaround it needs to be
2610                  * cleared here.
2611                  */
2612                 link_enc_cfg_init(dm->dc, dc_state);
2613
2614                 if (dc_enable_dmub_notifications(adev->dm.dc))
2615                         amdgpu_dm_outbox_init(adev);
2616
2617                 r = dm_dmub_hw_init(adev);
2618                 if (r)
2619                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2620
2621                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2622                 dc_resume(dm->dc);
2623
2624                 amdgpu_dm_irq_resume_early(adev);
2625
2626                 for (i = 0; i < dc_state->stream_count; i++) {
2627                         dc_state->streams[i]->mode_changed = true;
2628                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2629                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2630                                         = 0xffffffff;
2631                         }
2632                 }
2633
2634                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2635
2636                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2637
2638                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2639
2640                 dc_release_state(dm->cached_dc_state);
2641                 dm->cached_dc_state = NULL;
2642
2643                 amdgpu_dm_irq_resume_late(adev);
2644
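                /* Release the dc_lock taken in dm_suspend() for the reset. */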
2645                 mutex_unlock(&dm->dc_lock);
2646
2647                 return 0;
2648         }
2649         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2650         dc_release_state(dm_state->context);
2651         dm_state->context = dc_create_state(dm->dc);
2652         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2653         dc_resource_state_construct(dm->dc, dm_state->context);
2654
2655         /* Re-enable outbox interrupts for DPIA. */
2656         if (dc_enable_dmub_notifications(adev->dm.dc))
2657                 amdgpu_dm_outbox_init(adev);
2658
2659         /* Before powering on DC we need to re-initialize DMUB. */
2660         dm_dmub_hw_resume(adev);
2661
2662         /* power on hardware */
2663         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2664
2665         /* program HPD filter */
2666         dc_resume(dm->dc);
2667
2668         /*
2669          * early enable HPD Rx IRQ, should be done before set mode as short
2670          * pulse interrupts are used for MST
2671          */
2672         amdgpu_dm_irq_resume_early(adev);
2673
2674         /* On resume we need to rewrite the MSTM control bits to enable MST */
2675         s3_handle_mst(ddev, false);
2676
2677         /* Do detection */
2678         drm_connector_list_iter_begin(ddev, &iter);
2679         drm_for_each_connector_iter(connector, &iter) {
2680                 aconnector = to_amdgpu_dm_connector(connector);
2681
2682                 /*
2683                  * this is the case when traversing through already created
2684                  * MST connectors, should be skipped
2685                  */
2686                 if (aconnector->mst_port)
2687                         continue;
2688
2689                 mutex_lock(&aconnector->hpd_lock);
2690                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2691                         DRM_ERROR("KMS: Failed to detect connector\n");
2692
2693                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2694                         emulated_link_detect(aconnector->dc_link);
2695                 else
2696                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2697
2698                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2699                         aconnector->fake_enable = false;
2700
2701                 if (aconnector->dc_sink)
2702                         dc_sink_release(aconnector->dc_sink);
2703                 aconnector->dc_sink = NULL;
2704                 amdgpu_dm_update_connector_after_detect(aconnector);
2705                 mutex_unlock(&aconnector->hpd_lock);
2706         }
2707         drm_connector_list_iter_end(&iter);
2708
2709         /* Force mode set in atomic commit */
2710         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2711                 new_crtc_state->active_changed = true;
2712
2713         /*
2714          * atomic_check is expected to create the dc states. We need to release
2715          * them here, since they were duplicated as part of the suspend
2716          * procedure.
2717          */
2718         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2719                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2720                 if (dm_new_crtc_state->stream) {
2721                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2722                         dc_stream_release(dm_new_crtc_state->stream);
2723                         dm_new_crtc_state->stream = NULL;
2724                 }
2725         }
2726
2727         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2728                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2729                 if (dm_new_plane_state->dc_state) {
2730                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2731                         dc_plane_state_release(dm_new_plane_state->dc_state);
2732                         dm_new_plane_state->dc_state = NULL;
2733                 }
2734         }
2735
2736         drm_atomic_helper_resume(ddev, dm->cached_state);
2737
2738         dm->cached_state = NULL;
2739
2740         amdgpu_dm_irq_resume_late(adev);
2741
2742         amdgpu_dm_smu_write_watermarks_table(adev);
2743
2744         return 0;
2745 }
2746
2747 /**
2748  * DOC: DM Lifecycle
2749  *
2750  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2751  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2752  * the base driver's device list to be initialized and torn down accordingly.
2753  *
2754  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2755  */
2756
2757 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2758         .name = "dm",
2759         .early_init = dm_early_init,
2760         .late_init = dm_late_init,
2761         .sw_init = dm_sw_init,
2762         .sw_fini = dm_sw_fini,
2763         .early_fini = amdgpu_dm_early_fini,
2764         .hw_init = dm_hw_init,
2765         .hw_fini = dm_hw_fini,
2766         .suspend = dm_suspend,
2767         .resume = dm_resume,
2768         .is_idle = dm_is_idle,
2769         .wait_for_idle = dm_wait_for_idle,
2770         .check_soft_reset = dm_check_soft_reset,
2771         .soft_reset = dm_soft_reset,
2772         .set_clockgating_state = dm_set_clockgating_state,
2773         .set_powergating_state = dm_set_powergating_state,
2774 };
2775
2776 const struct amdgpu_ip_block_version dm_ip_block =
2777 {
2778         .type = AMD_IP_BLOCK_TYPE_DCE,
2779         .major = 1,
2780         .minor = 0,
2781         .rev = 0,
2782         .funcs = &amdgpu_dm_funcs,
2783 };
2784
2785
2786 /**
2787  * DOC: atomic
2788  *
2789  * *WIP*
2790  */
2791
2792 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2793         .fb_create = amdgpu_display_user_framebuffer_create,
2794         .get_format_info = amd_get_format_info,
2795         .output_poll_changed = drm_fb_helper_output_poll_changed,
2796         .atomic_check = amdgpu_dm_atomic_check,
2797         .atomic_commit = drm_atomic_helper_commit,
2798 };
2799
2800 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2801         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2802 };
2803
2804 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2805 {
2806         u32 max_cll, min_cll, max, min, q, r;
2807         struct amdgpu_dm_backlight_caps *caps;
2808         struct amdgpu_display_manager *dm;
2809         struct drm_connector *conn_base;
2810         struct amdgpu_device *adev;
2811         struct dc_link *link = NULL;
2812         static const u8 pre_computed_values[] = {
2813                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2814                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2815         int i;
2816
2817         if (!aconnector || !aconnector->dc_link)
2818                 return;
2819
2820         link = aconnector->dc_link;
2821         if (link->connector_signal != SIGNAL_TYPE_EDP)
2822                 return;
2823
2824         conn_base = &aconnector->base;
2825         adev = drm_to_adev(conn_base->dev);
2826         dm = &adev->dm;
2827         for (i = 0; i < dm->num_of_edps; i++) {
2828                 if (link == dm->backlight_link[i])
2829                         break;
2830         }
2831         if (i >= dm->num_of_edps)
2832                 return;
2833         caps = &dm->backlight_caps[i];
2834         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2835         caps->aux_support = false;
2836         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2837         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2838
2839         if (caps->ext_caps->bits.oled == 1 /*||
2840             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2841             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2842                 caps->aux_support = true;
2843
2844         if (amdgpu_backlight == 0)
2845                 caps->aux_support = false;
2846         else if (amdgpu_backlight == 1)
2847                 caps->aux_support = true;
2848
2849         /* From the specification (CTA-861-G), for calculating the maximum
2850          * luminance we need to use:
2851          *      Luminance = 50*2**(CV/32)
2852          * where CV is a one-byte value.
2853          * Evaluating this expression directly would require floating-point
2854          * precision; to avoid that complexity, we take advantage of the
2855          * fact that CV is divided by a constant. By Euclid's division
2856          * algorithm, CV can be written as CV = 32*q + r. Substituting this
2857          * into the luminance expression gives 50*(2**q)*(2**(r/32)), so we
2858          * only need to pre-compute 50*2**(r/32) for r in 0..31. The
2859          * pre-computed values were generated with the following Ruby line:
2860          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2861          * The results can be verified against the pre_computed_values
2862          * table declared above.
2863          */
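        /*
         * Worked example (illustrative only): max_cll = 65 gives
         * q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
         * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
         * which matches 50*2**(65/32.0) ~= 204.4.
         */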
2864         q = max_cll >> 5;
2865         r = max_cll % 32;
2866         max = (1 << q) * pre_computed_values[r];
2867
2868         /* min luminance: maxLum * (CV/255)^2 / 100; keep the division last
2869          * so integer rounding does not zero out the byte-valued min_cll */
2870         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2871
2872         caps->aux_max_input_signal = max;
2873         caps->aux_min_input_signal = min;
2874 }
2875
2876 void amdgpu_dm_update_connector_after_detect(
2877                 struct amdgpu_dm_connector *aconnector)
2878 {
2879         struct drm_connector *connector = &aconnector->base;
2880         struct drm_device *dev = connector->dev;
2881         struct dc_sink *sink;
2882
2883         /* MST handled by drm_mst framework */
2884         if (aconnector->mst_mgr.mst_state)
2885                 return;
2886
2887         sink = aconnector->dc_link->local_sink;
2888         if (sink)
2889                 dc_sink_retain(sink);
2890
2891         /*
2892          * An EDID-managed connector gets its first update in the mode_valid
2893          * hook only; the connector sink is then set to either a fake or a
2894          * physical sink depending on link status. Skip if already done during boot.
2895          */
2896         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2897                         && aconnector->dc_em_sink) {
2898
2899                 /*
2900                  * For headless S3 resume, use the emulated sink to fake the
2901                  * stream, because connector->dc_sink is set to NULL on resume
2902                  */
2903                 mutex_lock(&dev->mode_config.mutex);
2904
2905                 if (sink) {
2906                         if (aconnector->dc_sink) {
2907                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2908                                 /*
2909                                  * The retain/release pair keeps the sink's refcount up:
2910                                  * the link no longer points to the sink after disconnect,
2911                                  * so the next CRTC-to-connector reshuffle by userspace
2912                                  * would otherwise trigger an unwanted dc_sink release.
2913                                  */
2914                                 dc_sink_release(aconnector->dc_sink);
2915                         }
2916                         aconnector->dc_sink = sink;
2917                         dc_sink_retain(aconnector->dc_sink);
2918                         amdgpu_dm_update_freesync_caps(connector,
2919                                         aconnector->edid);
2920                 } else {
2921                         amdgpu_dm_update_freesync_caps(connector, NULL);
2922                         if (!aconnector->dc_sink) {
2923                                 aconnector->dc_sink = aconnector->dc_em_sink;
2924                                 dc_sink_retain(aconnector->dc_sink);
2925                         }
2926                 }
2927
2928                 mutex_unlock(&dev->mode_config.mutex);
2929
2930                 if (sink)
2931                         dc_sink_release(sink);
2932                 return;
2933         }
2934
2935         /*
2936          * TODO: temporary guard while a proper fix is found;
2937          * if this sink is an MST sink, we should not do anything.
2938          */
2939         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2940                 dc_sink_release(sink);
2941                 return;
2942         }
2943
2944         if (aconnector->dc_sink == sink) {
2945                 /*
2946                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2947                  * Do nothing!!
2948                  */
2949                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2950                                 aconnector->connector_id);
2951                 if (sink)
2952                         dc_sink_release(sink);
2953                 return;
2954         }
2955
2956         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2957                 aconnector->connector_id, aconnector->dc_sink, sink);
2958
2959         mutex_lock(&dev->mode_config.mutex);
2960
2961         /*
2962          * 1. Update status of the drm connector
2963          * 2. Send an event and let userspace tell us what to do
2964          */
2965         if (sink) {
2966                 /*
2967                  * TODO: check if we still need the S3 mode update workaround.
2968                  * If yes, put it here.
2969                  */
2970                 if (aconnector->dc_sink) {
2971                         amdgpu_dm_update_freesync_caps(connector, NULL);
2972                         dc_sink_release(aconnector->dc_sink);
2973                 }
2974
2975                 aconnector->dc_sink = sink;
2976                 dc_sink_retain(aconnector->dc_sink);
2977                 if (sink->dc_edid.length == 0) {
2978                         aconnector->edid = NULL;
2979                         if (aconnector->dc_link->aux_mode) {
2980                                 drm_dp_cec_unset_edid(
2981                                         &aconnector->dm_dp_aux.aux);
2982                         }
2983                 } else {
2984                         aconnector->edid =
2985                                 (struct edid *)sink->dc_edid.raw_edid;
2986
2987                         if (aconnector->dc_link->aux_mode)
2988                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2989                                                     aconnector->edid);
2990                 }
2991
2992                 drm_connector_update_edid_property(connector, aconnector->edid);
2993                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2994                 update_connector_ext_caps(aconnector);
2995         } else {
2996                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2997                 amdgpu_dm_update_freesync_caps(connector, NULL);
2998                 drm_connector_update_edid_property(connector, NULL);
2999                 aconnector->num_modes = 0;
3000                 dc_sink_release(aconnector->dc_sink);
3001                 aconnector->dc_sink = NULL;
3002                 aconnector->edid = NULL;
3003 #ifdef CONFIG_DRM_AMD_DC_HDCP
3004                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3005                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3006                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3007 #endif
3008         }
3009
3010         mutex_unlock(&dev->mode_config.mutex);
3011
3012         update_subconnector_property(aconnector);
3013
3014         if (sink)
3015                 dc_sink_release(sink);
3016 }
3017
3018 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3019 {
3020         struct drm_connector *connector = &aconnector->base;
3021         struct drm_device *dev = connector->dev;
3022         enum dc_connection_type new_connection_type = dc_connection_none;
3023         struct amdgpu_device *adev = drm_to_adev(dev);
3024         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3025         struct dm_crtc_state *dm_crtc_state = NULL;
3026
3027         if (adev->dm.disable_hpd_irq)
3028                 return;
3029
3030         if (dm_con_state->base.state && dm_con_state->base.crtc)
3031                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3032                                         dm_con_state->base.state,
3033                                         dm_con_state->base.crtc));
3034         /*
3035          * On detection failure, or for MST, there is no need to update the
3036          * connector status or notify the OS; MST handles this in its own context.
3037          */
3038         mutex_lock(&aconnector->hpd_lock);
3039
3040 #ifdef CONFIG_DRM_AMD_DC_HDCP
3041         if (adev->dm.hdcp_workqueue) {
3042                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3043                 dm_con_state->update_hdcp = true;
3044         }
3045 #endif
3046         if (aconnector->fake_enable)
3047                 aconnector->fake_enable = false;
3048
3049         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3050                 DRM_ERROR("KMS: Failed to detect connector\n");
3051
3052         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3053                 emulated_link_detect(aconnector->dc_link);
3054
3055                 drm_modeset_lock_all(dev);
3056                 dm_restore_drm_connector_state(dev, connector);
3057                 drm_modeset_unlock_all(dev);
3058
3059                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3060                         drm_kms_helper_connector_hotplug_event(connector);
3061
3062         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3063                 if (new_connection_type == dc_connection_none &&
3064                     aconnector->dc_link->type == dc_connection_none &&
3065                     dm_crtc_state)
3066                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3067
3068                 amdgpu_dm_update_connector_after_detect(aconnector);
3069
3070                 drm_modeset_lock_all(dev);
3071                 dm_restore_drm_connector_state(dev, connector);
3072                 drm_modeset_unlock_all(dev);
3073
3074                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3075                         drm_kms_helper_connector_hotplug_event(connector);
3076         }
3077         mutex_unlock(&aconnector->hpd_lock);
3078
3079 }
3080
3081 static void handle_hpd_irq(void *param)
3082 {
3083         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3084
3085         handle_hpd_irq_helper(aconnector);
3086
3087 }
3088
3089 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3090 {
3091         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3092         uint8_t dret;
3093         bool new_irq_handled = false;
3094         int dpcd_addr;
3095         int dpcd_bytes_to_read;
3096
3097         const int max_process_count = 30;
3098         int process_count = 0;
3099
3100         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3101
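        /*
         * DPCD 1.2+ sinks expose the ESI (Event Status Indicator) range,
         * which mirrors the legacy sink status registers for MST use.
         */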
3102         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3103                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3104                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3105                 dpcd_addr = DP_SINK_COUNT;
3106         } else {
3107                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3108                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3109                 dpcd_addr = DP_SINK_COUNT_ESI;
3110         }
3111
3112         dret = drm_dp_dpcd_read(
3113                 &aconnector->dm_dp_aux.aux,
3114                 dpcd_addr,
3115                 esi,
3116                 dpcd_bytes_to_read);
3117
3118         while (dret == dpcd_bytes_to_read &&
3119                 process_count < max_process_count) {
3120                 uint8_t retry;
3121                 dret = 0;
3122
3123                 process_count++;
3124
3125                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3126                 /* handle HPD short pulse irq */
3127                 if (aconnector->mst_mgr.mst_state)
3128                         drm_dp_mst_hpd_irq(
3129                                 &aconnector->mst_mgr,
3130                                 esi,
3131                                 &new_irq_handled);
3132
3133                 if (new_irq_handled) {
3134                         /* ACK at DPCD (dpcd_addr + 1) to notify the downstream device */
3135                         const int ack_dpcd_bytes_to_write =
3136                                 dpcd_bytes_to_read - 1;
3137
3138                         for (retry = 0; retry < 3; retry++) {
3139                                 uint8_t wret;
3140
3141                                 wret = drm_dp_dpcd_write(
3142                                         &aconnector->dm_dp_aux.aux,
3143                                         dpcd_addr + 1,
3144                                         &esi[1],
3145                                         ack_dpcd_bytes_to_write);
3146                                 if (wret == ack_dpcd_bytes_to_write)
3147                                         break;
3148                         }
3149
3150                         /* check if there is new irq to be handled */
3151                         dret = drm_dp_dpcd_read(
3152                                 &aconnector->dm_dp_aux.aux,
3153                                 dpcd_addr,
3154                                 esi,
3155                                 dpcd_bytes_to_read);
3156
3157                         new_irq_handled = false;
3158                 } else {
3159                         break;
3160                 }
3161         }
3162
3163         if (process_count == max_process_count)
3164                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3165 }
3166
3167 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3168                                                         union hpd_irq_data hpd_irq_data)
3169 {
3170         struct hpd_rx_irq_offload_work *offload_work =
3171                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3172
3173         if (!offload_work) {
3174                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3175                 return;
3176         }
3177
3178         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3179         offload_work->data = hpd_irq_data;
3180         offload_work->offload_wq = offload_wq;
3181
3182         queue_work(offload_wq->wq, &offload_work->work);
3183         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3184 }
3185
3186 static void handle_hpd_rx_irq(void *param)
3187 {
3188         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3189         struct drm_connector *connector = &aconnector->base;
3190         struct drm_device *dev = connector->dev;
3191         struct dc_link *dc_link = aconnector->dc_link;
3192         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3193         bool result = false;
3194         enum dc_connection_type new_connection_type = dc_connection_none;
3195         struct amdgpu_device *adev = drm_to_adev(dev);
3196         union hpd_irq_data hpd_irq_data;
3197         bool link_loss = false;
3198         bool has_left_work = false;
3199         int idx = aconnector->base.index;
3200         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3201
3202         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3203
3204         if (adev->dm.disable_hpd_irq)
3205                 return;
3206
3207         /*
3208          * TODO: this mutex is a temporary measure to protect the HPD
3209          * interrupt from GPIO conflicts; once an i2c helper is implemented,
3210          * it should be retired.
3211          */
3212         mutex_lock(&aconnector->hpd_lock);
3213
3214         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3215                                                 &link_loss, true, &has_left_work);
3216
3217         if (!has_left_work)
3218                 goto out;
3219
3220         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3221                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3222                 goto out;
3223         }
3224
3225         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3226                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3227                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3228                         dm_handle_mst_sideband_msg(aconnector);
3229                         goto out;
3230                 }
3231
3232                 if (link_loss) {
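                        /*
                         * Only one link-loss recovery work item is kept in
                         * flight per connector; further link-loss IRQs are
                         * skipped while one is being handled.
                         */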
3233                         bool skip = false;
3234
3235                         spin_lock(&offload_wq->offload_lock);
3236                         skip = offload_wq->is_handling_link_loss;
3237
3238                         if (!skip)
3239                                 offload_wq->is_handling_link_loss = true;
3240
3241                         spin_unlock(&offload_wq->offload_lock);
3242
3243                         if (!skip)
3244                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3245
3246                         goto out;
3247                 }
3248         }
3249
3250 out:
3251         if (result && !is_mst_root_connector) {
3252                 /* Downstream Port status changed. */
3253                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3254                         DRM_ERROR("KMS: Failed to detect connector\n");
3255
3256                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3257                         emulated_link_detect(dc_link);
3258
3259                         if (aconnector->fake_enable)
3260                                 aconnector->fake_enable = false;
3261
3262                         amdgpu_dm_update_connector_after_detect(aconnector);
3263
3264
3265                         drm_modeset_lock_all(dev);
3266                         dm_restore_drm_connector_state(dev, connector);
3267                         drm_modeset_unlock_all(dev);
3268
3269                         drm_kms_helper_connector_hotplug_event(connector);
3270                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3271
3272                         if (aconnector->fake_enable)
3273                                 aconnector->fake_enable = false;
3274
3275                         amdgpu_dm_update_connector_after_detect(aconnector);
3276
3277
3278                         drm_modeset_lock_all(dev);
3279                         dm_restore_drm_connector_state(dev, connector);
3280                         drm_modeset_unlock_all(dev);
3281
3282                         drm_kms_helper_connector_hotplug_event(connector);
3283                 }
3284         }
3285 #ifdef CONFIG_DRM_AMD_DC_HDCP
3286         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3287                 if (adev->dm.hdcp_workqueue)
3288                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3289         }
3290 #endif
3291
3292         if (dc_link->type != dc_connection_mst_branch)
3293                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3294
3295         mutex_unlock(&aconnector->hpd_lock);
3296 }
3297
3298 static void register_hpd_handlers(struct amdgpu_device *adev)
3299 {
3300         struct drm_device *dev = adev_to_drm(adev);
3301         struct drm_connector *connector;
3302         struct amdgpu_dm_connector *aconnector;
3303         const struct dc_link *dc_link;
3304         struct dc_interrupt_params int_params = {0};
3305
3306         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3307         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3308
3309         list_for_each_entry(connector,
3310                         &dev->mode_config.connector_list, head) {
3311
3312                 aconnector = to_amdgpu_dm_connector(connector);
3313                 dc_link = aconnector->dc_link;
3314
3315                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3316                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3317                         int_params.irq_source = dc_link->irq_source_hpd;
3318
3319                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3320                                         handle_hpd_irq,
3321                                         (void *) aconnector);
3322                 }
3323
3324                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3325
3326                         /* Also register for DP short pulse (hpd_rx). */
3327                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3328                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3329
3330                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3331                                         handle_hpd_rx_irq,
3332                                         (void *) aconnector);
3333
3334                         if (adev->dm.hpd_rx_offload_wq)
3335                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3336                                         aconnector;
3337                 }
3338         }
3339 }
3340
3341 #if defined(CONFIG_DRM_AMD_DC_SI)
3342 /* Register IRQ sources and initialize IRQ callbacks */
3343 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3344 {
3345         struct dc *dc = adev->dm.dc;
3346         struct common_irq_params *c_irq_params;
3347         struct dc_interrupt_params int_params = {0};
3348         int r;
3349         int i;
3350         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3351
3352         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3353         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3354
3355         /*
3356          * Actions of amdgpu_irq_add_id():
3357          * 1. Register a set() function with base driver.
3358          *    Base driver will call set() function to enable/disable an
3359          *    interrupt in DC hardware.
3360          * 2. Register amdgpu_dm_irq_handler().
3361          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3362          *    coming from DC hardware.
3363          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3364          *    for acknowledging and handling. */
3365
3366         /* Use VBLANK interrupt */
3367         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3368                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3369                 if (r) {
3370                         DRM_ERROR("Failed to add crtc irq id!\n");
3371                         return r;
3372                 }
3373
3374                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3375                 int_params.irq_source =
3376                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3377
3378                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3379
3380                 c_irq_params->adev = adev;
3381                 c_irq_params->irq_src = int_params.irq_source;
3382
3383                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3384                                 dm_crtc_high_irq, c_irq_params);
3385         }
3386
3387         /* Use GRPH_PFLIP interrupt */
3388         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3389                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3390                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3391                 if (r) {
3392                         DRM_ERROR("Failed to add page flip irq id!\n");
3393                         return r;
3394                 }
3395
3396                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3397                 int_params.irq_source =
3398                         dc_interrupt_to_irq_source(dc, i, 0);
3399
3400                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3401
3402                 c_irq_params->adev = adev;
3403                 c_irq_params->irq_src = int_params.irq_source;
3404
3405                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3406                                 dm_pflip_high_irq, c_irq_params);
3407
3408         }
3409
3410         /* HPD */
3411         r = amdgpu_irq_add_id(adev, client_id,
3412                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3413         if (r) {
3414                 DRM_ERROR("Failed to add hpd irq id!\n");
3415                 return r;
3416         }
3417
3418         register_hpd_handlers(adev);
3419
3420         return 0;
3421 }
3422 #endif
3423
3424 /* Register IRQ sources and initialize IRQ callbacks */
3425 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3426 {
3427         struct dc *dc = adev->dm.dc;
3428         struct common_irq_params *c_irq_params;
3429         struct dc_interrupt_params int_params = {0};
3430         int r;
3431         int i;
3432         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3433
3434         if (adev->family >= AMDGPU_FAMILY_AI)
3435                 client_id = SOC15_IH_CLIENTID_DCE;
3436
3437         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3438         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3439
3440         /*
3441          * Actions of amdgpu_irq_add_id():
3442          * 1. Register a set() function with base driver.
3443          *    Base driver will call set() function to enable/disable an
3444          *    interrupt in DC hardware.
3445          * 2. Register amdgpu_dm_irq_handler().
3446          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3447          *    coming from DC hardware.
3448          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3449          *    for acknowledging and handling. */
3450
3451         /* Use VBLANK interrupt */
3452         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3453                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3454                 if (r) {
3455                         DRM_ERROR("Failed to add crtc irq id!\n");
3456                         return r;
3457                 }
3458
3459                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3460                 int_params.irq_source =
3461                         dc_interrupt_to_irq_source(dc, i, 0);
3462
3463                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3464
3465                 c_irq_params->adev = adev;
3466                 c_irq_params->irq_src = int_params.irq_source;
3467
3468                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3469                                 dm_crtc_high_irq, c_irq_params);
3470         }
3471
3472         /* Use VUPDATE interrupt */
3473         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3474                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3475                 if (r) {
3476                         DRM_ERROR("Failed to add vupdate irq id!\n");
3477                         return r;
3478                 }
3479
3480                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3481                 int_params.irq_source =
3482                         dc_interrupt_to_irq_source(dc, i, 0);
3483
3484                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3485
3486                 c_irq_params->adev = adev;
3487                 c_irq_params->irq_src = int_params.irq_source;
3488
3489                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3490                                 dm_vupdate_high_irq, c_irq_params);
3491         }
3492
3493         /* Use GRPH_PFLIP interrupt */
3494         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3495                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3496                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3497                 if (r) {
3498                         DRM_ERROR("Failed to add page flip irq id!\n");
3499                         return r;
3500                 }
3501
3502                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3503                 int_params.irq_source =
3504                         dc_interrupt_to_irq_source(dc, i, 0);
3505
3506                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3507
3508                 c_irq_params->adev = adev;
3509                 c_irq_params->irq_src = int_params.irq_source;
3510
3511                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3512                                 dm_pflip_high_irq, c_irq_params);
3513
3514         }
3515
3516         /* HPD */
3517         r = amdgpu_irq_add_id(adev, client_id,
3518                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3519         if (r) {
3520                 DRM_ERROR("Failed to add hpd irq id!\n");
3521                 return r;
3522         }
3523
3524         register_hpd_handlers(adev);
3525
3526         return 0;
3527 }
3528
3529 #if defined(CONFIG_DRM_AMD_DC_DCN)
3530 /* Register IRQ sources and initialize IRQ callbacks */
3531 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3532 {
3533         struct dc *dc = adev->dm.dc;
3534         struct common_irq_params *c_irq_params;
3535         struct dc_interrupt_params int_params = {0};
3536         int r;
3537         int i;
3538 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3539         static const unsigned int vrtl_int_srcid[] = {
3540                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3541                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3542                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3543                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3544                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3545                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3546         };
3547 #endif
3548
3549         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3550         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3551
3552         /*
3553          * Actions of amdgpu_irq_add_id():
3554          * 1. Register a set() function with base driver.
3555          *    Base driver will call set() function to enable/disable an
3556          *    interrupt in DC hardware.
3557          * 2. Register amdgpu_dm_irq_handler().
3558          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3559          *    coming from DC hardware.
3560          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3561          *    for acknowledging and handling.
3562          */
3563
3564         /* Use VSTARTUP interrupt */
3565         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3566                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3567                         i++) {
3568                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3569
3570                 if (r) {
3571                         DRM_ERROR("Failed to add crtc irq id!\n");
3572                         return r;
3573                 }
3574
3575                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3576                 int_params.irq_source =
3577                         dc_interrupt_to_irq_source(dc, i, 0);
3578
3579                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3580
3581                 c_irq_params->adev = adev;
3582                 c_irq_params->irq_src = int_params.irq_source;
3583
3584                 amdgpu_dm_irq_register_interrupt(
3585                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3586         }
3587
3588         /* Use otg vertical line interrupt */
3589 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3590         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3591                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3592                                 vrtl_int_srcid[i], &adev->vline0_irq);
3593
3594                 if (r) {
3595                         DRM_ERROR("Failed to add vline0 irq id!\n");
3596                         return r;
3597                 }
3598
3599                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3600                 int_params.irq_source =
3601                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3602
3603                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3604                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3605                         break;
3606                 }
3607
3608                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3609                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3610
3611                 c_irq_params->adev = adev;
3612                 c_irq_params->irq_src = int_params.irq_source;
3613
3614                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3615                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3616         }
3617 #endif
3618
3619         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3620          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3621          * to trigger at end of each vblank, regardless of state of the lock,
3622          * matching DCE behaviour.
3623          */
3624         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3625              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3626              i++) {
3627                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3628
3629                 if (r) {
3630                         DRM_ERROR("Failed to add vupdate irq id!\n");
3631                         return r;
3632                 }
3633
3634                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3635                 int_params.irq_source =
3636                         dc_interrupt_to_irq_source(dc, i, 0);
3637
3638                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3639
3640                 c_irq_params->adev = adev;
3641                 c_irq_params->irq_src = int_params.irq_source;
3642
3643                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3644                                 dm_vupdate_high_irq, c_irq_params);
3645         }
3646
3647         /* Use GRPH_PFLIP interrupt */
3648         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3649                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3650                         i++) {
3651                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3652                 if (r) {
3653                         DRM_ERROR("Failed to add page flip irq id!\n");
3654                         return r;
3655                 }
3656
3657                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3658                 int_params.irq_source =
3659                         dc_interrupt_to_irq_source(dc, i, 0);
3660
3661                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3662
3663                 c_irq_params->adev = adev;
3664                 c_irq_params->irq_src = int_params.irq_source;
3665
3666                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3667                                 dm_pflip_high_irq, c_irq_params);
3668
3669         }
3670
3671         /* HPD */
3672         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3673                         &adev->hpd_irq);
3674         if (r) {
3675                 DRM_ERROR("Failed to add hpd irq id!\n");
3676                 return r;
3677         }
3678
3679         register_hpd_handlers(adev);
3680
3681         return 0;
3682 }
3683 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3684 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3685 {
3686         struct dc *dc = adev->dm.dc;
3687         struct common_irq_params *c_irq_params;
3688         struct dc_interrupt_params int_params = {0};
3689         int r, i;
3690
3691         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3692         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3693
3694         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3695                         &adev->dmub_outbox_irq);
3696         if (r) {
3697                 DRM_ERROR("Failed to add outbox irq id!\n");
3698                 return r;
3699         }
3700
3701         if (dc->ctx->dmub_srv) {
3702                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3703                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3704                 int_params.irq_source =
3705                 dc_interrupt_to_irq_source(dc, i, 0);
3706
3707                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3708
3709                 c_irq_params->adev = adev;
3710                 c_irq_params->irq_src = int_params.irq_source;
3711
3712                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3713                                 dm_dmub_outbox1_low_irq, c_irq_params);
3714         }
3715
3716         return 0;
3717 }
3718 #endif
3719
3720 /*
3721  * Acquires the lock for the atomic state object and returns
3722  * the new atomic state.
3723  *
3724  * This should only be called during atomic check.
3725  */
3726 int dm_atomic_get_state(struct drm_atomic_state *state,
3727                         struct dm_atomic_state **dm_state)
3728 {
3729         struct drm_device *dev = state->dev;
3730         struct amdgpu_device *adev = drm_to_adev(dev);
3731         struct amdgpu_display_manager *dm = &adev->dm;
3732         struct drm_private_state *priv_state;
3733
3734         if (*dm_state)
3735                 return 0;
3736
3737         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3738         if (IS_ERR(priv_state))
3739                 return PTR_ERR(priv_state);
3740
3741         *dm_state = to_dm_atomic_state(priv_state);
3742
3743         return 0;
3744 }
3745
3746 static struct dm_atomic_state *
3747 dm_atomic_get_new_state(struct drm_atomic_state *state)
3748 {
3749         struct drm_device *dev = state->dev;
3750         struct amdgpu_device *adev = drm_to_adev(dev);
3751         struct amdgpu_display_manager *dm = &adev->dm;
3752         struct drm_private_obj *obj;
3753         struct drm_private_state *new_obj_state;
3754         int i;
3755
3756         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3757                 if (obj->funcs == dm->atomic_obj.funcs)
3758                         return to_dm_atomic_state(new_obj_state);
3759         }
3760
3761         return NULL;
3762 }
3763
3764 static struct drm_private_state *
3765 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3766 {
3767         struct dm_atomic_state *old_state, *new_state;
3768
3769         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3770         if (!new_state)
3771                 return NULL;
3772
3773         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3774
3775         old_state = to_dm_atomic_state(obj->state);
3776
3777         if (old_state && old_state->context)
3778                 new_state->context = dc_copy_state(old_state->context);
3779
3780         if (!new_state->context) {
3781                 kfree(new_state);
3782                 return NULL;
3783         }
3784
3785         return &new_state->base;
3786 }
3787
3788 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3789                                     struct drm_private_state *state)
3790 {
3791         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3792
3793         if (dm_state && dm_state->context)
3794                 dc_release_state(dm_state->context);
3795
3796         kfree(dm_state);
3797 }
3798
3799 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3800         .atomic_duplicate_state = dm_atomic_duplicate_state,
3801         .atomic_destroy_state = dm_atomic_destroy_state,
3802 };
3803
3804 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3805 {
3806         struct dm_atomic_state *state;
3807         int r;
3808
3809         adev->mode_info.mode_config_initialized = true;
3810
3811         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3812         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3813
3814         adev_to_drm(adev)->mode_config.max_width = 16384;
3815         adev_to_drm(adev)->mode_config.max_height = 16384;
3816
3817         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3818         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3819         /* indicates support for immediate flip */
3820         adev_to_drm(adev)->mode_config.async_page_flip = true;
3821
3822         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3823
3824         state = kzalloc(sizeof(*state), GFP_KERNEL);
3825         if (!state)
3826                 return -ENOMEM;
3827
3828         state->context = dc_create_state(adev->dm.dc);
3829         if (!state->context) {
3830                 kfree(state);
3831                 return -ENOMEM;
3832         }
3833
3834         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3835
3836         drm_atomic_private_obj_init(adev_to_drm(adev),
3837                                     &adev->dm.atomic_obj,
3838                                     &state->base,
3839                                     &dm_atomic_state_funcs);
3840
3841         r = amdgpu_display_modeset_create_props(adev);
3842         if (r) {
3843                 dc_release_state(state->context);
3844                 kfree(state);
3845                 return r;
3846         }
3847
3848         r = amdgpu_dm_audio_init(adev);
3849         if (r) {
3850                 dc_release_state(state->context);
3851                 kfree(state);
3852                 return r;
3853         }
3854
3855         return 0;
3856 }
3857
3858 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3859 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3860 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3861
3862 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3863         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3864
3865 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3866                                             int bl_idx)
3867 {
3868 #if defined(CONFIG_ACPI)
3869         struct amdgpu_dm_backlight_caps caps;
3870
3871         memset(&caps, 0, sizeof(caps));
3872
3873         if (dm->backlight_caps[bl_idx].caps_valid)
3874                 return;
3875
3876         amdgpu_acpi_get_backlight_caps(&caps);
3877         if (caps.caps_valid) {
3878                 dm->backlight_caps[bl_idx].caps_valid = true;
3879                 if (caps.aux_support)
3880                         return;
3881                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3882                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3883         } else {
3884                 dm->backlight_caps[bl_idx].min_input_signal =
3885                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3886                 dm->backlight_caps[bl_idx].max_input_signal =
3887                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3888         }
3889 #else
3890         if (dm->backlight_caps[bl_idx].aux_support)
3891                 return;
3892
3893         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3894         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3895 #endif
3896 }
3897
3898 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3899                                 unsigned *min, unsigned *max)
3900 {
3901         if (!caps)
3902                 return 0;
3903
3904         if (caps->aux_support) {
3905                 // Firmware limits are in nits, DC API wants millinits.
3906                 *max = 1000 * caps->aux_max_input_signal;
3907                 *min = 1000 * caps->aux_min_input_signal;
3908         } else {
3909                 // Firmware limits are 8-bit, PWM control is 16-bit.
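                // Multiplying by 0x101 replicates the byte into both halves
                // (0xAB -> 0xABAB), so 0xFF maps exactly to 0xFFFF.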
3910                 *max = 0x101 * caps->max_input_signal;
3911                 *min = 0x101 * caps->min_input_signal;
3912         }
3913         return 1;
3914 }
3915
3916 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3917                                         uint32_t brightness)
3918 {
3919         unsigned min, max;
3920
3921         if (!get_brightness_range(caps, &min, &max))
3922                 return brightness;
3923
3924         // Rescale 0..255 to min..max
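        // Example (assuming the default PWM limits of 12 and 255):
        // min = 0x101 * 12 = 3084, max = 0x101 * 255 = 65535, so user
        // levels 0 and 255 map to 3084 and 65535 respectively.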
3925         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3926                                        AMDGPU_MAX_BL_LEVEL);
3927 }
3928
3929 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3930                                       uint32_t brightness)
3931 {
3932         unsigned min, max;
3933
3934         if (!get_brightness_range(caps, &min, &max))
3935                 return brightness;
3936
3937         if (brightness < min)
3938                 return 0;
3939         // Rescale min..max to 0..255
3940         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3941                                  max - min);
3942 }
3943
3944 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3945                                          int bl_idx,
3946                                          u32 user_brightness)
3947 {
3948         struct amdgpu_dm_backlight_caps caps;
3949         struct dc_link *link;
3950         u32 brightness;
3951         bool rc;
3952
3953         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3954         caps = dm->backlight_caps[bl_idx];
3955
3956         dm->brightness[bl_idx] = user_brightness;
3957         /* update scratch register */
3958         if (bl_idx == 0)
3959                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3960         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3961         link = (struct dc_link *)dm->backlight_link[bl_idx];
3962
3963         /* Change brightness based on AUX property */
3964         if (caps.aux_support) {
3965                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3966                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3967                 if (!rc)
3968                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3969         } else {
3970                 rc = dc_link_set_backlight_level(link, brightness, 0);
3971                 if (!rc)
3972                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3973         }
3974
3975         return rc ? 0 : 1;
3976 }
3977
3978 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3979 {
3980         struct amdgpu_display_manager *dm = bl_get_data(bd);
3981         int i;
3982
3983         for (i = 0; i < dm->num_of_edps; i++) {
3984                 if (bd == dm->backlight_dev[i])
3985                         break;
3986         }
3987         if (i >= dm->num_of_edps)
3988                 i = 0;
3989         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3990
3991         return 0;
3992 }
3993
3994 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3995                                          int bl_idx)
3996 {
3997         struct amdgpu_dm_backlight_caps caps;
3998         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3999
4000         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4001         caps = dm->backlight_caps[bl_idx];
4002
4003         if (caps.aux_support) {
4004                 u32 avg, peak;
4005                 bool rc;
4006
4007                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4008                 if (!rc)
4009                         return dm->brightness[bl_idx];
4010                 return convert_brightness_to_user(&caps, avg);
4011         } else {
4012                 int ret = dc_link_get_backlight_level(link);
4013
4014                 if (ret == DC_ERROR_UNEXPECTED)
4015                         return dm->brightness[bl_idx];
4016                 return convert_brightness_to_user(&caps, ret);
4017         }
4018 }
4019
4020 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4021 {
4022         struct amdgpu_display_manager *dm = bl_get_data(bd);
4023         int i;
4024
4025         for (i = 0; i < dm->num_of_edps; i++) {
4026                 if (bd == dm->backlight_dev[i])
4027                         break;
4028         }
4029         if (i >= dm->num_of_edps)
4030                 i = 0;
4031         return amdgpu_dm_backlight_get_level(dm, i);
4032 }
4033
4034 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4035         .options = BL_CORE_SUSPENDRESUME,
4036         .get_brightness = amdgpu_dm_backlight_get_brightness,
4037         .update_status  = amdgpu_dm_backlight_update_status,
4038 };
4039
4040 static void
4041 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4042 {
4043         char bl_name[16];
4044         struct backlight_properties props = { 0 };
4045
4046         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4047         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4048
4049         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4050         props.brightness = AMDGPU_MAX_BL_LEVEL;
4051         props.type = BACKLIGHT_RAW;
4052
4053         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4054                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4055
4056         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4057                                                                        adev_to_drm(dm->adev)->dev,
4058                                                                        dm,
4059                                                                        &amdgpu_dm_backlight_ops,
4060                                                                        &props);
4061
4062         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4063                 DRM_ERROR("DM: Backlight registration failed!\n");
4064         else
4065                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4066 }
4067 #endif
4068
4069 static int initialize_plane(struct amdgpu_display_manager *dm,
4070                             struct amdgpu_mode_info *mode_info, int plane_id,
4071                             enum drm_plane_type plane_type,
4072                             const struct dc_plane_cap *plane_cap)
4073 {
4074         struct drm_plane *plane;
4075         unsigned long possible_crtcs;
4076         int ret = 0;
4077
4078         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4079         if (!plane) {
4080                 DRM_ERROR("KMS: Failed to allocate plane\n");
4081                 return -ENOMEM;
4082         }
4083         plane->type = plane_type;
4084
4085         /*
4086          * HACK: IGT tests expect that the primary plane for a CRTC
4087          * has only that one CRTC as a possible CRTC. Only expose
4088          * support for any CRTC on planes that will not be used as a
4089          * primary plane - i.e. overlay or underlay planes.
4090          */
4091         possible_crtcs = 1 << plane_id;
4092         if (plane_id >= dm->dc->caps.max_streams)
4093                 possible_crtcs = 0xff;
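        /*
         * Illustrative mapping: with 4 streams, planes 0-3 are primaries
         * pinned to their own CRTC, while higher plane ids (overlays) may
         * attach to any CRTC.
         */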
4094
4095         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4096
4097         if (ret) {
4098                 DRM_ERROR("KMS: Failed to initialize plane\n");
4099                 kfree(plane);
4100                 return ret;
4101         }
4102
4103         if (mode_info)
4104                 mode_info->planes[plane_id] = plane;
4105
4106         return ret;
4107 }
4108
4109
4110 static void register_backlight_device(struct amdgpu_display_manager *dm,
4111                                       struct dc_link *link)
4112 {
4113 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4114         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4115
4116         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4117             link->type != dc_connection_none) {
4118                 /*
4119                  * Even if registration fails, we should continue with
4120                  * DM initialization, because not having backlight control
4121                  * is better than a black screen.
4122                  */
4123                 if (!dm->backlight_dev[dm->num_of_edps])
4124                         amdgpu_dm_register_backlight_device(dm);
4125
4126                 if (dm->backlight_dev[dm->num_of_edps]) {
4127                         dm->backlight_link[dm->num_of_edps] = link;
4128                         dm->num_of_edps++;
4129                 }
4130         }
4131 #endif
4132 }
4133
4134
4135 /*
4136  * In this architecture, the association
4137  * connector -> encoder -> crtc
4138  * is not really required. The crtc and connector will hold the
4139  * display_index as an abstraction to use with the DAL component.
4140  *
4141  * Returns 0 on success
4142  */
4143 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4144 {
4145         struct amdgpu_display_manager *dm = &adev->dm;
4146         int32_t i;
4147         struct amdgpu_dm_connector *aconnector = NULL;
4148         struct amdgpu_encoder *aencoder = NULL;
4149         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4150         uint32_t link_cnt;
4151         int32_t primary_planes;
4152         enum dc_connection_type new_connection_type = dc_connection_none;
4153         const struct dc_plane_cap *plane;
4154         bool psr_feature_enabled = false;
4155
4156         dm->display_indexes_num = dm->dc->caps.max_streams;
4157         /* Update the actual number of CRTCs in use */
4158         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4159
4160         link_cnt = dm->dc->caps.max_links;
4161         if (amdgpu_dm_mode_config_init(dm->adev)) {
4162                 DRM_ERROR("DM: Failed to initialize mode config\n");
4163                 return -EINVAL;
4164         }
4165
4166         /* There is one primary plane per CRTC */
4167         primary_planes = dm->dc->caps.max_streams;
4168         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4169
4170         /*
4171          * Initialize primary planes, implicit planes for legacy IOCTLs.
4172          * Order is reversed to match iteration order in atomic check.
4173          */
4174         for (i = (primary_planes - 1); i >= 0; i--) {
4175                 plane = &dm->dc->caps.planes[i];
4176
4177                 if (initialize_plane(dm, mode_info, i,
4178                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4179                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4180                         goto fail;
4181                 }
4182         }
4183
4184         /*
4185          * Initialize overlay planes, index starting after primary planes.
4186          * These planes have a higher DRM index than the primary planes since
4187          * they should be considered as having a higher z-order.
4188          * Order is reversed to match iteration order in atomic check.
4189          *
4190          * Only support DCN for now, and only expose one so we don't encourage
4191          * userspace to use up all the pipes.
4192          */
4193         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4194                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4195
4196                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4197                         continue;
4198
4199                 if (!plane->blends_with_above || !plane->blends_with_below)
4200                         continue;
4201
4202                 if (!plane->pixel_format_support.argb8888)
4203                         continue;
4204
4205                 if (initialize_plane(dm, NULL, primary_planes + i,
4206                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4207                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4208                         goto fail;
4209                 }
4210
4211                 /* Only create one overlay plane. */
4212                 break;
4213         }
4214
4215         for (i = 0; i < dm->dc->caps.max_streams; i++)
4216                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4217                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4218                         goto fail;
4219                 }
4220
4221 #if defined(CONFIG_DRM_AMD_DC_DCN)
4222         /* Use Outbox interrupt */
4223         switch (adev->ip_versions[DCE_HWIP][0]) {
4224         case IP_VERSION(3, 0, 0):
4225         case IP_VERSION(3, 1, 2):
4226         case IP_VERSION(3, 1, 3):
4227         case IP_VERSION(2, 1, 0):
4228                 if (register_outbox_irq_handlers(dm->adev)) {
4229                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4230                         goto fail;
4231                 }
4232                 break;
4233         default:
4234                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4235                               adev->ip_versions[DCE_HWIP][0]);
4236         }
4237
4238         /* Determine whether to enable PSR support by default. */
4239         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4240                 switch (adev->ip_versions[DCE_HWIP][0]) {
4241                 case IP_VERSION(3, 1, 2):
4242                 case IP_VERSION(3, 1, 3):
4243                         psr_feature_enabled = true;
4244                         break;
4245                 default:
4246                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4247                         break;
4248                 }
4249         }
4250 #endif
4251
4252         /* Disable vblank IRQs aggressively for power-saving. */
4253         adev_to_drm(adev)->vblank_disable_immediate = true;
4254
4255         /* loops over all connectors on the board */
4256         for (i = 0; i < link_cnt; i++) {
4257                 struct dc_link *link = NULL;
4258
4259                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4260                         DRM_ERROR(
4261                                 "KMS: Cannot support more than %d display indexes\n",
4262                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4263                         continue;
4264                 }
4265
4266                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4267                 if (!aconnector)
4268                         goto fail;
4269
4270                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4271                 if (!aencoder)
4272                         goto fail;
4273
4274                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4275                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4276                         goto fail;
4277                 }
4278
4279                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4280                         DRM_ERROR("KMS: Failed to initialize connector\n");
4281                         goto fail;
4282                 }
4283
4284                 link = dc_get_link_at_index(dm->dc, i);
4285
4286                 if (!dc_link_detect_sink(link, &new_connection_type))
4287                         DRM_ERROR("KMS: Failed to detect connector\n");
4288
4289                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4290                         emulated_link_detect(link);
4291                         amdgpu_dm_update_connector_after_detect(aconnector);
4292
4293                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4294                         amdgpu_dm_update_connector_after_detect(aconnector);
4295                         register_backlight_device(dm, link);
4296                         if (dm->num_of_edps)
4297                                 update_connector_ext_caps(aconnector);
4298                         if (psr_feature_enabled)
4299                                 amdgpu_dm_set_psr_caps(link);
4300
4301                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4302                          * PSR is also supported.
4303                          */
4304                         if (link->psr_settings.psr_feature_enabled)
4305                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4306                 }
4307
4308
4309         }
4310
4311         /* Software is initialized. Now we can register interrupt handlers. */
4312         switch (adev->asic_type) {
4313 #if defined(CONFIG_DRM_AMD_DC_SI)
4314         case CHIP_TAHITI:
4315         case CHIP_PITCAIRN:
4316         case CHIP_VERDE:
4317         case CHIP_OLAND:
4318                 if (dce60_register_irq_handlers(dm->adev)) {
4319                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4320                         goto fail;
4321                 }
4322                 break;
4323 #endif
4324         case CHIP_BONAIRE:
4325         case CHIP_HAWAII:
4326         case CHIP_KAVERI:
4327         case CHIP_KABINI:
4328         case CHIP_MULLINS:
4329         case CHIP_TONGA:
4330         case CHIP_FIJI:
4331         case CHIP_CARRIZO:
4332         case CHIP_STONEY:
4333         case CHIP_POLARIS11:
4334         case CHIP_POLARIS10:
4335         case CHIP_POLARIS12:
4336         case CHIP_VEGAM:
4337         case CHIP_VEGA10:
4338         case CHIP_VEGA12:
4339         case CHIP_VEGA20:
4340                 if (dce110_register_irq_handlers(dm->adev)) {
4341                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4342                         goto fail;
4343                 }
4344                 break;
4345         default:
4346 #if defined(CONFIG_DRM_AMD_DC_DCN)
4347                 switch (adev->ip_versions[DCE_HWIP][0]) {
4348                 case IP_VERSION(1, 0, 0):
4349                 case IP_VERSION(1, 0, 1):
4350                 case IP_VERSION(2, 0, 2):
4351                 case IP_VERSION(2, 0, 3):
4352                 case IP_VERSION(2, 0, 0):
4353                 case IP_VERSION(2, 1, 0):
4354                 case IP_VERSION(3, 0, 0):
4355                 case IP_VERSION(3, 0, 2):
4356                 case IP_VERSION(3, 0, 3):
4357                 case IP_VERSION(3, 0, 1):
4358                 case IP_VERSION(3, 1, 2):
4359                 case IP_VERSION(3, 1, 3):
4360                         if (dcn10_register_irq_handlers(dm->adev)) {
4361                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4362                                 goto fail;
4363                         }
4364                         break;
4365                 default:
4366                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4367                                         adev->ip_versions[DCE_HWIP][0]);
4368                         goto fail;
4369                 }
4370 #endif
4371                 break;
4372         }
4373
4374         return 0;
4375 fail:
4376         kfree(aencoder);
4377         kfree(aconnector);
4378
4379         return -EINVAL;
4380 }
4381
4382 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4383 {
4384         drm_atomic_private_obj_fini(&dm->atomic_obj);
4385         return;
4386 }
4387
4388 /******************************************************************************
4389  * amdgpu_display_funcs functions
4390  *****************************************************************************/
4391
4392 /*
4393  * dm_bandwidth_update - program display watermarks
4394  *
4395  * @adev: amdgpu_device pointer
4396  *
4397  * Calculate and program the display watermarks and line buffer allocation.
4398  */
4399 static void dm_bandwidth_update(struct amdgpu_device *adev)
4400 {
4401         /* TODO: implement later */
4402 }
4403
4404 static const struct amdgpu_display_funcs dm_display_funcs = {
4405         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4406         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4407         .backlight_set_level = NULL, /* never called for DC */
4408         .backlight_get_level = NULL, /* never called for DC */
4409         .hpd_sense = NULL, /* called unconditionally */
4410         .hpd_set_polarity = NULL, /* called unconditionally */
4411         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4412         .page_flip_get_scanoutpos =
4413                 dm_crtc_get_scanoutpos, /* called unconditionally */
4414         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4415         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4416 };
4417
4418 #if defined(CONFIG_DEBUG_KERNEL_DC)
4419
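/*
 * Debug-only sysfs hook: writing 0 fakes a suspend, writing a non-zero
 * value fakes a resume plus a hotplug event. A usage sketch (the path
 * assumes the device is card0):
 *
 *   echo 0 > /sys/class/drm/card0/device/s3_debug
 *   echo 1 > /sys/class/drm/card0/device/s3_debug
 */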
4420 static ssize_t s3_debug_store(struct device *device,
4421                               struct device_attribute *attr,
4422                               const char *buf,
4423                               size_t count)
4424 {
4425         int ret;
4426         int s3_state;
4427         struct drm_device *drm_dev = dev_get_drvdata(device);
4428         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4429
4430         ret = kstrtoint(buf, 0, &s3_state);
4431
4432         if (ret == 0) {
4433                 if (s3_state) {
4434                         dm_resume(adev);
4435                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4436                 } else
4437                         dm_suspend(adev);
4438         }
4439
4440         return ret == 0 ? count : 0;
4441 }
4442
4443 DEVICE_ATTR_WO(s3_debug);
4444
4445 #endif
4446
4447 static int dm_early_init(void *handle)
4448 {
4449         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4450
4451         switch (adev->asic_type) {
4452 #if defined(CONFIG_DRM_AMD_DC_SI)
4453         case CHIP_TAHITI:
4454         case CHIP_PITCAIRN:
4455         case CHIP_VERDE:
4456                 adev->mode_info.num_crtc = 6;
4457                 adev->mode_info.num_hpd = 6;
4458                 adev->mode_info.num_dig = 6;
4459                 break;
4460         case CHIP_OLAND:
4461                 adev->mode_info.num_crtc = 2;
4462                 adev->mode_info.num_hpd = 2;
4463                 adev->mode_info.num_dig = 2;
4464                 break;
4465 #endif
4466         case CHIP_BONAIRE:
4467         case CHIP_HAWAII:
4468                 adev->mode_info.num_crtc = 6;
4469                 adev->mode_info.num_hpd = 6;
4470                 adev->mode_info.num_dig = 6;
4471                 break;
4472         case CHIP_KAVERI:
4473                 adev->mode_info.num_crtc = 4;
4474                 adev->mode_info.num_hpd = 6;
4475                 adev->mode_info.num_dig = 7;
4476                 break;
4477         case CHIP_KABINI:
4478         case CHIP_MULLINS:
4479                 adev->mode_info.num_crtc = 2;
4480                 adev->mode_info.num_hpd = 6;
4481                 adev->mode_info.num_dig = 6;
4482                 break;
4483         case CHIP_FIJI:
4484         case CHIP_TONGA:
4485                 adev->mode_info.num_crtc = 6;
4486                 adev->mode_info.num_hpd = 6;
4487                 adev->mode_info.num_dig = 7;
4488                 break;
4489         case CHIP_CARRIZO:
4490                 adev->mode_info.num_crtc = 3;
4491                 adev->mode_info.num_hpd = 6;
4492                 adev->mode_info.num_dig = 9;
4493                 break;
4494         case CHIP_STONEY:
4495                 adev->mode_info.num_crtc = 2;
4496                 adev->mode_info.num_hpd = 6;
4497                 adev->mode_info.num_dig = 9;
4498                 break;
4499         case CHIP_POLARIS11:
4500         case CHIP_POLARIS12:
4501                 adev->mode_info.num_crtc = 5;
4502                 adev->mode_info.num_hpd = 5;
4503                 adev->mode_info.num_dig = 5;
4504                 break;
4505         case CHIP_POLARIS10:
4506         case CHIP_VEGAM:
4507                 adev->mode_info.num_crtc = 6;
4508                 adev->mode_info.num_hpd = 6;
4509                 adev->mode_info.num_dig = 6;
4510                 break;
4511         case CHIP_VEGA10:
4512         case CHIP_VEGA12:
4513         case CHIP_VEGA20:
4514                 adev->mode_info.num_crtc = 6;
4515                 adev->mode_info.num_hpd = 6;
4516                 adev->mode_info.num_dig = 6;
4517                 break;
4518         default:
4519 #if defined(CONFIG_DRM_AMD_DC_DCN)
4520                 switch (adev->ip_versions[DCE_HWIP][0]) {
4521                 case IP_VERSION(2, 0, 2):
4522                 case IP_VERSION(3, 0, 0):
4523                         adev->mode_info.num_crtc = 6;
4524                         adev->mode_info.num_hpd = 6;
4525                         adev->mode_info.num_dig = 6;
4526                         break;
4527                 case IP_VERSION(2, 0, 0):
4528                 case IP_VERSION(3, 0, 2):
4529                         adev->mode_info.num_crtc = 5;
4530                         adev->mode_info.num_hpd = 5;
4531                         adev->mode_info.num_dig = 5;
4532                         break;
4533                 case IP_VERSION(2, 0, 3):
4534                 case IP_VERSION(3, 0, 3):
4535                         adev->mode_info.num_crtc = 2;
4536                         adev->mode_info.num_hpd = 2;
4537                         adev->mode_info.num_dig = 2;
4538                         break;
4539                 case IP_VERSION(1, 0, 0):
4540                 case IP_VERSION(1, 0, 1):
4541                 case IP_VERSION(3, 0, 1):
4542                 case IP_VERSION(2, 1, 0):
4543                 case IP_VERSION(3, 1, 2):
4544                 case IP_VERSION(3, 1, 3):
4545                         adev->mode_info.num_crtc = 4;
4546                         adev->mode_info.num_hpd = 4;
4547                         adev->mode_info.num_dig = 4;
4548                         break;
4549                 default:
4550                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4551                                         adev->ip_versions[DCE_HWIP][0]);
4552                         return -EINVAL;
4553                 }
4554 #endif
4555                 break;
4556         }
4557
4558         amdgpu_dm_set_irq_funcs(adev);
4559
4560         if (adev->mode_info.funcs == NULL)
4561                 adev->mode_info.funcs = &dm_display_funcs;
4562
4563         /*
4564          * Note: Do NOT change adev->audio_endpt_rreg and
4565          * adev->audio_endpt_wreg because they are initialized in
4566          * amdgpu_device_init().
4567          */
4568 #if defined(CONFIG_DEBUG_KERNEL_DC)
4569         device_create_file(
4570                 adev_to_drm(adev)->dev,
4571                 &dev_attr_s3_debug);
4572 #endif
4573
4574         return 0;
4575 }
4576
4577 static bool modeset_required(struct drm_crtc_state *crtc_state,
4578                              struct dc_stream_state *new_stream,
4579                              struct dc_stream_state *old_stream)
4580 {
4581         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4582 }
4583
4584 static bool modereset_required(struct drm_crtc_state *crtc_state)
4585 {
4586         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4587 }
4588
4589 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4590 {
4591         drm_encoder_cleanup(encoder);
4592         kfree(encoder);
4593 }
4594
4595 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4596         .destroy = amdgpu_dm_encoder_destroy,
4597 };
4598
4599
4600 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4601                                          struct drm_framebuffer *fb,
4602                                          int *min_downscale, int *max_upscale)
4603 {
4604         struct amdgpu_device *adev = drm_to_adev(dev);
4605         struct dc *dc = adev->dm.dc;
4606         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4607         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4608
4609         switch (fb->format->format) {
4610         case DRM_FORMAT_P010:
4611         case DRM_FORMAT_NV12:
4612         case DRM_FORMAT_NV21:
4613                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4614                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4615                 break;
4616
4617         case DRM_FORMAT_XRGB16161616F:
4618         case DRM_FORMAT_ARGB16161616F:
4619         case DRM_FORMAT_XBGR16161616F:
4620         case DRM_FORMAT_ABGR16161616F:
4621                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4622                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4623                 break;
4624
4625         default:
4626                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4627                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4628                 break;
4629         }
4630
4631         /*
4632          * A factor of 1 in the plane_cap means scaling is not allowed,
4633          * i.e. use a scaling factor of 1.0 == 1000 units.
4634          */
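             /*
              * Illustration of the units: a min_downscale of 250 permits
              * shrinking to 0.25x of the source size, and a max_upscale of
              * 16000 permits stretching to 16x.
              */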
4635         if (*max_upscale == 1)
4636                 *max_upscale = 1000;
4637
4638         if (*min_downscale == 1)
4639                 *min_downscale = 1000;
4640 }
4641
4642
4643 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4644                                 const struct drm_plane_state *state,
4645                                 struct dc_scaling_info *scaling_info)
4646 {
4647         int scale_w, scale_h, min_downscale, max_upscale;
4648
4649         memset(scaling_info, 0, sizeof(*scaling_info));
4650
4651         /* Source is in 16.16 fixed point, but we ignore the fractional part for now... */
4652         scaling_info->src_rect.x = state->src_x >> 16;
4653         scaling_info->src_rect.y = state->src_y >> 16;
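             /*
              * Worked example: a src_x of 0x00018000 encodes 1.5 in 16.16
              * fixed point; the shift above keeps only the integer part,
              * so src_rect.x == 1.
              */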
4654
4655         /*
4656          * For reasons we don't (yet) fully understand, a non-zero
4657          * src_y coordinate into an NV12 buffer can cause a
4658          * system hang on DCN1x.
4659          * To avoid hangs (and maybe be overly cautious),
4660          * let's reject both non-zero src_x and src_y.
4661          *
4662          * We currently know of only one use-case to reproduce a
4663          * scenario with non-zero src_x and src_y for NV12, which
4664          * is to gesture the YouTube Android app into full screen
4665          * on ChromeOS.
4666          */
4667         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4668             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4669             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4670             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4671                 return -EINVAL;
4672
4673         scaling_info->src_rect.width = state->src_w >> 16;
4674         if (scaling_info->src_rect.width == 0)
4675                 return -EINVAL;
4676
4677         scaling_info->src_rect.height = state->src_h >> 16;
4678         if (scaling_info->src_rect.height == 0)
4679                 return -EINVAL;
4680
4681         scaling_info->dst_rect.x = state->crtc_x;
4682         scaling_info->dst_rect.y = state->crtc_y;
4683
4684         if (state->crtc_w == 0)
4685                 return -EINVAL;
4686
4687         scaling_info->dst_rect.width = state->crtc_w;
4688
4689         if (state->crtc_h == 0)
4690                 return -EINVAL;
4691
4692         scaling_info->dst_rect.height = state->crtc_h;
4693
4694         /* DRM doesn't specify clipping on destination output. */
4695         scaling_info->clip_rect = scaling_info->dst_rect;
4696
4697         /* Validate scaling per-format with DC plane caps */
4698         if (state->plane && state->plane->dev && state->fb) {
4699                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4700                                              &min_downscale, &max_upscale);
4701         } else {
4702                 min_downscale = 250;
4703                 max_upscale = 16000;
4704         }
4705
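             /*
              * scale_w/scale_h below are in thousandths: e.g. scaling a
              * 1920-pixel-wide source to a 960-pixel destination gives
              * scale_w == 500 (0.5x), which is rejected unless
              * min_downscale <= 500.
              */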
4706         scale_w = scaling_info->dst_rect.width * 1000 /
4707                   scaling_info->src_rect.width;
4708
4709         if (scale_w < min_downscale || scale_w > max_upscale)
4710                 return -EINVAL;
4711
4712         scale_h = scaling_info->dst_rect.height * 1000 /
4713                   scaling_info->src_rect.height;
4714
4715         if (scale_h < min_downscale || scale_h > max_upscale)
4716                 return -EINVAL;
4717
4718         /*
4719          * The "scaling_quality" can be ignored for now; quality = 0 makes
4720          * DC assume reasonable defaults based on the format.
4721          */
4722
4723         return 0;
4724 }
4725
4726 static void
4727 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4728                                  uint64_t tiling_flags)
4729 {
4730         /* Fill GFX8 params */
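             /*
              * AMDGPU_TILING_GET() (from amdgpu_drm.h) extracts a named
              * bitfield from the packed 64-bit tiling_flags, much like
              * AMD_FMT_MOD_GET() does for format modifiers.
              */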
4731         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4732                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4733
4734                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4735                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4736                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4737                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4738                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4739
4740                 /* XXX fix me for VI */
4741                 tiling_info->gfx8.num_banks = num_banks;
4742                 tiling_info->gfx8.array_mode =
4743                                 DC_ARRAY_2D_TILED_THIN1;
4744                 tiling_info->gfx8.tile_split = tile_split;
4745                 tiling_info->gfx8.bank_width = bankw;
4746                 tiling_info->gfx8.bank_height = bankh;
4747                 tiling_info->gfx8.tile_aspect = mtaspect;
4748                 tiling_info->gfx8.tile_mode =
4749                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4750         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4751                         == DC_ARRAY_1D_TILED_THIN1) {
4752                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4753         }
4754
4755         tiling_info->gfx8.pipe_config =
4756                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4757 }
4758
4759 static void
4760 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4761                                   union dc_tiling_info *tiling_info)
4762 {
4763         tiling_info->gfx9.num_pipes =
4764                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4765         tiling_info->gfx9.num_banks =
4766                 adev->gfx.config.gb_addr_config_fields.num_banks;
4767         tiling_info->gfx9.pipe_interleave =
4768                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4769         tiling_info->gfx9.num_shader_engines =
4770                 adev->gfx.config.gb_addr_config_fields.num_se;
4771         tiling_info->gfx9.max_compressed_frags =
4772                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4773         tiling_info->gfx9.num_rb_per_se =
4774                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4775         tiling_info->gfx9.shaderEnable = 1;
4776         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4777                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4778 }
4779
4780 static int
4781 validate_dcc(struct amdgpu_device *adev,
4782              const enum surface_pixel_format format,
4783              const enum dc_rotation_angle rotation,
4784              const union dc_tiling_info *tiling_info,
4785              const struct dc_plane_dcc_param *dcc,
4786              const struct dc_plane_address *address,
4787              const struct plane_size *plane_size)
4788 {
4789         struct dc *dc = adev->dm.dc;
4790         struct dc_dcc_surface_param input;
4791         struct dc_surface_dcc_cap output;
4792
4793         memset(&input, 0, sizeof(input));
4794         memset(&output, 0, sizeof(output));
4795
4796         if (!dcc->enable)
4797                 return 0;
4798
4799         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4800             !dc->cap_funcs.get_dcc_compression_cap)
4801                 return -EINVAL;
4802
4803         input.format = format;
4804         input.surface_size.width = plane_size->surface_size.width;
4805         input.surface_size.height = plane_size->surface_size.height;
4806         input.swizzle_mode = tiling_info->gfx9.swizzle;
4807
4808         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4809                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4810         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4811                 input.scan = SCAN_DIRECTION_VERTICAL;
4812
4813         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4814                 return -EINVAL;
4815
4816         if (!output.capable)
4817                 return -EINVAL;
4818
4819         if (dcc->independent_64b_blks == 0 &&
4820             output.grph.rgb.independent_64b_blks != 0)
4821                 return -EINVAL;
4822
4823         return 0;
4824 }
4825
4826 static bool
4827 modifier_has_dcc(uint64_t modifier)
4828 {
4829         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4830 }
4831
4832 static unsigned
4833 modifier_gfx9_swizzle_mode(uint64_t modifier)
4834 {
4835         if (modifier == DRM_FORMAT_MOD_LINEAR)
4836                 return 0;
4837
4838         return AMD_FMT_MOD_GET(TILE, modifier);
4839 }
4840
4841 static const struct drm_format_info *
4842 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4843 {
4844         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4845 }
4846
4847 static void
4848 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4849                                     union dc_tiling_info *tiling_info,
4850                                     uint64_t modifier)
4851 {
4852         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4853         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4854         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4855         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4856
4857         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4858
4859         if (!IS_AMD_FMT_MOD(modifier))
4860                 return;
4861
4862         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4863         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4864
4865         if (adev->family >= AMDGPU_FAMILY_NV) {
4866                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4867         } else {
4868                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4869
4870                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4871         }
4872 }
4873
4874 enum dm_micro_swizzle {
4875         MICRO_SWIZZLE_Z = 0,
4876         MICRO_SWIZZLE_S = 1,
4877         MICRO_SWIZZLE_D = 2,
4878         MICRO_SWIZZLE_R = 3
4879 };
4880
4881 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4882                                           uint32_t format,
4883                                           uint64_t modifier)
4884 {
4885         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4886         const struct drm_format_info *info = drm_format_info(format);
4887         int i;
4888
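             /*
              * The driver relies on the low two bits of a GFX9+ swizzle
              * mode encoding the micro-tile class, so "& 3" below maps
              * e.g. a _S_X swizzle to MICRO_SWIZZLE_S and a _D_X swizzle
              * to MICRO_SWIZZLE_D.
              */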
4889         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4890
4891         if (!info)
4892                 return false;
4893
4894         /*
4895          * We always have to allow these modifiers:
4896          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4897          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4898          */
4899         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4900             modifier == DRM_FORMAT_MOD_INVALID) {
4901                 return true;
4902         }
4903
4904         /* Check that the modifier is on the list of the plane's supported modifiers. */
4905         for (i = 0; i < plane->modifier_count; i++) {
4906                 if (modifier == plane->modifiers[i])
4907                         break;
4908         }
4909         if (i == plane->modifier_count)
4910                 return false;
4911
4912         /*
4913          * For D swizzle the canonical modifier depends on the bpp, so check
4914          * it here.
4915          */
4916         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4917             adev->family >= AMDGPU_FAMILY_NV) {
4918                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4919                         return false;
4920         }
4921
4922         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4923             info->cpp[0] < 8)
4924                 return false;
4925
4926         if (modifier_has_dcc(modifier)) {
4927                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4928                 if (info->cpp[0] != 4)
4929                         return false;
4930                 /* We support multi-planar formats, but not when combined with
4931                  * additional DCC metadata planes. */
4932                 if (info->num_planes > 1)
4933                         return false;
4934         }
4935
4936         return true;
4937 }
4938
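/*
 * Append one modifier to a heap-allocated array, doubling the capacity when
 * full. On allocation failure the array is freed and *mods is set to NULL,
 * which turns subsequent add_modifier() calls into no-ops.
 */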
4939 static void
4940 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4941 {
4942         if (!*mods)
4943                 return;
4944
4945         if (*cap - *size < 1) {
4946                 uint64_t new_cap = *cap * 2;
4947                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4948
4949                 if (!new_mods) {
4950                         kfree(*mods);
4951                         *mods = NULL;
4952                         return;
4953                 }
4954
4955                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4956                 kfree(*mods);
4957                 *mods = new_mods;
4958                 *cap = new_cap;
4959         }
4960
4961         (*mods)[*size] = mod;
4962         *size += 1;
4963 }
4964
4965 static void
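/*
 * A note on the AMD_FMT_MOD_SET() chains below (a sketch of the macro
 * behavior as defined in include/uapi/drm/drm_fourcc.h): each field value
 * is shifted into its bit range of the 64-bit modifier, e.g.
 *
 *   uint64_t mod = AMD_FMT_MOD |
 *                  AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *                  AMD_FMT_MOD_SET(DCC, 1);
 *
 * after which AMD_FMT_MOD_GET(DCC, mod) returns 1 and modifier_has_dcc(mod)
 * is true, so a modifier is a plain integer fully describing the layout.
 */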
4966 add_gfx9_modifiers(const struct amdgpu_device *adev,
4967                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4968 {
4969         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4970         int pipe_xor_bits = min(8, pipes +
4971                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4972         int bank_xor_bits = min(8 - pipe_xor_bits,
4973                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4974         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4975                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4976
4977
4978         if (adev->family == AMDGPU_FAMILY_RV) {
4979                 /* Raven2 and later */
4980                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4981
4982                 /*
4983                  * No _D DCC swizzles yet because we only allow 32bpp, which
4984                  * doesn't support _D on DCN
4985                  */
4986
4987                 if (has_constant_encode) {
4988                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4989                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4990                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4991                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4992                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4993                                     AMD_FMT_MOD_SET(DCC, 1) |
4994                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4995                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4996                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4997                 }
4998
4999                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5001                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5002                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5004                             AMD_FMT_MOD_SET(DCC, 1) |
5005                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5006                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5007                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5008
5009                 if (has_constant_encode) {
5010                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5011                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5012                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5013                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5014                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5015                                     AMD_FMT_MOD_SET(DCC, 1) |
5016                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5017                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5018                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5020                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5021                                     AMD_FMT_MOD_SET(RB, rb) |
5022                                     AMD_FMT_MOD_SET(PIPE, pipes));
5023                 }
5024
5025                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5026                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5027                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5028                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5029                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5030                             AMD_FMT_MOD_SET(DCC, 1) |
5031                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5032                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5033                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5034                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5035                             AMD_FMT_MOD_SET(RB, rb) |
5036                             AMD_FMT_MOD_SET(PIPE, pipes));
5037         }
5038
5039         /*
5040          * Only supported for 64bpp on Raven, will be filtered on format in
5041          * dm_plane_format_mod_supported.
5042          */
5043         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5044                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5045                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5046                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5047                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5048
5049         if (adev->family == AMDGPU_FAMILY_RV) {
5050                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5051                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5052                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5053                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5054                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5055         }
5056
5057         /*
5058          * Only supported for 64bpp on Raven, will be filtered on format in
5059          * dm_plane_format_mod_supported.
5060          */
5061         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5062                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5063                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5064
5065         if (adev->family == AMDGPU_FAMILY_RV) {
5066                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5067                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5068                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5069         }
5070 }
5071
5072 static void
5073 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5074                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5075 {
5076         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5077
5078         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5080                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5081                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5082                     AMD_FMT_MOD_SET(DCC, 1) |
5083                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5084                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5085                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5086
5087         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5088                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5089                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5090                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5091                     AMD_FMT_MOD_SET(DCC, 1) |
5092                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5093                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5094                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5095                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5096
5097         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5098                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5099                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5100                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5101
5102         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5104                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5105                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5106
5107
5108         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5109         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5111                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5112
5113         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5115                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5116 }
5117
5118 static void
5119 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5120                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5121 {
5122         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5123         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5124
5125         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5126                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5127                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5128                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5129                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5130                     AMD_FMT_MOD_SET(DCC, 1) |
5131                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5132                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5133                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5134                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5135
5136         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5137                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5138                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5139                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5140                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5141                     AMD_FMT_MOD_SET(DCC, 1) |
5142                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5143                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5144                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5145
5146         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5147                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5148                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5149                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5150                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5151                     AMD_FMT_MOD_SET(DCC, 1) |
5152                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5153                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5154                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5155                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5156                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5157
5158         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5159                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5160                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5161                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5162                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5163                     AMD_FMT_MOD_SET(DCC, 1) |
5164                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5165                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5166                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5167                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5168
5169         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5170                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5171                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5172                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5173                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5174
5175         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5176                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5177                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5178                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5179                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5180
5181         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5182         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5183                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5184                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5185
5186         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5187                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5188                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5189 }
5190
5191 static int
5192 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5193 {
5194         uint64_t size = 0, capacity = 128;
5195         *mods = NULL;
5196
5197         /* We have not hooked up any pre-GFX9 modifiers. */
5198         if (adev->family < AMDGPU_FAMILY_AI)
5199                 return 0;
5200
5201         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5202
5203         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5204                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5205                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5206                 return *mods ? 0 : -ENOMEM;
5207         }
5208
5209         switch (adev->family) {
5210         case AMDGPU_FAMILY_AI:
5211         case AMDGPU_FAMILY_RV:
5212                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5213                 break;
5214         case AMDGPU_FAMILY_NV:
5215         case AMDGPU_FAMILY_VGH:
5216         case AMDGPU_FAMILY_YC:
5217                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5218                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5219                 else
5220                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5221                 break;
5222         }
5223
5224         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5225
5226         /* INVALID marks the end of the list. */
5227         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5228
5229         if (!*mods)
5230                 return -ENOMEM;
5231
5232         return 0;
5233 }
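
/*
 * Usage sketch (hypothetical caller, mirroring what the plane-init path
 * does with the result; funcs/formats/num_formats stand in for the plane's
 * function table and format list):
 *
 *   uint64_t *mods = NULL;
 *
 *   if (get_plane_modifiers(adev, plane->type, &mods) == 0) {
 *           drm_universal_plane_init(dm->ddev, plane, possible_crtcs,
 *                                    &funcs, formats, num_formats, mods,
 *                                    plane->type, NULL);
 *           kfree(mods);
 *   }
 *
 * The returned array is terminated by DRM_FORMAT_MOD_INVALID, which is the
 * sentinel drm_universal_plane_init() expects.
 */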
5234
5235 static int
5236 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5237                                           const struct amdgpu_framebuffer *afb,
5238                                           const enum surface_pixel_format format,
5239                                           const enum dc_rotation_angle rotation,
5240                                           const struct plane_size *plane_size,
5241                                           union dc_tiling_info *tiling_info,
5242                                           struct dc_plane_dcc_param *dcc,
5243                                           struct dc_plane_address *address,
5244                                           const bool force_disable_dcc)
5245 {
5246         const uint64_t modifier = afb->base.modifier;
5247         int ret = 0;
5248
5249         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5250         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5251
5252         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
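                     /*
                      * With a DCC modifier the compression metadata is
                      * carried as an extra framebuffer plane, hence the
                      * offsets[1] / pitches[1] references below.
                      */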
5253                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5254                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5255                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5256
5257                 dcc->enable = 1;
5258                 dcc->meta_pitch = afb->base.pitches[1];
5259                 dcc->independent_64b_blks = independent_64b_blks;
5260                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5261                         if (independent_64b_blks && independent_128b_blks)
5262                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5263                         else if (independent_128b_blks)
5264                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5265                         else if (independent_64b_blks && !independent_128b_blks)
5266                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5267                         else
5268                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5269                 } else {
5270                         if (independent_64b_blks)
5271                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5272                         else
5273                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5274                 }
5275
5276                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5277                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5278         }
5279
5280         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5281         if (ret)
5282                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5283
5284         return ret;
5285 }
5286
5287 static int
5288 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5289                              const struct amdgpu_framebuffer *afb,
5290                              const enum surface_pixel_format format,
5291                              const enum dc_rotation_angle rotation,
5292                              const uint64_t tiling_flags,
5293                              union dc_tiling_info *tiling_info,
5294                              struct plane_size *plane_size,
5295                              struct dc_plane_dcc_param *dcc,
5296                              struct dc_plane_address *address,
5297                              bool tmz_surface,
5298                              bool force_disable_dcc)
5299 {
5300         const struct drm_framebuffer *fb = &afb->base;
5301         int ret;
5302
5303         memset(tiling_info, 0, sizeof(*tiling_info));
5304         memset(plane_size, 0, sizeof(*plane_size));
5305         memset(dcc, 0, sizeof(*dcc));
5306         memset(address, 0, sizeof(*address));
5307
5308         address->tmz_surface = tmz_surface;
5309
5310         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5311                 uint64_t addr = afb->address + fb->offsets[0];
5312
5313                 plane_size->surface_size.x = 0;
5314                 plane_size->surface_size.y = 0;
5315                 plane_size->surface_size.width = fb->width;
5316                 plane_size->surface_size.height = fb->height;
5317                 plane_size->surface_pitch =
5318                         fb->pitches[0] / fb->format->cpp[0];
5319
5320                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5321                 address->grph.addr.low_part = lower_32_bits(addr);
5322                 address->grph.addr.high_part = upper_32_bits(addr);
5323         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5324                 uint64_t luma_addr = afb->address + fb->offsets[0];
5325                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5326
5327                 plane_size->surface_size.x = 0;
5328                 plane_size->surface_size.y = 0;
5329                 plane_size->surface_size.width = fb->width;
5330                 plane_size->surface_size.height = fb->height;
5331                 plane_size->surface_pitch =
5332                         fb->pitches[0] / fb->format->cpp[0];
5333
5334                 plane_size->chroma_size.x = 0;
5335                 plane_size->chroma_size.y = 0;
5336                 /* TODO: set these based on surface format */
5337                 plane_size->chroma_size.width = fb->width / 2;
5338                 plane_size->chroma_size.height = fb->height / 2;
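                     /*
                      * e.g. 4:2:0 formats such as NV12 subsample chroma by
                      * 2 in both dimensions, which is what the divisions
                      * above assume.
                      */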
5339
5340                 plane_size->chroma_pitch =
5341                         fb->pitches[1] / fb->format->cpp[1];
5342
5343                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5344                 address->video_progressive.luma_addr.low_part =
5345                         lower_32_bits(luma_addr);
5346                 address->video_progressive.luma_addr.high_part =
5347                         upper_32_bits(luma_addr);
5348                 address->video_progressive.chroma_addr.low_part =
5349                         lower_32_bits(chroma_addr);
5350                 address->video_progressive.chroma_addr.high_part =
5351                         upper_32_bits(chroma_addr);
5352         }
5353
5354         if (adev->family >= AMDGPU_FAMILY_AI) {
5355                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5356                                                                 rotation, plane_size,
5357                                                                 tiling_info, dcc,
5358                                                                 address,
5359                                                                 force_disable_dcc);
5360                 if (ret)
5361                         return ret;
5362         } else {
5363                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5364         }
5365
5366         return 0;
5367 }
5368
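/*
 * Derive DC blending attributes from the DRM plane state: per-pixel alpha is
 * enabled for overlay planes using a pre-multiplied alpha format, and a
 * plane-wide global alpha is programmed whenever the DRM alpha property sits
 * below its 16-bit maximum.
 */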
5369 static void
5370 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5371                                bool *per_pixel_alpha, bool *global_alpha,
5372                                int *global_alpha_value)
5373 {
5374         *per_pixel_alpha = false;
5375         *global_alpha = false;
5376         *global_alpha_value = 0xff;
5377
5378         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5379                 return;
5380
5381         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5382                 static const uint32_t alpha_formats[] = {
5383                         DRM_FORMAT_ARGB8888,
5384                         DRM_FORMAT_RGBA8888,
5385                         DRM_FORMAT_ABGR8888,
5386                 };
5387                 uint32_t format = plane_state->fb->format->format;
5388                 unsigned int i;
5389
5390                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5391                         if (format == alpha_formats[i]) {
5392                                 *per_pixel_alpha = true;
5393                                 break;
5394                         }
5395                 }
5396         }
5397
5398         if (plane_state->alpha < 0xffff) {
5399                 *global_alpha = true;
5400                 *global_alpha_value = plane_state->alpha >> 8;
5401         }
5402 }
5403
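/*
 * Map the DRM color encoding/range plane properties onto a DC color space.
 * RGB surfaces always use sRGB; YCbCr surfaces honour the BT.601/709/2020
 * encoding, with BT.2020 only supported in full range here.
 */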
5404 static int
5405 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5406                             const enum surface_pixel_format format,
5407                             enum dc_color_space *color_space)
5408 {
5409         bool full_range;
5410
5411         *color_space = COLOR_SPACE_SRGB;
5412
5413         /* DRM color properties only affect non-RGB formats. */
5414         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5415                 return 0;
5416
5417         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5418
5419         switch (plane_state->color_encoding) {
5420         case DRM_COLOR_YCBCR_BT601:
5421                 if (full_range)
5422                         *color_space = COLOR_SPACE_YCBCR601;
5423                 else
5424                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5425                 break;
5426
5427         case DRM_COLOR_YCBCR_BT709:
5428                 if (full_range)
5429                         *color_space = COLOR_SPACE_YCBCR709;
5430                 else
5431                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5432                 break;
5433
5434         case DRM_COLOR_YCBCR_BT2020:
5435                 if (full_range)
5436                         *color_space = COLOR_SPACE_2020_YCBCR;
5437                 else
5438                         return -EINVAL;
5439                 break;
5440
5441         default:
5442                 return -EINVAL;
5443         }
5444
5445         return 0;
5446 }
5447
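/*
 * Translate a DRM plane state into a DC plane_info/address pair: map the
 * DRM fourcc onto a DC surface format, convert the rotation, then fill in
 * the color, buffer and blending attributes via the helpers above.
 */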
5448 static int
5449 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5450                             const struct drm_plane_state *plane_state,
5451                             const uint64_t tiling_flags,
5452                             struct dc_plane_info *plane_info,
5453                             struct dc_plane_address *address,
5454                             bool tmz_surface,
5455                             bool force_disable_dcc)
5456 {
5457         const struct drm_framebuffer *fb = plane_state->fb;
5458         const struct amdgpu_framebuffer *afb =
5459                 to_amdgpu_framebuffer(plane_state->fb);
5460         int ret;
5461
5462         memset(plane_info, 0, sizeof(*plane_info));
5463
5464         switch (fb->format->format) {
5465         case DRM_FORMAT_C8:
5466                 plane_info->format =
5467                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5468                 break;
5469         case DRM_FORMAT_RGB565:
5470                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5471                 break;
5472         case DRM_FORMAT_XRGB8888:
5473         case DRM_FORMAT_ARGB8888:
5474                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5475                 break;
5476         case DRM_FORMAT_XRGB2101010:
5477         case DRM_FORMAT_ARGB2101010:
5478                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5479                 break;
5480         case DRM_FORMAT_XBGR2101010:
5481         case DRM_FORMAT_ABGR2101010:
5482                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5483                 break;
5484         case DRM_FORMAT_XBGR8888:
5485         case DRM_FORMAT_ABGR8888:
5486                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5487                 break;
5488         case DRM_FORMAT_NV21:
5489                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5490                 break;
5491         case DRM_FORMAT_NV12:
5492                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5493                 break;
5494         case DRM_FORMAT_P010:
5495                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5496                 break;
5497         case DRM_FORMAT_XRGB16161616F:
5498         case DRM_FORMAT_ARGB16161616F:
5499                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5500                 break;
5501         case DRM_FORMAT_XBGR16161616F:
5502         case DRM_FORMAT_ABGR16161616F:
5503                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5504                 break;
5505         case DRM_FORMAT_XRGB16161616:
5506         case DRM_FORMAT_ARGB16161616:
5507                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5508                 break;
5509         case DRM_FORMAT_XBGR16161616:
5510         case DRM_FORMAT_ABGR16161616:
5511                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5512                 break;
5513         default:
5514                 DRM_ERROR(
5515                         "Unsupported screen format %p4cc\n",
5516                         &fb->format->format);
5517                 return -EINVAL;
5518         }
5519
5520         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5521         case DRM_MODE_ROTATE_0:
5522                 plane_info->rotation = ROTATION_ANGLE_0;
5523                 break;
5524         case DRM_MODE_ROTATE_90:
5525                 plane_info->rotation = ROTATION_ANGLE_90;
5526                 break;
5527         case DRM_MODE_ROTATE_180:
5528                 plane_info->rotation = ROTATION_ANGLE_180;
5529                 break;
5530         case DRM_MODE_ROTATE_270:
5531                 plane_info->rotation = ROTATION_ANGLE_270;
5532                 break;
5533         default:
5534                 plane_info->rotation = ROTATION_ANGLE_0;
5535                 break;
5536         }
5537
5538         plane_info->visible = true;
5539         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5540
5541         plane_info->layer_index = 0;
5542
5543         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5544                                           &plane_info->color_space);
5545         if (ret)
5546                 return ret;
5547
5548         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5549                                            plane_info->rotation, tiling_flags,
5550                                            &plane_info->tiling_info,
5551                                            &plane_info->plane_size,
5552                                            &plane_info->dcc, address, tmz_surface,
5553                                            force_disable_dcc);
5554         if (ret)
5555                 return ret;
5556
5557         fill_blending_from_plane_state(
5558                 plane_state, &plane_info->per_pixel_alpha,
5559                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5560
5561         return 0;
5562 }
5563
5564 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5565                                     struct dc_plane_state *dc_plane_state,
5566                                     struct drm_plane_state *plane_state,
5567                                     struct drm_crtc_state *crtc_state)
5568 {
5569         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5570         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5571         struct dc_scaling_info scaling_info;
5572         struct dc_plane_info plane_info;
5573         int ret;
5574         bool force_disable_dcc = false;
5575
5576         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5577         if (ret)
5578                 return ret;
5579
5580         dc_plane_state->src_rect = scaling_info.src_rect;
5581         dc_plane_state->dst_rect = scaling_info.dst_rect;
5582         dc_plane_state->clip_rect = scaling_info.clip_rect;
5583         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5584
5585         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5586         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5587                                           afb->tiling_flags,
5588                                           &plane_info,
5589                                           &dc_plane_state->address,
5590                                           afb->tmz_surface,
5591                                           force_disable_dcc);
5592         if (ret)
5593                 return ret;
5594
5595         dc_plane_state->format = plane_info.format;
5596         dc_plane_state->color_space = plane_info.color_space;
5598         dc_plane_state->plane_size = plane_info.plane_size;
5599         dc_plane_state->rotation = plane_info.rotation;
5600         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5601         dc_plane_state->stereo_format = plane_info.stereo_format;
5602         dc_plane_state->tiling_info = plane_info.tiling_info;
5603         dc_plane_state->visible = plane_info.visible;
5604         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5605         dc_plane_state->global_alpha = plane_info.global_alpha;
5606         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5607         dc_plane_state->dcc = plane_info.dcc;
5608         dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5609         dc_plane_state->flip_int_enabled = true;
5610
5611         /*
5612          * Always set input transfer function, since plane state is refreshed
5613          * every time.
5614          */
5615         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5616         if (ret)
5617                 return ret;
5618
5619         return 0;
5620 }
5621
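/*
 * Derive the stream src (viewport) and dst (addressable area) rectangles
 * from the connector's scaling mode: RMX_ASPECT/RMX_OFF shrink one dst
 * dimension to preserve the aspect ratio, RMX_CENTER keeps the mode's
 * native size, and underscan borders are then carved out of the centred
 * dst rectangle.
 */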
5622 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5623                                            const struct dm_connector_state *dm_state,
5624                                            struct dc_stream_state *stream)
5625 {
5626         enum amdgpu_rmx_type rmx_type;
5627
5628         struct rect src = { 0 }; /* viewport in composition space */
5629         struct rect dst = { 0 }; /* stream addressable area */
5630
5631         /* no mode. nothing to be done */
5632         if (!mode)
5633                 return;
5634
5635         /* Full screen scaling by default */
5636         src.width = mode->hdisplay;
5637         src.height = mode->vdisplay;
5638         dst.width = stream->timing.h_addressable;
5639         dst.height = stream->timing.v_addressable;
5640
5641         if (dm_state) {
5642                 rmx_type = dm_state->scaling;
5643                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5644                         if (src.width * dst.height <
5645                                         src.height * dst.width) {
5646                                 /* height needs less upscaling/more downscaling */
5647                                 dst.width = src.width *
5648                                                 dst.height / src.height;
5649                         } else {
5650                                 /* width needs less upscaling/more downscaling */
5651                                 dst.height = src.height *
5652                                                 dst.width / src.width;
5653                         }
5654                 } else if (rmx_type == RMX_CENTER) {
5655                         dst = src;
5656                 }
5657
5658                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5659                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5660
5661                 if (dm_state->underscan_enable) {
5662                         dst.x += dm_state->underscan_hborder / 2;
5663                         dst.y += dm_state->underscan_vborder / 2;
5664                         dst.width -= dm_state->underscan_hborder;
5665                         dst.height -= dm_state->underscan_vborder;
5666                 }
5667         }
5668
5669         stream->src = src;
5670         stream->dst = dst;
5671
5672         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5673                       dst.x, dst.y, dst.width, dst.height);
5675 }
5676
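/*
 * Pick a DC color depth from the connector's display info, capping it by the
 * sink's YCbCr 4:2:0 deep-color support and by the bpc requested from
 * userspace (rounded down to an even value).
 */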
5677 static enum dc_color_depth
5678 convert_color_depth_from_display_info(const struct drm_connector *connector,
5679                                       bool is_y420, int requested_bpc)
5680 {
5681         uint8_t bpc;
5682
5683         if (is_y420) {
5684                 bpc = 8;
5685
5686                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5687                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5688                         bpc = 16;
5689                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5690                         bpc = 12;
5691                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5692                         bpc = 10;
5693         } else {
5694                 bpc = (uint8_t)connector->display_info.bpc;
5695                 /* Assume 8 bpc by default if no bpc is specified. */
5696                 bpc = bpc ? bpc : 8;
5697         }
5698
5699         if (requested_bpc > 0) {
5700                 /*
5701                  * Cap display bpc based on the user requested value.
5702                  *
5703                  * The value for state->max_bpc may not be correctly updated
5704                  * depending on when the connector gets added to the state
5705                  * or if this was called outside of atomic check, so it
5706                  * can't be used directly.
5707                  */
5708                 bpc = min_t(u8, bpc, requested_bpc);
5709
5710                 /* Round down to the nearest even number. */
5711                 bpc = bpc - (bpc & 1);
5712         }
5713
5714         switch (bpc) {
5715         case 0:
5716                 /*
5717                  * Temporary workaround: DRM doesn't parse color depth for
5718                  * EDID revisions before 1.4.
5719                  * TODO: Fix edid parsing
5720                  */
5721                 return COLOR_DEPTH_888;
5722         case 6:
5723                 return COLOR_DEPTH_666;
5724         case 8:
5725                 return COLOR_DEPTH_888;
5726         case 10:
5727                 return COLOR_DEPTH_101010;
5728         case 12:
5729                 return COLOR_DEPTH_121212;
5730         case 14:
5731                 return COLOR_DEPTH_141414;
5732         case 16:
5733                 return COLOR_DEPTH_161616;
5734         default:
5735                 return COLOR_DEPTH_UNDEFINED;
5736         }
5737 }
5738
5739 static enum dc_aspect_ratio
5740 get_aspect_ratio(const struct drm_display_mode *mode_in)
5741 {
5742         /* 1-1 mapping, since both enums follow the HDMI spec. */
5743         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5744 }
5745
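/*
 * Choose the stream output color space from the timing's pixel encoding:
 * RGB maps to sRGB, while YCbCr picks BT.709 or BT.601 based on the pixel
 * clock.
 */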
5746 static enum dc_color_space
5747 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5748 {
5749         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5750
5751         switch (dc_crtc_timing->pixel_encoding) {
5752         case PIXEL_ENCODING_YCBCR422:
5753         case PIXEL_ENCODING_YCBCR444:
5754         case PIXEL_ENCODING_YCBCR420:
5755         {
5756                 /*
5757                  * According to the HDMI spec, 27.03 MHz (27030 kHz) is the
5758                  * separation point between HDTV and SDTV; we use YCbCr709
5759                  * and YCbCr601 respectively.
5760                  */
5761                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5762                         if (dc_crtc_timing->flags.Y_ONLY)
5763                                 color_space =
5764                                         COLOR_SPACE_YCBCR709_LIMITED;
5765                         else
5766                                 color_space = COLOR_SPACE_YCBCR709;
5767                 } else {
5768                         if (dc_crtc_timing->flags.Y_ONLY)
5769                                 color_space =
5770                                         COLOR_SPACE_YCBCR601_LIMITED;
5771                         else
5772                                 color_space = COLOR_SPACE_YCBCR601;
5773                 }
5774
5775         }
5776         break;
5777         case PIXEL_ENCODING_RGB:
5778                 color_space = COLOR_SPACE_SRGB;
5779                 break;
5780
5781         default:
5782                 WARN_ON(1);
5783                 break;
5784         }
5785
5786         return color_space;
5787 }
5788
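/*
 * Walk down from the requested colour depth until the depth-scaled pixel
 * clock fits under the sink's maximum TMDS clock, as the HDMI spec requires;
 * returns false when no valid HDMI depth fits.
 */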
5789 static bool adjust_colour_depth_from_display_info(
5790         struct dc_crtc_timing *timing_out,
5791         const struct drm_display_info *info)
5792 {
5793         enum dc_color_depth depth = timing_out->display_color_depth;
5794         int normalized_clk;

5795         do {
5796                 normalized_clk = timing_out->pix_clk_100hz / 10;
5797                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5798                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5799                         normalized_clk /= 2;
5800                 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5801                 switch (depth) {
5802                 case COLOR_DEPTH_888:
5803                         break;
5804                 case COLOR_DEPTH_101010:
5805                         normalized_clk = (normalized_clk * 30) / 24;
5806                         break;
5807                 case COLOR_DEPTH_121212:
5808                         normalized_clk = (normalized_clk * 36) / 24;
5809                         break;
5810                 case COLOR_DEPTH_161616:
5811                         normalized_clk = (normalized_clk * 48) / 24;
5812                         break;
5813                 default:
5814                         /* The above depths are the only ones valid for HDMI. */
5815                         return false;
5816                 }
5817                 if (normalized_clk <= info->max_tmds_clock) {
5818                         timing_out->display_color_depth = depth;
5819                         return true;
5820                 }
5821         } while (--depth > COLOR_DEPTH_666);
5822         return false;
5823 }
5824
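/*
 * Fill the DC CRTC timing from a DRM display mode: pick the pixel encoding
 * (forcing YCbCr 4:2:0 where the mode or connector requires it), derive the
 * colour depth, VIC and sync polarities, and copy either the base timings
 * (for FreeSync video modes) or the crtc_* timings otherwise.
 */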
5825 static void fill_stream_properties_from_drm_display_mode(
5826         struct dc_stream_state *stream,
5827         const struct drm_display_mode *mode_in,
5828         const struct drm_connector *connector,
5829         const struct drm_connector_state *connector_state,
5830         const struct dc_stream_state *old_stream,
5831         int requested_bpc)
5832 {
5833         struct dc_crtc_timing *timing_out = &stream->timing;
5834         const struct drm_display_info *info = &connector->display_info;
5835         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5836         struct hdmi_vendor_infoframe hv_frame;
5837         struct hdmi_avi_infoframe avi_frame;
5838
5839         memset(&hv_frame, 0, sizeof(hv_frame));
5840         memset(&avi_frame, 0, sizeof(avi_frame));
5841
5842         timing_out->h_border_left = 0;
5843         timing_out->h_border_right = 0;
5844         timing_out->v_border_top = 0;
5845         timing_out->v_border_bottom = 0;
5846         /* TODO: un-hardcode */
5847         if (drm_mode_is_420_only(info, mode_in)
5848                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5849                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5850         else if (drm_mode_is_420_also(info, mode_in)
5851                         && aconnector->force_yuv420_output)
5852                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5853         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5854                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5855                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5856         else
5857                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5858
5859         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5860         timing_out->display_color_depth = convert_color_depth_from_display_info(
5861                 connector,
5862                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5863                 requested_bpc);
5864         timing_out->scan_type = SCANNING_TYPE_NODATA;
5865         timing_out->hdmi_vic = 0;
5866
5867         if (old_stream) {
5868                 timing_out->vic = old_stream->timing.vic;
5869                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5870                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5871         } else {
5872                 timing_out->vic = drm_match_cea_mode(mode_in);
5873                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5874                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5875                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5876                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5877         }
5878
5879         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5880                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5881                 timing_out->vic = avi_frame.video_code;
5882                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5883                 timing_out->hdmi_vic = hv_frame.vic;
5884         }
5885
5886         if (is_freesync_video_mode(mode_in, aconnector)) {
5887                 timing_out->h_addressable = mode_in->hdisplay;
5888                 timing_out->h_total = mode_in->htotal;
5889                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5890                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5891                 timing_out->v_total = mode_in->vtotal;
5892                 timing_out->v_addressable = mode_in->vdisplay;
5893                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5894                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5895                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5896         } else {
5897                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5898                 timing_out->h_total = mode_in->crtc_htotal;
5899                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5900                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5901                 timing_out->v_total = mode_in->crtc_vtotal;
5902                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5903                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5904                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5905                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5906         }
5907
5908         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5909
5910         stream->output_color_space = get_output_color_space(timing_out);
5911
5912         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5913         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5914         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5915                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5916                     drm_mode_is_420_also(info, mode_in) &&
5917                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5918                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5919                         adjust_colour_depth_from_display_info(timing_out, info);
5920                 }
5921         }
5922 }
5923
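/*
 * Copy the EDID-derived audio capabilities (modes, speaker allocation,
 * latency) from the DC sink into the stream's audio_info.
 */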
5924 static void fill_audio_info(struct audio_info *audio_info,
5925                             const struct drm_connector *drm_connector,
5926                             const struct dc_sink *dc_sink)
5927 {
5928         int i = 0;
5929         int cea_revision = 0;
5930         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5931
5932         audio_info->manufacture_id = edid_caps->manufacturer_id;
5933         audio_info->product_id = edid_caps->product_id;
5934
5935         cea_revision = drm_connector->display_info.cea_rev;
5936
5937         strscpy(audio_info->display_name,
5938                 edid_caps->display_name,
5939                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5940
5941         if (cea_revision >= 3) {
5942                 audio_info->mode_count = edid_caps->audio_mode_count;
5943
5944                 for (i = 0; i < audio_info->mode_count; ++i) {
5945                         audio_info->modes[i].format_code =
5946                                         (enum audio_format_code)
5947                                         (edid_caps->audio_modes[i].format_code);
5948                         audio_info->modes[i].channel_count =
5949                                         edid_caps->audio_modes[i].channel_count;
5950                         audio_info->modes[i].sample_rates.all =
5951                                         edid_caps->audio_modes[i].sample_rate;
5952                         audio_info->modes[i].sample_size =
5953                                         edid_caps->audio_modes[i].sample_size;
5954                 }
5955         }
5956
5957         audio_info->flags.all = edid_caps->speaker_flags;
5958
5959         /* TODO: We only check for the progressive mode, check for interlace mode too */
5960         if (drm_connector->latency_present[0]) {
5961                 audio_info->video_latency = drm_connector->video_latency[0];
5962                 audio_info->audio_latency = drm_connector->audio_latency[0];
5963         }
5964
5965         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5966
5967 }
5968
5969 static void
5970 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5971                                       struct drm_display_mode *dst_mode)
5972 {
5973         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5974         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5975         dst_mode->crtc_clock = src_mode->crtc_clock;
5976         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5977         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5978         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5979         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5980         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5981         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5982         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5983         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5984         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5985         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5986         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5987 }
5988
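/*
 * Patch the mode's crtc_* timings from the native mode when scaling is
 * enabled, or when the requested mode already matches the native clock and
 * totals; otherwise leave the mode untouched.
 */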
5989 static void
5990 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5991                                         const struct drm_display_mode *native_mode,
5992                                         bool scale_enabled)
5993 {
5994         if (scale_enabled) {
5995                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5996         } else if (native_mode->clock == drm_mode->clock &&
5997                         native_mode->htotal == drm_mode->htotal &&
5998                         native_mode->vtotal == drm_mode->vtotal) {
5999                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6000         } else {
6001                 /* no scaling and no amdgpu-inserted mode, no need to patch */
6002         }
6003 }
6004
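/*
 * Create a virtual sink tied to the connector's link so a stream can still
 * be constructed when no physical sink is attached.
 */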
6005 static struct dc_sink *
6006 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6007 {
6008         struct dc_sink_init_data sink_init_data = { 0 };
6009         struct dc_sink *sink = NULL;

6010         sink_init_data.link = aconnector->dc_link;
6011         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6012
6013         sink = dc_sink_create(&sink_init_data);
6014         if (!sink) {
6015                 DRM_ERROR("Failed to create sink!\n");
6016                 return NULL;
6017         }
6018         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6019
6020         return sink;
6021 }
6022
6023 static void set_multisync_trigger_params(
6024                 struct dc_stream_state *stream)
6025 {
6026         struct dc_stream_state *master = NULL;
6027
6028         if (stream->triggered_crtc_reset.enabled) {
6029                 master = stream->triggered_crtc_reset.event_source;
6030                 stream->triggered_crtc_reset.event =
6031                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6032                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6033                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6034         }
6035 }
6036
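/*
 * Elect the multisync stream with the highest refresh rate
 * (pix_clk / (h_total * v_total)) as the CRTC-reset event source for every
 * stream in the set.
 */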
6037 static void set_master_stream(struct dc_stream_state *stream_set[],
6038                               int stream_count)
6039 {
6040         int j, highest_rfr = 0, master_stream = 0;
6041
6042         for (j = 0; j < stream_count; j++) {
6043                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6044                         int refresh_rate = 0;
6045
6046                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6047                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6048                         if (refresh_rate > highest_rfr) {
6049                                 highest_rfr = refresh_rate;
6050                                 master_stream = j;
6051                         }
6052                 }
6053         }
6054         for (j = 0; j < stream_count; j++) {
6055                 if (stream_set[j])
6056                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6057         }
6058 }
6059
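/*
 * Tie all multisync-capable streams in the context to a common CRTC-reset
 * trigger so their vertical timings stay aligned frame to frame.
 */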
6060 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6061 {
6062         int i = 0;
6063         struct dc_stream_state *stream;
6064
6065         if (context->stream_count < 2)
6066                 return;
6067         for (i = 0; i < context->stream_count; i++) {
6068                 if (!context->streams[i])
6069                         continue;
6070                 /*
6071                  * TODO: add a function to read AMD VSDB bits and set
6072                  * crtc_sync_master.multi_sync_enabled flag
6073                  * For now it's set to false
6074                  */
6075         }
6076
6077         set_master_stream(context->streams, context->stream_count);
6078
6079         for (i = 0; i < context->stream_count; i++) {
6080                 stream = context->streams[i];
6081
6082                 if (!stream)
6083                         continue;
6084
6085                 set_multisync_trigger_params(stream);
6086         }
6087 }
6088
6089 #if defined(CONFIG_DRM_AMD_DC_DCN)
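/*
 * Read the sink's DSC decoder capabilities out of DPCD for DP/eDP sinks,
 * either directly attached or behind a DP-to-HDMI converter.
 */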
6090 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6091                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6092                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6093 {
6094         stream->timing.flags.DSC = 0;
6095         dsc_caps->is_dsc_supported = false;
6096
6097         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6098                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6099                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6100                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6101                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6102                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6103                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6104                                 dsc_caps);
6105         }
6106 }
6107
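/*
 * eDP DSC policy: compute the DSC bandwidth range for an 8 bpp (x16) target
 * and, if the DSC stream at the maximum target bpp still fits the link, pin
 * the config to that bpp; otherwise size the config to the available link
 * bandwidth.
 */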
6108 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6109                                     struct dc_sink *sink, struct dc_stream_state *stream,
6110                                     struct dsc_dec_dpcd_caps *dsc_caps,
6111                                     uint32_t max_dsc_target_bpp_limit_override)
6112 {
6113         const struct dc_link_settings *verified_link_cap = NULL;
6114         uint32_t link_bw_in_kbps;
6115         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6116         struct dc *dc = sink->ctx->dc;
6117         struct dc_dsc_bw_range bw_range = {0};
6118         struct dc_dsc_config dsc_cfg = {0};
6119
6120         verified_link_cap = dc_link_get_link_cap(stream->link);
6121         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6122         edp_min_bpp_x16 = 8 * 16;
6123         edp_max_bpp_x16 = 8 * 16;
6124
6125         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6126                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6127
6128         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6129                 edp_min_bpp_x16 = edp_max_bpp_x16;
6130
6131         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6132                                 dc->debug.dsc_min_slice_height_override,
6133                                 edp_min_bpp_x16, edp_max_bpp_x16,
6134                                 dsc_caps,
6135                                 &stream->timing,
6136                                 &bw_range)) {
6137
6138                 if (bw_range.max_kbps < link_bw_in_kbps) {
6139                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6140                                         dsc_caps,
6141                                         dc->debug.dsc_min_slice_height_override,
6142                                         max_dsc_target_bpp_limit_override,
6143                                         0,
6144                                         &stream->timing,
6145                                         &dsc_cfg)) {
6146                                 stream->timing.dsc_cfg = dsc_cfg;
6147                                 stream->timing.flags.DSC = 1;
6148                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6149                         }
6150                         return;
6151                 }
6152         }
6153
6154         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6155                                 dsc_caps,
6156                                 dc->debug.dsc_min_slice_height_override,
6157                                 max_dsc_target_bpp_limit_override,
6158                                 link_bw_in_kbps,
6159                                 &stream->timing,
6160                                 &dsc_cfg)) {
6161                 stream->timing.dsc_cfg = dsc_cfg;
6162                 stream->timing.flags.DSC = 1;
6163         }
6164 }
6165
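/*
 * SST DSC policy: decide whether to enable DSC for a DP or eDP stream based
 * on link bandwidth and sink capability, honouring any per-panel bpp limit
 * and the debugfs force-enable/slice overrides applied at the end.
 */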
6166 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6167                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6168                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6169 {
6170         struct drm_connector *drm_connector = &aconnector->base;
6171         uint32_t link_bandwidth_kbps;
6172         uint32_t max_dsc_target_bpp_limit_override = 0;
6173         struct dc *dc = sink->ctx->dc;
6174         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6175         uint32_t dsc_max_supported_bw_in_kbps;
6176
6177         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6178                                                         dc_link_get_link_cap(aconnector->dc_link));
6179
6180         if (stream->link && stream->link->local_sink)
6181                 max_dsc_target_bpp_limit_override =
6182                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6183
6184         /* Set DSC policy according to dsc_clock_en */
6185         dc_dsc_policy_set_enable_dsc_when_not_needed(
6186                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6187
6188         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6189             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6190
6191                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6192
6193         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6194                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6195                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6196                                                 dsc_caps,
6197                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6198                                                 max_dsc_target_bpp_limit_override,
6199                                                 link_bandwidth_kbps,
6200                                                 &stream->timing,
6201                                                 &stream->timing.dsc_cfg)) {
6202                                 stream->timing.flags.DSC = 1;
6203                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6204                                                                  __func__, drm_connector->name);
6205                         }
6206                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6207                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6208                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6209                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6210
6211                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6212                                         max_supported_bw_in_kbps > 0 &&
6213                                         dsc_max_supported_bw_in_kbps > 0)
6214                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6215                                                 dsc_caps,
6216                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6217                                                 max_dsc_target_bpp_limit_override,
6218                                                 dsc_max_supported_bw_in_kbps,
6219                                                 &stream->timing,
6220                                                 &stream->timing.dsc_cfg)) {
6221                                         stream->timing.flags.DSC = 1;
6222                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6223                                                                          __func__, drm_connector->name);
6224                                 }
6225                 }
6226         }
6227
6228         /* Overwrite the stream flag if DSC is enabled through debugfs */
6229         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6230                 stream->timing.flags.DSC = 1;
6231
6232         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6233                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6234
6235         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6236                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6237
6238         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6239                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6240 }
6241 #endif /* CONFIG_DRM_AMD_DC_DCN */
6242
6243 /**
6244  * DOC: FreeSync Video
6245  *
6246  * When a userspace application wants to play a video, the content follows a
6247  * standard format definition that usually specifies the FPS for that format.
6248  * The below list illustrates some video format and the expected FPS,
6249  * respectively:
6250  *
6251  * - TV/NTSC (23.976 FPS)
6252  * - Cinema (24 FPS)
6253  * - TV/PAL (25 FPS)
6254  * - TV/NTSC (29.97 FPS)
6255  * - TV/NTSC (30 FPS)
6256  * - Cinema HFR (48 FPS)
6257  * - TV/PAL (50 FPS)
6258  * - Commonly used (60 FPS)
6259  * - Multiples of 24 (48,72,96,120 FPS)
6260  *
6261  * The list of standard video formats is not huge and can be added to the
6262  * connector modeset list beforehand. With that, userspace can leverage
6263  * FreeSync to extend the front porch in order to attain the target refresh
6264  * rate. Such a switch will happen seamlessly, without screen blanking or
6265  * reprogramming of the output in any other way. If userspace requests a
6266  * modesetting change compatible with FreeSync modes that differ only in the
6267  * refresh rate, DC will skip the full update and avoid blinking during the
6268  * transition. For example, a video player can change the modesetting from
6269  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6270  * causing any display blink. This same concept can be applied to a mode
6271  * setting change.
6272  */
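/*
 * A minimal numeric sketch of the front porch stretch described above, using
 * the standard CEA-861 1080p timing as an illustration (the numbers are not
 * taken from any particular panel):
 *
 *   refresh = pix_clk / (htotal * vtotal)
 *   1080p60: 148,500,000 / (2200 * 1125) = 60 Hz
 *
 * To reach 48 Hz seamlessly, only vtotal grows by extending the vertical
 * front porch: vtotal = 148,500,000 / (2200 * 48) ~= 1406 lines, i.e. the
 * extra 281 lines land in v_front_porch while the addressable area, sync
 * width and pixel clock stay untouched.
 */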
6273 static struct drm_display_mode *
6274 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6275                           bool use_probed_modes)
6276 {
6277         struct drm_display_mode *m, *m_pref = NULL;
6278         u16 current_refresh, highest_refresh;
6279         struct list_head *list_head = use_probed_modes ?
6280                                                     &aconnector->base.probed_modes :
6281                                                     &aconnector->base.modes;
6282
6283         if (aconnector->freesync_vid_base.clock != 0)
6284                 return &aconnector->freesync_vid_base;
6285
6286         /* Find the preferred mode */
6287         list_for_each_entry (m, list_head, head) {
6288                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6289                         m_pref = m;
6290                         break;
6291                 }
6292         }
6293
6294         if (!m_pref) {
6295                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6296                 m_pref = list_first_entry_or_null(
6297                         &aconnector->base.modes, struct drm_display_mode, head);
6298                 if (!m_pref) {
6299                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6300                         return NULL;
6301                 }
6302         }
6303
6304         highest_refresh = drm_mode_vrefresh(m_pref);
6305
6306         /*
6307          * Find the mode with highest refresh rate with same resolution.
6308          * For some monitors, preferred mode is not the mode with highest
6309          * supported refresh rate.
6310          */
6311         list_for_each_entry (m, list_head, head) {
6312                 current_refresh  = drm_mode_vrefresh(m);
6313
6314                 if (m->hdisplay == m_pref->hdisplay &&
6315                     m->vdisplay == m_pref->vdisplay &&
6316                     highest_refresh < current_refresh) {
6317                         highest_refresh = current_refresh;
6318                         m_pref = m;
6319                 }
6320         }
6321
6322         aconnector->freesync_vid_base = *m_pref;
6323         return m_pref;
6324 }
6325
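/*
 * A mode qualifies as a FreeSync video mode when it differs from the highest
 * refresh rate mode of the same resolution only in vtotal, i.e. the extra
 * lines sit entirely in the vertical front porch.
 */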
6326 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6327                                    struct amdgpu_dm_connector *aconnector)
6328 {
6329         struct drm_display_mode *high_mode;
6330         int timing_diff;
6331
6332         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6333         if (!high_mode || !mode)
6334                 return false;
6335
6336         timing_diff = high_mode->vtotal - mode->vtotal;
6337
6338         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6339             high_mode->hdisplay != mode->hdisplay ||
6340             high_mode->vdisplay != mode->vdisplay ||
6341             high_mode->hsync_start != mode->hsync_start ||
6342             high_mode->hsync_end != mode->hsync_end ||
6343             high_mode->htotal != mode->htotal ||
6344             high_mode->hskew != mode->hskew ||
6345             high_mode->vscan != mode->vscan ||
6346             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6347             high_mode->vsync_end - mode->vsync_end != timing_diff)
6348                 return false;
6349         else
6350                 return true;
6351 }
6352
6353 struct dc_stream_state *
6354 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6355                        const struct drm_display_mode *drm_mode,
6356                        const struct dm_connector_state *dm_state,
6357                        const struct dc_stream_state *old_stream,
6358                        int requested_bpc)
6359 {
6360         struct drm_display_mode *preferred_mode = NULL;
6361         struct drm_connector *drm_connector;
6362         const struct drm_connector_state *con_state =
6363                 dm_state ? &dm_state->base : NULL;
6364         struct dc_stream_state *stream = NULL;
6365         struct drm_display_mode mode = *drm_mode;
6366         struct drm_display_mode saved_mode;
6367         struct drm_display_mode *freesync_mode = NULL;
6368         bool native_mode_found = false;
6369         bool recalculate_timing = false;
6370         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6371         int mode_refresh;
6372         int preferred_refresh = 0;
6373 #if defined(CONFIG_DRM_AMD_DC_DCN)
6374         struct dsc_dec_dpcd_caps dsc_caps;
6375 #endif
6376         struct dc_sink *sink = NULL;
6377
6378         memset(&saved_mode, 0, sizeof(saved_mode));
6379
6380         if (aconnector == NULL) {
6381                 DRM_ERROR("aconnector is NULL!\n");
6382                 return stream;
6383         }
6384
6385         drm_connector = &aconnector->base;
6386
6387         if (!aconnector->dc_sink) {
6388                 sink = create_fake_sink(aconnector);
6389                 if (!sink)
6390                         return stream;
6391         } else {
6392                 sink = aconnector->dc_sink;
6393                 dc_sink_retain(sink);
6394         }
6395
6396         stream = dc_create_stream_for_sink(sink);
6397
6398         if (stream == NULL) {
6399                 DRM_ERROR("Failed to create stream for sink!\n");
6400                 goto finish;
6401         }
6402
6403         stream->dm_stream_context = aconnector;
6404
6405         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6406                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6407
6408         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6409                 /* Search for preferred mode */
6410                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6411                         native_mode_found = true;
6412                         break;
6413                 }
6414         }
6415         if (!native_mode_found)
6416                 preferred_mode = list_first_entry_or_null(
6417                                 &aconnector->base.modes,
6418                                 struct drm_display_mode,
6419                                 head);
6420
6421         mode_refresh = drm_mode_vrefresh(&mode);
6422
6423         if (preferred_mode == NULL) {
6424                 /*
6425                  * This may not be an error, the use case is when we have no
6426                  * usermode calls to reset and set mode upon hotplug. In this
6427                  * case, we call set mode ourselves to restore the previous mode
6428                  * and the mode list may not be filled in in time.
6429                  */
6430                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6431         } else {
6432                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6433                 if (recalculate_timing) {
6434                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6435                         saved_mode = mode;
6436                         mode = *freesync_mode;
6437                 } else {
6438                         decide_crtc_timing_for_drm_display_mode(
6439                                 &mode, preferred_mode, scale);
6440
6441                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6442                 }
6443         }
6444
6445         if (recalculate_timing)
6446                 drm_mode_set_crtcinfo(&saved_mode, 0);
6447         else if (!dm_state)
6448                 drm_mode_set_crtcinfo(&mode, 0);
6449
6450         /*
6451          * If scaling is enabled and the refresh rate didn't change,
6452          * we copy the vic and polarities of the old timings.
6453          */
6454         if (!scale || mode_refresh != preferred_refresh)
6455                 fill_stream_properties_from_drm_display_mode(
6456                         stream, &mode, &aconnector->base, con_state, NULL,
6457                         requested_bpc);
6458         else
6459                 fill_stream_properties_from_drm_display_mode(
6460                         stream, &mode, &aconnector->base, con_state, old_stream,
6461                         requested_bpc);
6462
6463 #if defined(CONFIG_DRM_AMD_DC_DCN)
6464         /* SST DSC determination policy */
6465         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6466         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6467                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6468 #endif
6469
6470         update_stream_scaling_settings(&mode, dm_state, stream);
6471
6472         fill_audio_info(
6473                 &stream->audio_info,
6474                 drm_connector,
6475                 sink);
6476
6477         update_stream_signal(stream, sink);
6478
6479         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6480                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6481
6482         if (stream->link->psr_settings.psr_feature_enabled) {
6483                 //
6484                 // Decide whether the stream supports VSC SDP colorimetry
6485                 // before building the VSC info packet.
6486                 //
6487                 stream->use_vsc_sdp_for_colorimetry = false;
6488                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6489                         stream->use_vsc_sdp_for_colorimetry =
6490                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6491                 } else {
6492                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6493                                 stream->use_vsc_sdp_for_colorimetry = true;
6494                 }
6495                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6496                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6498         }
6499 finish:
6500         dc_sink_release(sink);
6501
6502         return stream;
6503 }
6504
6505 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6506 {
6507         drm_crtc_cleanup(crtc);
6508         kfree(crtc);
6509 }
6510
6511 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6512                                   struct drm_crtc_state *state)
6513 {
6514         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6515
6516         /* TODO: destroy dc_stream here once the stream object is flattened */
6517         if (cur->stream)
6518                 dc_stream_release(cur->stream);
6519
6520         __drm_atomic_helper_crtc_destroy_state(state);
6521
6522         kfree(state);
6525 }
6526
6527 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6528 {
6529         struct dm_crtc_state *state;
6530
6531         if (crtc->state)
6532                 dm_crtc_destroy_state(crtc, crtc->state);
6533
6534         state = kzalloc(sizeof(*state), GFP_KERNEL);
6535         if (WARN_ON(!state))
6536                 return;
6537
6538         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6539 }
6540
6541 static struct drm_crtc_state *
6542 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6543 {
6544         struct dm_crtc_state *state, *cur;
6545
6546         if (WARN_ON(!crtc->state))
6547                 return NULL;
6548
6549         cur = to_dm_crtc_state(crtc->state);
6550
6551         state = kzalloc(sizeof(*state), GFP_KERNEL);
6552         if (!state)
6553                 return NULL;
6554
6555         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6556
6557         if (cur->stream) {
6558                 state->stream = cur->stream;
6559                 dc_stream_retain(state->stream);
6560         }
6561
6562         state->active_planes = cur->active_planes;
6563         state->vrr_infopacket = cur->vrr_infopacket;
6564         state->abm_level = cur->abm_level;
6565         state->vrr_supported = cur->vrr_supported;
6566         state->freesync_config = cur->freesync_config;
6567         state->cm_has_degamma = cur->cm_has_degamma;
6568         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6569         state->force_dpms_off = cur->force_dpms_off;
6570         /* TODO: duplicate dc_stream once the stream object is flattened */
6571
6572         return &state->base;
6573 }
6574
6575 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6576 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6577 {
6578         crtc_debugfs_init(crtc);
6579
6580         return 0;
6581 }
6582 #endif
6583
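/*
 * Enable or disable the per-CRTC VUPDATE interrupt in DC; dm_set_vblank()
 * below only keeps it armed while VRR is active.
 */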
6584 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6585 {
6586         enum dc_irq_source irq_source;
6587         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6588         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6589         int rc;
6590
6591         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6592
6593         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6594
6595         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6596                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6597         return rc;
6598 }
6599
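/*
 * VUPDATE interrupts are only needed while vblank interrupts are enabled
 * and the CRTC is in VRR mode; dm_set_vblank() below therefore toggles the
 * VUPDATE source in lockstep with the vblank source.
 */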
6600 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6601 {
6602         enum dc_irq_source irq_source;
6603         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6604         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6605         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6606 #if defined(CONFIG_DRM_AMD_DC_DCN)
6607         struct amdgpu_display_manager *dm = &adev->dm;
6608         struct vblank_control_work *work;
6609 #endif
6610         int rc = 0;
6611
6612         if (enable) {
6613                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6614                 if (amdgpu_dm_vrr_active(acrtc_state))
6615                         rc = dm_set_vupdate_irq(crtc, true);
6616         } else {
6617                 /* vblank irq off -> vupdate irq off */
6618                 rc = dm_set_vupdate_irq(crtc, false);
6619         }
6620
6621         if (rc)
6622                 return rc;
6623
6624         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6625
6626         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6627                 return -EBUSY;
6628
6629         if (amdgpu_in_reset(adev))
6630                 return 0;
6631
6632 #if defined(CONFIG_DRM_AMD_DC_DCN)
6633         if (dm->vblank_control_workqueue) {
6634                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6635                 if (!work)
6636                         return -ENOMEM;
6637
6638                 INIT_WORK(&work->work, vblank_control_worker);
6639                 work->dm = dm;
6640                 work->acrtc = acrtc;
6641                 work->enable = enable;
6642
6643                 if (acrtc_state->stream) {
6644                         dc_stream_retain(acrtc_state->stream);
6645                         work->stream = acrtc_state->stream;
6646                 }
6647
6648                 queue_work(dm->vblank_control_workqueue, &work->work);
6649         }
6650 #endif
6651
6652         return 0;
6653 }
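/*
 * Note: dm_set_vblank() can be called with IRQs disabled (hence the
 * GFP_ATOMIC allocation above), so work that may sleep -- such as the
 * PSR/idle-optimization handling tied to vblank on DCN -- is deferred to
 * vblank_control_worker via vblank_control_workqueue rather than done
 * inline.
 */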
6654
6655 static int dm_enable_vblank(struct drm_crtc *crtc)
6656 {
6657         return dm_set_vblank(crtc, true);
6658 }
6659
6660 static void dm_disable_vblank(struct drm_crtc *crtc)
6661 {
6662         dm_set_vblank(crtc, false);
6663 }
6664
6665 /* Implement only the options currently available for the driver */
6666 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6667         .reset = dm_crtc_reset_state,
6668         .destroy = amdgpu_dm_crtc_destroy,
6669         .set_config = drm_atomic_helper_set_config,
6670         .page_flip = drm_atomic_helper_page_flip,
6671         .atomic_duplicate_state = dm_crtc_duplicate_state,
6672         .atomic_destroy_state = dm_crtc_destroy_state,
6673         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6674         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6675         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6676         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6677         .enable_vblank = dm_enable_vblank,
6678         .disable_vblank = dm_disable_vblank,
6679         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6680 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6681         .late_register = amdgpu_dm_crtc_late_register,
6682 #endif
6683 };
6684
6685 static enum drm_connector_status
6686 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6687 {
6688         bool connected;
6689         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6690
6691         /*
6692          * Notes:
6693          * 1. This interface is NOT called in context of HPD irq.
6694          * 2. This interface *is* called in the context of a user-mode ioctl,
6695          * which makes it a bad place for *any* MST-related activity.
6696          */
6697
6698         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6699             !aconnector->fake_enable)
6700                 connected = (aconnector->dc_sink != NULL);
6701         else
6702                 connected = (aconnector->base.force == DRM_FORCE_ON);
6703
6704         update_subconnector_property(aconnector);
6705
6706         return (connected ? connector_status_connected :
6707                         connector_status_disconnected);
6708 }
6709
6710 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6711                                             struct drm_connector_state *connector_state,
6712                                             struct drm_property *property,
6713                                             uint64_t val)
6714 {
6715         struct drm_device *dev = connector->dev;
6716         struct amdgpu_device *adev = drm_to_adev(dev);
6717         struct dm_connector_state *dm_old_state =
6718                 to_dm_connector_state(connector->state);
6719         struct dm_connector_state *dm_new_state =
6720                 to_dm_connector_state(connector_state);
6721
6722         int ret = -EINVAL;
6723
6724         if (property == dev->mode_config.scaling_mode_property) {
6725                 enum amdgpu_rmx_type rmx_type;
6726
6727                 switch (val) {
6728                 case DRM_MODE_SCALE_CENTER:
6729                         rmx_type = RMX_CENTER;
6730                         break;
6731                 case DRM_MODE_SCALE_ASPECT:
6732                         rmx_type = RMX_ASPECT;
6733                         break;
6734                 case DRM_MODE_SCALE_FULLSCREEN:
6735                         rmx_type = RMX_FULL;
6736                         break;
6737                 case DRM_MODE_SCALE_NONE:
6738                 default:
6739                         rmx_type = RMX_OFF;
6740                         break;
6741                 }
6742
6743                 if (dm_old_state->scaling == rmx_type)
6744                         return 0;
6745
6746                 dm_new_state->scaling = rmx_type;
6747                 ret = 0;
6748         } else if (property == adev->mode_info.underscan_hborder_property) {
6749                 dm_new_state->underscan_hborder = val;
6750                 ret = 0;
6751         } else if (property == adev->mode_info.underscan_vborder_property) {
6752                 dm_new_state->underscan_vborder = val;
6753                 ret = 0;
6754         } else if (property == adev->mode_info.underscan_property) {
6755                 dm_new_state->underscan_enable = val;
6756                 ret = 0;
6757         } else if (property == adev->mode_info.abm_level_property) {
6758                 dm_new_state->abm_level = val;
6759                 ret = 0;
6760         }
6761
6762         return ret;
6763 }
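/*
 * For reference, userspace reaches this through the generic DRM property
 * ioctls. A minimal, hypothetical libdrm sketch for the scaling mode
 * property (scaling_prop_id must first be looked up from the connector's
 * property list):
 *
 *	drmModeObjectSetProperty(fd, connector_id,
 *				 DRM_MODE_OBJECT_CONNECTOR,
 *				 scaling_prop_id, DRM_MODE_SCALE_ASPECT);
 */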
6764
6765 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6766                                             const struct drm_connector_state *state,
6767                                             struct drm_property *property,
6768                                             uint64_t *val)
6769 {
6770         struct drm_device *dev = connector->dev;
6771         struct amdgpu_device *adev = drm_to_adev(dev);
6772         struct dm_connector_state *dm_state =
6773                 to_dm_connector_state(state);
6774         int ret = -EINVAL;
6775
6776         if (property == dev->mode_config.scaling_mode_property) {
6777                 switch (dm_state->scaling) {
6778                 case RMX_CENTER:
6779                         *val = DRM_MODE_SCALE_CENTER;
6780                         break;
6781                 case RMX_ASPECT:
6782                         *val = DRM_MODE_SCALE_ASPECT;
6783                         break;
6784                 case RMX_FULL:
6785                         *val = DRM_MODE_SCALE_FULLSCREEN;
6786                         break;
6787                 case RMX_OFF:
6788                 default:
6789                         *val = DRM_MODE_SCALE_NONE;
6790                         break;
6791                 }
6792                 ret = 0;
6793         } else if (property == adev->mode_info.underscan_hborder_property) {
6794                 *val = dm_state->underscan_hborder;
6795                 ret = 0;
6796         } else if (property == adev->mode_info.underscan_vborder_property) {
6797                 *val = dm_state->underscan_vborder;
6798                 ret = 0;
6799         } else if (property == adev->mode_info.underscan_property) {
6800                 *val = dm_state->underscan_enable;
6801                 ret = 0;
6802         } else if (property == adev->mode_info.abm_level_property) {
6803                 *val = dm_state->abm_level;
6804                 ret = 0;
6805         }
6806
6807         return ret;
6808 }
6809
6810 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6811 {
6812         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6813
6814         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6815 }
6816
6817 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6818 {
6819         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6820         const struct dc_link *link = aconnector->dc_link;
6821         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6822         struct amdgpu_display_manager *dm = &adev->dm;
6823         int i;
6824
6825         /*
6826          * Only call this if mst_mgr was initialized before, since it's not
6827          * done for all connector types.
6828          */
6829         if (aconnector->mst_mgr.dev)
6830                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6831
6832 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6833         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6834         for (i = 0; i < dm->num_of_edps; i++) {
6835                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6836                         backlight_device_unregister(dm->backlight_dev[i]);
6837                         dm->backlight_dev[i] = NULL;
6838                 }
6839         }
6840 #endif
6841
6842         if (aconnector->dc_em_sink)
6843                 dc_sink_release(aconnector->dc_em_sink);
6844         aconnector->dc_em_sink = NULL;
6845         if (aconnector->dc_sink)
6846                 dc_sink_release(aconnector->dc_sink);
6847         aconnector->dc_sink = NULL;
6848
6849         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6850         drm_connector_unregister(connector);
6851         drm_connector_cleanup(connector);
6852         if (aconnector->i2c) {
6853                 i2c_del_adapter(&aconnector->i2c->base);
6854                 kfree(aconnector->i2c);
6855         }
6856         kfree(aconnector->dm_dp_aux.aux.name);
6857
6858         kfree(connector);
6859 }
6860
6861 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6862 {
6863         struct dm_connector_state *state =
6864                 to_dm_connector_state(connector->state);
6865
6866         if (connector->state)
6867                 __drm_atomic_helper_connector_destroy_state(connector->state);
6868
6869         kfree(state);
6870
6871         state = kzalloc(sizeof(*state), GFP_KERNEL);
6872
6873         if (state) {
6874                 state->scaling = RMX_OFF;
6875                 state->underscan_enable = false;
6876                 state->underscan_hborder = 0;
6877                 state->underscan_vborder = 0;
6878                 state->base.max_requested_bpc = 8;
6879                 state->vcpi_slots = 0;
6880                 state->pbn = 0;
6881                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6882                         state->abm_level = amdgpu_dm_abm_level;
6883
6884                 __drm_atomic_helper_connector_reset(connector, &state->base);
6885         }
6886 }
6887
6888 struct drm_connector_state *
6889 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6890 {
6891         struct dm_connector_state *state =
6892                 to_dm_connector_state(connector->state);
6893
6894         struct dm_connector_state *new_state =
6895                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6896
6897         if (!new_state)
6898                 return NULL;
6899
6900         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6901
6902         new_state->freesync_capable = state->freesync_capable;
6903         new_state->abm_level = state->abm_level;
6904         new_state->scaling = state->scaling;
6905         new_state->underscan_enable = state->underscan_enable;
6906         new_state->underscan_hborder = state->underscan_hborder;
6907         new_state->underscan_vborder = state->underscan_vborder;
6908         new_state->vcpi_slots = state->vcpi_slots;
6909         new_state->pbn = state->pbn;
6910         return &new_state->base;
6911 }
6912
6913 static int
6914 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6915 {
6916         struct amdgpu_dm_connector *amdgpu_dm_connector =
6917                 to_amdgpu_dm_connector(connector);
6918         int r;
6919
6920         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6921             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6922                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6923                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6924                 if (r)
6925                         return r;
6926         }
6927
6928 #if defined(CONFIG_DEBUG_FS)
6929         connector_debugfs_init(amdgpu_dm_connector);
6930 #endif
6931
6932         return 0;
6933 }
6934
6935 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6936         .reset = amdgpu_dm_connector_funcs_reset,
6937         .detect = amdgpu_dm_connector_detect,
6938         .fill_modes = drm_helper_probe_single_connector_modes,
6939         .destroy = amdgpu_dm_connector_destroy,
6940         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6941         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6942         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6943         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6944         .late_register = amdgpu_dm_connector_late_register,
6945         .early_unregister = amdgpu_dm_connector_unregister
6946 };
6947
6948 static int get_modes(struct drm_connector *connector)
6949 {
6950         return amdgpu_dm_connector_get_modes(connector);
6951 }
6952
6953 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6954 {
6955         struct dc_sink_init_data init_params = {
6956                         .link = aconnector->dc_link,
6957                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6958         };
6959         struct edid *edid;
6960
6961         if (!aconnector->base.edid_blob_ptr) {
6962                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6963                                 aconnector->base.name);
6964
6965                 aconnector->base.force = DRM_FORCE_OFF;
6966                 aconnector->base.override_edid = false;
6967                 return;
6968         }
6969
6970         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6971
6972         aconnector->edid = edid;
6973
6974         aconnector->dc_em_sink = dc_link_add_remote_sink(
6975                 aconnector->dc_link,
6976                 (uint8_t *)edid,
6977                 (edid->extensions + 1) * EDID_LENGTH,
6978                 &init_params);
6979
6980         if (aconnector->base.force == DRM_FORCE_ON) {
6981                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6982                 aconnector->dc_link->local_sink :
6983                 aconnector->dc_em_sink;
6984                 dc_sink_retain(aconnector->dc_sink);
6985         }
6986 }
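/*
 * Sizing note for the dc_link_add_remote_sink() call above: an EDID is a
 * 128 byte (EDID_LENGTH) base block plus edid->extensions extension blocks,
 * hence (edid->extensions + 1) * EDID_LENGTH bytes are passed in.
 */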
6987
6988 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6989 {
6990         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6991
6992         /*
6993          * In case of a headless boot with force on for a DP managed connector,
6994          * these settings have to be != 0 to get an initial modeset.
6995          */
6996         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6997                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6998                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6999         }
7000
7001
7002         aconnector->base.override_edid = true;
7003         create_eml_sink(aconnector);
7004 }
7005
7006 struct dc_stream_state *
7007 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7008                                 const struct drm_display_mode *drm_mode,
7009                                 const struct dm_connector_state *dm_state,
7010                                 const struct dc_stream_state *old_stream)
7011 {
7012         struct drm_connector *connector = &aconnector->base;
7013         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7014         struct dc_stream_state *stream;
7015         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7016         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7017         enum dc_status dc_result = DC_OK;
7018
7019         do {
7020                 stream = create_stream_for_sink(aconnector, drm_mode,
7021                                                 dm_state, old_stream,
7022                                                 requested_bpc);
7023                 if (stream == NULL) {
7024                         DRM_ERROR("Failed to create stream for sink!\n");
7025                         break;
7026                 }
7027
7028                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7029
7030                 if (dc_result != DC_OK) {
7031                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7032                                       drm_mode->hdisplay,
7033                                       drm_mode->vdisplay,
7034                                       drm_mode->clock,
7035                                       dc_result,
7036                                       dc_status_to_str(dc_result));
7037
7038                         dc_stream_release(stream);
7039                         stream = NULL;
7040                         requested_bpc -= 2; /* lower bpc to retry validation */
7041                 }
7042
7043         } while (stream == NULL && requested_bpc >= 6);
7044
7045         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7046                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7047
7048                 aconnector->force_yuv420_output = true;
7049                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7050                                                 dm_state, old_stream);
7051                 aconnector->force_yuv420_output = false;
7052         }
7053
7054         return stream;
7055 }
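/*
 * Fallback behaviour of create_validate_stream_for_sink(): start at the
 * connector's max_requested_bpc (8 if no state is given), drop by 2 bpc per
 * failed dc_validate_stream() down to 6 bpc, then retry once with YCbCr420
 * forced if encoder validation was the failure. Callers thus get either a
 * stream that passed DC validation or NULL.
 */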
7056
7057 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7058                                    struct drm_display_mode *mode)
7059 {
7060         int result = MODE_ERROR;
7061         struct dc_sink *dc_sink;
7062         /* TODO: Unhardcode stream count */
7063         struct dc_stream_state *stream;
7064         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7065
7066         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7067                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7068                 return result;
7069
7070         /*
7071          * Only run this the first time mode_valid is called to initialize
7072          * EDID mgmt
7073          */
7074         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7075                 !aconnector->dc_em_sink)
7076                 handle_edid_mgmt(aconnector);
7077
7078         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7079
7080         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7081                                 aconnector->base.force != DRM_FORCE_ON) {
7082                 DRM_ERROR("dc_sink is NULL!\n");
7083                 goto fail;
7084         }
7085
7086         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7087         if (stream) {
7088                 dc_stream_release(stream);
7089                 result = MODE_OK;
7090         }
7091
7092 fail:
7093         /* TODO: error handling */
7094         return result;
7095 }
7096
7097 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7098                                 struct dc_info_packet *out)
7099 {
7100         struct hdmi_drm_infoframe frame;
7101         unsigned char buf[30]; /* 26 + 4 */
7102         ssize_t len;
7103         int ret, i;
7104
7105         memset(out, 0, sizeof(*out));
7106
7107         if (!state->hdr_output_metadata)
7108                 return 0;
7109
7110         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7111         if (ret)
7112                 return ret;
7113
7114         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7115         if (len < 0)
7116                 return (int)len;
7117
7118         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7119         if (len != 30)
7120                 return -EINVAL;
7121
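        /*
         * buf[] now holds the packed DRM infoframe: a 4 byte header
         * (type, version, length, checksum) followed by the 26 bytes of
         * static metadata; below it is re-packed into the HDMI or DP SDP
         * layout DC expects.
         */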
7122         /* Prepare the infopacket for DC. */
7123         switch (state->connector->connector_type) {
7124         case DRM_MODE_CONNECTOR_HDMIA:
7125                 out->hb0 = 0x87; /* type */
7126                 out->hb1 = 0x01; /* version */
7127                 out->hb2 = 0x1A; /* length */
7128                 out->sb[0] = buf[3]; /* checksum */
7129                 i = 1;
7130                 break;
7131
7132         case DRM_MODE_CONNECTOR_DisplayPort:
7133         case DRM_MODE_CONNECTOR_eDP:
7134                 out->hb0 = 0x00; /* sdp id, zero */
7135                 out->hb1 = 0x87; /* type */
7136                 out->hb2 = 0x1D; /* payload len - 1 */
7137                 out->hb3 = (0x13 << 2); /* sdp version */
7138                 out->sb[0] = 0x01; /* version */
7139                 out->sb[1] = 0x1A; /* length */
7140                 i = 2;
7141                 break;
7142
7143         default:
7144                 return -EINVAL;
7145         }
7146
7147         memcpy(&out->sb[i], &buf[4], 26);
7148         out->valid = true;
7149
7150         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7151                        sizeof(out->sb), false);
7152
7153         return 0;
7154 }
7155
7156 static int
7157 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7158                                  struct drm_atomic_state *state)
7159 {
7160         struct drm_connector_state *new_con_state =
7161                 drm_atomic_get_new_connector_state(state, conn);
7162         struct drm_connector_state *old_con_state =
7163                 drm_atomic_get_old_connector_state(state, conn);
7164         struct drm_crtc *crtc = new_con_state->crtc;
7165         struct drm_crtc_state *new_crtc_state;
7166         int ret;
7167
7168         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7169
7170         if (!crtc)
7171                 return 0;
7172
7173         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7174                 struct dc_info_packet hdr_infopacket;
7175
7176                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7177                 if (ret)
7178                         return ret;
7179
7180                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7181                 if (IS_ERR(new_crtc_state))
7182                         return PTR_ERR(new_crtc_state);
7183
7184                 /*
7185                  * DC considers the stream backends changed if the
7186                  * static metadata changes. Forcing the modeset also
7187                  * gives a simple way for userspace to switch from
7188                  * 8bpc to 10bpc when setting the metadata to enter
7189                  * or exit HDR.
7190                  *
7191                  * Changing the static metadata after it's been
7192                  * set is permissible, however. So only force a
7193                  * modeset if we're entering or exiting HDR.
7194                  */
7195                 new_crtc_state->mode_changed =
7196                         !old_con_state->hdr_output_metadata ||
7197                         !new_con_state->hdr_output_metadata;
7198         }
7199
7200         return 0;
7201 }
7202
7203 static const struct drm_connector_helper_funcs
7204 amdgpu_dm_connector_helper_funcs = {
7205         /*
7206          * If hotplugging a second, bigger display in FB Con mode, bigger resolution
7207          * modes will be filtered by drm_mode_validate_size(), and those modes
7208          * are missing after the user starts lightdm. So we need to renew the modes
7209          * list in the get_modes callback, not just return the modes count.
7210          */
7211         .get_modes = get_modes,
7212         .mode_valid = amdgpu_dm_connector_mode_valid,
7213         .atomic_check = amdgpu_dm_connector_atomic_check,
7214 };
7215
7216 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7217 {
7218 }
7219
7220 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7221 {
7222         struct drm_atomic_state *state = new_crtc_state->state;
7223         struct drm_plane *plane;
7224         int num_active = 0;
7225
7226         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7227                 struct drm_plane_state *new_plane_state;
7228
7229                 /* Cursor planes are "fake". */
7230                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7231                         continue;
7232
7233                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7234
7235                 if (!new_plane_state) {
7236                         /*
7237                          * The plane is enabled on the CRTC and hasn't changed
7238                          * state. This means that it previously passed
7239                          * validation and is therefore enabled.
7240                          */
7241                         num_active += 1;
7242                         continue;
7243                 }
7244
7245                 /* We need a framebuffer to be considered enabled. */
7246                 num_active += (new_plane_state->fb != NULL);
7247         }
7248
7249         return num_active;
7250 }
7251
7252 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7253                                          struct drm_crtc_state *new_crtc_state)
7254 {
7255         struct dm_crtc_state *dm_new_crtc_state =
7256                 to_dm_crtc_state(new_crtc_state);
7257
7258         dm_new_crtc_state->active_planes = 0;
7259
7260         if (!dm_new_crtc_state->stream)
7261                 return;
7262
7263         dm_new_crtc_state->active_planes =
7264                 count_crtc_active_planes(new_crtc_state);
7265 }
7266
7267 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7268                                        struct drm_atomic_state *state)
7269 {
7270         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7271                                                                           crtc);
7272         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7273         struct dc *dc = adev->dm.dc;
7274         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7275         int ret = -EINVAL;
7276
7277         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7278
7279         dm_update_crtc_active_planes(crtc, crtc_state);
7280
7281         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7282                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7283                 return ret;
7284         }
7285
7286         /*
7287          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7288          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7289          * planes are disabled, which is not supported by the hardware. And there is legacy
7290          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7291          */
7292         if (crtc_state->enable &&
7293             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7294                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7295                 return -EINVAL;
7296         }
7297
7298         /* In some use cases, like reset, no stream is attached */
7299         if (!dm_crtc_state->stream)
7300                 return 0;
7301
7302         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7303                 return 0;
7304
7305         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7306         return ret;
7307 }
7308
7309 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7310                                       const struct drm_display_mode *mode,
7311                                       struct drm_display_mode *adjusted_mode)
7312 {
7313         return true;
7314 }
7315
7316 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7317         .disable = dm_crtc_helper_disable,
7318         .atomic_check = dm_crtc_helper_atomic_check,
7319         .mode_fixup = dm_crtc_helper_mode_fixup,
7320         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7321 };
7322
7323 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7324 {
7325
7326 }
7327
7328 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7329 {
7330         switch (display_color_depth) {
7331         case COLOR_DEPTH_666:
7332                 return 6;
7333         case COLOR_DEPTH_888:
7334                 return 8;
7335         case COLOR_DEPTH_101010:
7336                 return 10;
7337         case COLOR_DEPTH_121212:
7338                 return 12;
7339         case COLOR_DEPTH_141414:
7340                 return 14;
7341         case COLOR_DEPTH_161616:
7342                 return 16;
7343         default:
7344                 break;
7345         }
7346         return 0;
7347 }
7348
7349 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7350                                           struct drm_crtc_state *crtc_state,
7351                                           struct drm_connector_state *conn_state)
7352 {
7353         struct drm_atomic_state *state = crtc_state->state;
7354         struct drm_connector *connector = conn_state->connector;
7355         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7356         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7357         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7358         struct drm_dp_mst_topology_mgr *mst_mgr;
7359         struct drm_dp_mst_port *mst_port;
7360         enum dc_color_depth color_depth;
7361         int clock, bpp = 0;
7362         bool is_y420 = false;
7363
7364         if (!aconnector->port || !aconnector->dc_sink)
7365                 return 0;
7366
7367         mst_port = aconnector->port;
7368         mst_mgr = &aconnector->mst_port->mst_mgr;
7369
7370         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7371                 return 0;
7372
7373         if (!state->duplicated) {
7374                 int max_bpc = conn_state->max_requested_bpc;
7375                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7376                                 aconnector->force_yuv420_output;
7377                 color_depth = convert_color_depth_from_display_info(connector,
7378                                                                     is_y420,
7379                                                                     max_bpc);
7380                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7381                 clock = adjusted_mode->clock;
7382                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7383         }
7384         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7385                                                                            mst_mgr,
7386                                                                            mst_port,
7387                                                                            dm_new_connector_state->pbn,
7388                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7389         if (dm_new_connector_state->vcpi_slots < 0) {
7390                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7391                 return dm_new_connector_state->vcpi_slots;
7392         }
7393         return 0;
7394 }
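/*
 * PBN (Payload Bandwidth Number) is DP MST's bandwidth accounting unit; the
 * MST manager converts a stream's PBN into VCPI time slots using the
 * per-link divider, roughly slots = ceil(pbn / pbn_div), which
 * dm_update_mst_vcpi_slots_for_dsc() below also computes explicitly with
 * DIV_ROUND_UP().
 */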
7395
7396 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7397         .disable = dm_encoder_helper_disable,
7398         .atomic_check = dm_encoder_helper_atomic_check
7399 };
7400
7401 #if defined(CONFIG_DRM_AMD_DC_DCN)
7402 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7403                                             struct dc_state *dc_state,
7404                                             struct dsc_mst_fairness_vars *vars)
7405 {
7406         struct dc_stream_state *stream = NULL;
7407         struct drm_connector *connector;
7408         struct drm_connector_state *new_con_state;
7409         struct amdgpu_dm_connector *aconnector;
7410         struct dm_connector_state *dm_conn_state;
7411         int i, j;
7412         int vcpi, pbn_div, pbn, slot_num = 0;
7413
7414         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7415
7416                 aconnector = to_amdgpu_dm_connector(connector);
7417
7418                 if (!aconnector->port)
7419                         continue;
7420
7421                 if (!new_con_state || !new_con_state->crtc)
7422                         continue;
7423
7424                 dm_conn_state = to_dm_connector_state(new_con_state);
7425
7426                 for (j = 0; j < dc_state->stream_count; j++) {
7427                         stream = dc_state->streams[j];
7428                         if (!stream)
7429                                 continue;
7430
7431                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7432                                 break;
7433
7434                         stream = NULL;
7435                 }
7436
7437                 if (!stream)
7438                         continue;
7439
7440                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7441                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7442                 for (j = 0; j < dc_state->stream_count; j++) {
7443                         if (vars[j].aconnector == aconnector) {
7444                                 pbn = vars[j].pbn;
7445                                 break;
7446                         }
7447                 }
7448
7449                 if (j == dc_state->stream_count)
7450                         continue;
7451
7452                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7453
7454                 if (stream->timing.flags.DSC != 1) {
7455                         dm_conn_state->pbn = pbn;
7456                         dm_conn_state->vcpi_slots = slot_num;
7457
7458                         drm_dp_mst_atomic_enable_dsc(state,
7459                                                      aconnector->port,
7460                                                      dm_conn_state->pbn,
7461                                                      0,
7462                                                      false);
7463                         continue;
7464                 }
7465
7466                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7467                                                     aconnector->port,
7468                                                     pbn, pbn_div,
7469                                                     true);
7470                 if (vcpi < 0)
7471                         return vcpi;
7472
7473                 dm_conn_state->pbn = pbn;
7474                 dm_conn_state->vcpi_slots = vcpi;
7475         }
7476         return 0;
7477 }
7478 #endif
7479
7480 static void dm_drm_plane_reset(struct drm_plane *plane)
7481 {
7482         struct dm_plane_state *amdgpu_state = NULL;
7483
7484         if (plane->state)
7485                 plane->funcs->atomic_destroy_state(plane, plane->state);
7486
7487         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7488         WARN_ON(amdgpu_state == NULL);
7489
7490         if (amdgpu_state)
7491                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7492 }
7493
7494 static struct drm_plane_state *
7495 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7496 {
7497         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7498
7499         old_dm_plane_state = to_dm_plane_state(plane->state);
7500         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7501         if (!dm_plane_state)
7502                 return NULL;
7503
7504         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7505
7506         if (old_dm_plane_state->dc_state) {
7507                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7508                 dc_plane_state_retain(dm_plane_state->dc_state);
7509         }
7510
7511         return &dm_plane_state->base;
7512 }
7513
7514 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7515                                 struct drm_plane_state *state)
7516 {
7517         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7518
7519         if (dm_plane_state->dc_state)
7520                 dc_plane_state_release(dm_plane_state->dc_state);
7521
7522         drm_atomic_helper_plane_destroy_state(plane, state);
7523 }
7524
7525 static const struct drm_plane_funcs dm_plane_funcs = {
7526         .update_plane   = drm_atomic_helper_update_plane,
7527         .disable_plane  = drm_atomic_helper_disable_plane,
7528         .destroy        = drm_primary_helper_destroy,
7529         .reset = dm_drm_plane_reset,
7530         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7531         .atomic_destroy_state = dm_drm_plane_destroy_state,
7532         .format_mod_supported = dm_plane_format_mod_supported,
7533 };
7534
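/*
 * prepare_fb pins the backing BO for scanout: reserve it through TTM, pin
 * it into a supported domain (always VRAM for cursors), ensure a GART
 * binding, then cache the resulting GPU address in the amdgpu_framebuffer.
 */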
7535 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7536                                       struct drm_plane_state *new_state)
7537 {
7538         struct amdgpu_framebuffer *afb;
7539         struct drm_gem_object *obj;
7540         struct amdgpu_device *adev;
7541         struct amdgpu_bo *rbo;
7542         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7543         struct list_head list;
7544         struct ttm_validate_buffer tv;
7545         struct ww_acquire_ctx ticket;
7546         uint32_t domain;
7547         int r;
7548
7549         if (!new_state->fb) {
7550                 DRM_DEBUG_KMS("No FB bound\n");
7551                 return 0;
7552         }
7553
7554         afb = to_amdgpu_framebuffer(new_state->fb);
7555         obj = new_state->fb->obj[0];
7556         rbo = gem_to_amdgpu_bo(obj);
7557         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7558         INIT_LIST_HEAD(&list);
7559
7560         tv.bo = &rbo->tbo;
7561         tv.num_shared = 1;
7562         list_add(&tv.head, &list);
7563
7564         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7565         if (r) {
7566                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7567                 return r;
7568         }
7569
7570         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7571                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7572         else
7573                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7574
7575         r = amdgpu_bo_pin(rbo, domain);
7576         if (unlikely(r != 0)) {
7577                 if (r != -ERESTARTSYS)
7578                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7579                 ttm_eu_backoff_reservation(&ticket, &list);
7580                 return r;
7581         }
7582
7583         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7584         if (unlikely(r != 0)) {
7585                 amdgpu_bo_unpin(rbo);
7586                 ttm_eu_backoff_reservation(&ticket, &list);
7587                 DRM_ERROR("%p bind failed\n", rbo);
7588                 return r;
7589         }
7590
7591         ttm_eu_backoff_reservation(&ticket, &list);
7592
7593         afb->address = amdgpu_bo_gpu_offset(rbo);
7594
7595         amdgpu_bo_ref(rbo);
7596
7597         /*
7598          * We don't do surface updates on planes that have been newly created,
7599          * but we also don't have the afb->address during atomic check.
7600          *
7601          * Fill in buffer attributes depending on the address here, but only on
7602          * newly created planes since they're not being used by DC yet and this
7603          * won't modify global state.
7604          */
7605         dm_plane_state_old = to_dm_plane_state(plane->state);
7606         dm_plane_state_new = to_dm_plane_state(new_state);
7607
7608         if (dm_plane_state_new->dc_state &&
7609             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7610                 struct dc_plane_state *plane_state =
7611                         dm_plane_state_new->dc_state;
7612                 bool force_disable_dcc = !plane_state->dcc.enable;
7613
7614                 fill_plane_buffer_attributes(
7615                         adev, afb, plane_state->format, plane_state->rotation,
7616                         afb->tiling_flags,
7617                         &plane_state->tiling_info, &plane_state->plane_size,
7618                         &plane_state->dcc, &plane_state->address,
7619                         afb->tmz_surface, force_disable_dcc);
7620         }
7621
7622         return 0;
7623 }
7624
7625 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7626                                        struct drm_plane_state *old_state)
7627 {
7628         struct amdgpu_bo *rbo;
7629         int r;
7630
7631         if (!old_state->fb)
7632                 return;
7633
7634         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7635         r = amdgpu_bo_reserve(rbo, false);
7636         if (unlikely(r)) {
7637                 DRM_ERROR("failed to reserve rbo before unpin\n");
7638                 return;
7639         }
7640
7641         amdgpu_bo_unpin(rbo);
7642         amdgpu_bo_unreserve(rbo);
7643         amdgpu_bo_unref(&rbo);
7644 }
7645
7646 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7647                                        struct drm_crtc_state *new_crtc_state)
7648 {
7649         struct drm_framebuffer *fb = state->fb;
7650         int min_downscale, max_upscale;
7651         int min_scale = 0;
7652         int max_scale = INT_MAX;
7653
7654         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7655         if (fb && state->crtc) {
7656                 /* Validate viewport to cover the case when only the position changes */
7657                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7658                         int viewport_width = state->crtc_w;
7659                         int viewport_height = state->crtc_h;
7660
7661                         if (state->crtc_x < 0)
7662                                 viewport_width += state->crtc_x;
7663                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7664                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7665
7666                         if (state->crtc_y < 0)
7667                                 viewport_height += state->crtc_y;
7668                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7669                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7670
7671                         if (viewport_width < 0 || viewport_height < 0) {
7672                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7673                                 return -EINVAL;
7674                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7675                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7676                                 return -EINVAL;
7677                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7678                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7679                                 return -EINVAL;
7680                         }
7681
7682                 }
7683
7684                 /* Get min/max allowed scaling factors from plane caps. */
7685                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7686                                              &min_downscale, &max_upscale);
7687                 /*
7688                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7689                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7690                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7691                  */
7692                 min_scale = (1000 << 16) / max_upscale;
7693                 max_scale = (1000 << 16) / min_downscale;
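                /*
                 * Worked example (hypothetical plane caps): max_upscale =
                 * 2000, i.e. 2.0x in DC's 1.0 == 1000 convention, gives
                 * min_scale = (1000 << 16) / 2000 = 0x8000, which is 0.5 in
                 * drm's 16.16 src/dst fixed point.
                 */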
7694         }
7695
7696         return drm_atomic_helper_check_plane_state(
7697                 state, new_crtc_state, min_scale, max_scale, true, true);
7698 }
7699
7700 static int dm_plane_atomic_check(struct drm_plane *plane,
7701                                  struct drm_atomic_state *state)
7702 {
7703         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7704                                                                                  plane);
7705         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7706         struct dc *dc = adev->dm.dc;
7707         struct dm_plane_state *dm_plane_state;
7708         struct dc_scaling_info scaling_info;
7709         struct drm_crtc_state *new_crtc_state;
7710         int ret;
7711
7712         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7713
7714         dm_plane_state = to_dm_plane_state(new_plane_state);
7715
7716         if (!dm_plane_state->dc_state)
7717                 return 0;
7718
7719         new_crtc_state =
7720                 drm_atomic_get_new_crtc_state(state,
7721                                               new_plane_state->crtc);
7722         if (!new_crtc_state)
7723                 return -EINVAL;
7724
7725         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7726         if (ret)
7727                 return ret;
7728
7729         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7730         if (ret)
7731                 return ret;
7732
7733         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7734                 return 0;
7735
7736         return -EINVAL;
7737 }
7738
7739 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7740                                        struct drm_atomic_state *state)
7741 {
7742         /* Only support async updates on cursor planes. */
7743         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7744                 return -EINVAL;
7745
7746         return 0;
7747 }
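/*
 * Async plane updates deliberately bypass the full atomic commit path; only
 * cursor planes qualify, and dm_plane_atomic_async_update() below applies
 * the new position/fb directly through handle_cursor_update().
 */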
7748
7749 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7750                                          struct drm_atomic_state *state)
7751 {
7752         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7753                                                                            plane);
7754         struct drm_plane_state *old_state =
7755                 drm_atomic_get_old_plane_state(state, plane);
7756
7757         trace_amdgpu_dm_atomic_update_cursor(new_state);
7758
7759         swap(plane->state->fb, new_state->fb);
7760
7761         plane->state->src_x = new_state->src_x;
7762         plane->state->src_y = new_state->src_y;
7763         plane->state->src_w = new_state->src_w;
7764         plane->state->src_h = new_state->src_h;
7765         plane->state->crtc_x = new_state->crtc_x;
7766         plane->state->crtc_y = new_state->crtc_y;
7767         plane->state->crtc_w = new_state->crtc_w;
7768         plane->state->crtc_h = new_state->crtc_h;
7769
7770         handle_cursor_update(plane, old_state);
7771 }
7772
7773 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7774         .prepare_fb = dm_plane_helper_prepare_fb,
7775         .cleanup_fb = dm_plane_helper_cleanup_fb,
7776         .atomic_check = dm_plane_atomic_check,
7777         .atomic_async_check = dm_plane_atomic_async_check,
7778         .atomic_async_update = dm_plane_atomic_async_update
7779 };
7780
7781 /*
7782  * TODO: these are currently initialized to rgb formats only.
7783  * For future use cases we should either initialize them dynamically based on
7784  * plane capabilities, or initialize this array to all formats, so the
7785  * internal drm check will succeed, and let DC implement the proper check.
7786  */
7787 static const uint32_t rgb_formats[] = {
7788         DRM_FORMAT_XRGB8888,
7789         DRM_FORMAT_ARGB8888,
7790         DRM_FORMAT_RGBA8888,
7791         DRM_FORMAT_XRGB2101010,
7792         DRM_FORMAT_XBGR2101010,
7793         DRM_FORMAT_ARGB2101010,
7794         DRM_FORMAT_ABGR2101010,
7795         DRM_FORMAT_XRGB16161616,
7796         DRM_FORMAT_XBGR16161616,
7797         DRM_FORMAT_ARGB16161616,
7798         DRM_FORMAT_ABGR16161616,
7799         DRM_FORMAT_XBGR8888,
7800         DRM_FORMAT_ABGR8888,
7801         DRM_FORMAT_RGB565,
7802 };
7803
7804 static const uint32_t overlay_formats[] = {
7805         DRM_FORMAT_XRGB8888,
7806         DRM_FORMAT_ARGB8888,
7807         DRM_FORMAT_RGBA8888,
7808         DRM_FORMAT_XBGR8888,
7809         DRM_FORMAT_ABGR8888,
7810         DRM_FORMAT_RGB565
7811 };
7812
7813 static const u32 cursor_formats[] = {
7814         DRM_FORMAT_ARGB8888
7815 };
7816
7817 static int get_plane_formats(const struct drm_plane *plane,
7818                              const struct dc_plane_cap *plane_cap,
7819                              uint32_t *formats, int max_formats)
7820 {
7821         int i, num_formats = 0;
7822
7823         /*
7824          * TODO: Query support for each group of formats directly from
7825          * DC plane caps. This will require adding more formats to the
7826          * caps list.
7827          */
7828
7829         switch (plane->type) {
7830         case DRM_PLANE_TYPE_PRIMARY:
7831                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7832                         if (num_formats >= max_formats)
7833                                 break;
7834
7835                         formats[num_formats++] = rgb_formats[i];
7836                 }
7837
7838                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7839                         formats[num_formats++] = DRM_FORMAT_NV12;
7840                 if (plane_cap && plane_cap->pixel_format_support.p010)
7841                         formats[num_formats++] = DRM_FORMAT_P010;
7842                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7843                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7844                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7845                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7846                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7847                 }
7848                 break;
7849
7850         case DRM_PLANE_TYPE_OVERLAY:
7851                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7852                         if (num_formats >= max_formats)
7853                                 break;
7854
7855                         formats[num_formats++] = overlay_formats[i];
7856                 }
7857                 break;
7858
7859         case DRM_PLANE_TYPE_CURSOR:
7860                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7861                         if (num_formats >= max_formats)
7862                                 break;
7863
7864                         formats[num_formats++] = cursor_formats[i];
7865                 }
7866                 break;
7867         }
7868
7869         return num_formats;
7870 }
7871
7872 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7873                                 struct drm_plane *plane,
7874                                 unsigned long possible_crtcs,
7875                                 const struct dc_plane_cap *plane_cap)
7876 {
7877         uint32_t formats[32];
7878         int num_formats;
7879         int res = -EPERM;
7880         unsigned int supported_rotations;
7881         uint64_t *modifiers = NULL;
7882
7883         num_formats = get_plane_formats(plane, plane_cap, formats,
7884                                         ARRAY_SIZE(formats));
7885
7886         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7887         if (res)
7888                 return res;
7889
7890         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7891                                        &dm_plane_funcs, formats, num_formats,
7892                                        modifiers, plane->type, NULL);
7893         kfree(modifiers);
7894         if (res)
7895                 return res;
7896
7897         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7898             plane_cap && plane_cap->per_pixel_alpha) {
7899                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7900                                           BIT(DRM_MODE_BLEND_PREMULTI);
7901
7902                 drm_plane_create_alpha_property(plane);
7903                 drm_plane_create_blend_mode_property(plane, blend_caps);
7904         }
7905
7906         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7907             plane_cap &&
7908             (plane_cap->pixel_format_support.nv12 ||
7909              plane_cap->pixel_format_support.p010)) {
7910                 /* This only affects YUV formats. */
7911                 drm_plane_create_color_properties(
7912                         plane,
7913                         BIT(DRM_COLOR_YCBCR_BT601) |
7914                         BIT(DRM_COLOR_YCBCR_BT709) |
7915                         BIT(DRM_COLOR_YCBCR_BT2020),
7916                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7917                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7918                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7919         }
7920
7921         supported_rotations =
7922                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7923                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7924
7925         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7926             plane->type != DRM_PLANE_TYPE_CURSOR)
7927                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7928                                                    supported_rotations);
7929
7930         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7931
7932         /* Create (reset) the plane state */
7933         if (plane->funcs->reset)
7934                 plane->funcs->reset(plane);
7935
7936         return 0;
7937 }
7938
7939 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7940                                struct drm_plane *plane,
7941                                uint32_t crtc_index)
7942 {
7943         struct amdgpu_crtc *acrtc = NULL;
7944         struct drm_plane *cursor_plane;
7945
7946         int res = -ENOMEM;
7947
7948         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7949         if (!cursor_plane)
7950                 goto fail;
7951
7952         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7953         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7954
7955         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7956         if (!acrtc)
7957                 goto fail;
7958
7959         res = drm_crtc_init_with_planes(
7960                         dm->ddev,
7961                         &acrtc->base,
7962                         plane,
7963                         cursor_plane,
7964                         &amdgpu_dm_crtc_funcs, NULL);
7965
7966         if (res)
7967                 goto fail;
7968
7969         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7970
7971         /* Create (reset) the CRTC state */
7972         if (acrtc->base.funcs->reset)
7973                 acrtc->base.funcs->reset(&acrtc->base);
7974
7975         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7976         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7977
7978         acrtc->crtc_id = crtc_index;
7979         acrtc->base.enabled = false;
7980         acrtc->otg_inst = -1;
7981
7982         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7983         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7984                                    true, MAX_COLOR_LUT_ENTRIES);
7985         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7986
7987         return 0;
7988
7989 fail:
7990         kfree(acrtc);
7991         kfree(cursor_plane);
7992         return res;
7993 }
7994
7995
7996 static int to_drm_connector_type(enum signal_type st)
7997 {
7998         switch (st) {
7999         case SIGNAL_TYPE_HDMI_TYPE_A:
8000                 return DRM_MODE_CONNECTOR_HDMIA;
8001         case SIGNAL_TYPE_EDP:
8002                 return DRM_MODE_CONNECTOR_eDP;
8003         case SIGNAL_TYPE_LVDS:
8004                 return DRM_MODE_CONNECTOR_LVDS;
8005         case SIGNAL_TYPE_RGB:
8006                 return DRM_MODE_CONNECTOR_VGA;
8007         case SIGNAL_TYPE_DISPLAY_PORT:
8008         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8009                 return DRM_MODE_CONNECTOR_DisplayPort;
8010         case SIGNAL_TYPE_DVI_DUAL_LINK:
8011         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8012                 return DRM_MODE_CONNECTOR_DVID;
8013         case SIGNAL_TYPE_VIRTUAL:
8014                 return DRM_MODE_CONNECTOR_VIRTUAL;
8015
8016         default:
8017                 return DRM_MODE_CONNECTOR_Unknown;
8018         }
8019 }
8020
8021 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8022 {
8023         struct drm_encoder *encoder;
8024
8025         /* There is only one encoder per connector */
8026         drm_connector_for_each_possible_encoder(connector, encoder)
8027                 return encoder;
8028
8029         return NULL;
8030 }
8031
8032 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8033 {
8034         struct drm_encoder *encoder;
8035         struct amdgpu_encoder *amdgpu_encoder;
8036
8037         encoder = amdgpu_dm_connector_to_encoder(connector);
8038
8039         if (encoder == NULL)
8040                 return;
8041
8042         amdgpu_encoder = to_amdgpu_encoder(encoder);
8043
8044         amdgpu_encoder->native_mode.clock = 0;
8045
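        /*
         * probed_modes was sorted by the caller (drm_mode_sort() in
         * amdgpu_dm_connector_ddc_get_modes()), so the preferred mode
         * with the highest resolution is the first list entry; only
         * that entry needs to be inspected.
         */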
8046         if (!list_empty(&connector->probed_modes)) {
8047                 struct drm_display_mode *preferred_mode = NULL;
8048
8049                 list_for_each_entry(preferred_mode,
8050                                     &connector->probed_modes,
8051                                     head) {
8052                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8053                                 amdgpu_encoder->native_mode = *preferred_mode;
8054
8055                         break;
8056                 }
8057
8058         }
8059 }
8060
8061 static struct drm_display_mode *
8062 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8063                              char *name,
8064                              int hdisplay, int vdisplay)
8065 {
8066         struct drm_device *dev = encoder->dev;
8067         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8068         struct drm_display_mode *mode = NULL;
8069         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8070
8071         mode = drm_mode_duplicate(dev, native_mode);
8072
8073         if (mode == NULL)
8074                 return NULL;
8075
8076         mode->hdisplay = hdisplay;
8077         mode->vdisplay = vdisplay;
8078         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8079         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8080
8081         return mode;
8082
8083 }
8084
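/*
 * Panels typically only report their native timings; add a list of
 * common lower resolutions so userspace can select them and let the
 * display hardware scale up to the native mode. Modes larger than,
 * equal to, or already present in the probed list are skipped.
 */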
8085 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8086                                                  struct drm_connector *connector)
8087 {
8088         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8089         struct drm_display_mode *mode = NULL;
8090         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8091         struct amdgpu_dm_connector *amdgpu_dm_connector =
8092                                 to_amdgpu_dm_connector(connector);
8093         int i;
8094         int n;
8095         struct mode_size {
8096                 char name[DRM_DISPLAY_MODE_LEN];
8097                 int w;
8098                 int h;
8099         } common_modes[] = {
8100                 {  "640x480",  640,  480},
8101                 {  "800x600",  800,  600},
8102                 { "1024x768", 1024,  768},
8103                 { "1280x720", 1280,  720},
8104                 { "1280x800", 1280,  800},
8105                 {"1280x1024", 1280, 1024},
8106                 { "1440x900", 1440,  900},
8107                 {"1680x1050", 1680, 1050},
8108                 {"1600x1200", 1600, 1200},
8109                 {"1920x1080", 1920, 1080},
8110                 {"1920x1200", 1920, 1200}
8111         };
8112
8113         n = ARRAY_SIZE(common_modes);
8114
8115         for (i = 0; i < n; i++) {
8116                 struct drm_display_mode *curmode = NULL;
8117                 bool mode_existed = false;
8118
8119                 if (common_modes[i].w > native_mode->hdisplay ||
8120                     common_modes[i].h > native_mode->vdisplay ||
8121                    (common_modes[i].w == native_mode->hdisplay &&
8122                     common_modes[i].h == native_mode->vdisplay))
8123                         continue;
8124
8125                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8126                         if (common_modes[i].w == curmode->hdisplay &&
8127                             common_modes[i].h == curmode->vdisplay) {
8128                                 mode_existed = true;
8129                                 break;
8130                         }
8131                 }
8132
8133                 if (mode_existed)
8134                         continue;
8135
8136                 mode = amdgpu_dm_create_common_mode(encoder,
8137                                 common_modes[i].name, common_modes[i].w,
8138                                 common_modes[i].h);
8139                 if (!mode)
8140                         continue;
8141
8142                 drm_mode_probed_add(connector, mode);
8143                 amdgpu_dm_connector->num_modes++;
8144         }
8145 }
8146
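/*
 * Internal panels (eDP/LVDS) may be mounted rotated; consult the DRM
 * panel-orientation quirk table (looked up by the native resolution)
 * and attach the corresponding connector property.
 */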
8147 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8148 {
8149         struct drm_encoder *encoder;
8150         struct amdgpu_encoder *amdgpu_encoder;
8151         const struct drm_display_mode *native_mode;
8152
8153         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8154             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8155                 return;
8156
8157         encoder = amdgpu_dm_connector_to_encoder(connector);
8158         if (!encoder)
8159                 return;
8160
8161         amdgpu_encoder = to_amdgpu_encoder(encoder);
8162
8163         native_mode = &amdgpu_encoder->native_mode;
8164         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8165                 return;
8166
8167         drm_connector_set_panel_orientation_with_quirk(connector,
8168                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8169                                                        native_mode->hdisplay,
8170                                                        native_mode->vdisplay);
8171 }
8172
8173 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8174                                               struct edid *edid)
8175 {
8176         struct amdgpu_dm_connector *amdgpu_dm_connector =
8177                         to_amdgpu_dm_connector(connector);
8178
8179         if (edid) {
8180                 /* empty probed_modes */
8181                 INIT_LIST_HEAD(&connector->probed_modes);
8182                 amdgpu_dm_connector->num_modes =
8183                                 drm_add_edid_modes(connector, edid);
8184
8185                 /* Sort the probed modes before calling
8186                  * amdgpu_dm_get_native_mode(), since an EDID can
8187                  * contain more than one preferred mode. Modes later
8188                  * in the probed list may carry a higher preferred
8189                  * resolution: for example, 3840x2160 in the base EDID
8190                  * preferred timing and 4096x2160 in a later DID
8191                  * extension block.
8192                  */
8193                 drm_mode_sort(&connector->probed_modes);
8194                 amdgpu_dm_get_native_mode(connector);
8195
8196                 /* Freesync capabilities are reset by calling
8197                  * drm_add_edid_modes() and need to be
8198                  * restored here.
8199                  */
8200                 amdgpu_dm_update_freesync_caps(connector, edid);
8201
8202                 amdgpu_set_panel_orientation(connector);
8203         } else {
8204                 amdgpu_dm_connector->num_modes = 0;
8205         }
8206 }
8207
8208 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8209                               struct drm_display_mode *mode)
8210 {
8211         struct drm_display_mode *m;
8212
8213         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8214                 if (drm_mode_equal(m, mode))
8215                         return true;
8216         }
8217
8218         return false;
8219 }
8220
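/*
 * Synthesize fixed-refresh variants of the highest-refresh probed mode
 * by stretching the vertical blank. With the pixel clock in kHz and
 * rates in mHz, vrefresh = clock * 1000 * 1000 / (htotal * vtotal), so
 * the loop below solves target_vtotal = clock * 1000 * 1000 /
 * (rate * htotal). E.g. retargeting 1920x1080@60 (clock 148500,
 * htotal 2200, vtotal 1125) to 48 Hz gives
 * 148500 * 1000 * 1000 / (48000 * 2200) = 1406, i.e. 281 extra lines
 * of vertical blank.
 */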
8221 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8222 {
8223         const struct drm_display_mode *m;
8224         struct drm_display_mode *new_mode;
8225         uint i;
8226         uint32_t new_modes_count = 0;
8227
8228         /* Standard FPS values
8229          *
8230          * 23.976       - TV/NTSC
8231          * 24           - Cinema
8232          * 25           - TV/PAL
8233          * 29.97        - TV/NTSC
8234          * 30           - TV/NTSC
8235          * 48           - Cinema HFR
8236          * 50           - TV/PAL
8237          * 60           - Commonly used
8238          * 48,72,96,120 - Multiples of 24
8239          */
8240         static const uint32_t common_rates[] = {
8241                 23976, 24000, 25000, 29970, 30000,
8242                 48000, 50000, 60000, 72000, 96000, 120000
8243         };
8244
8245         /*
8246          * Find mode with highest refresh rate with the same resolution
8247          * as the preferred mode. Some monitors report a preferred mode
8248          * with lower resolution than the highest refresh rate supported.
8249          */
8250
8251         m = get_highest_refresh_rate_mode(aconnector, true);
8252         if (!m)
8253                 return 0;
8254
8255         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8256                 uint64_t target_vtotal, target_vtotal_diff;
8257                 uint64_t num, den;
8258
8259                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8260                         continue;
8261
8262                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8263                     common_rates[i] > aconnector->max_vfreq * 1000)
8264                         continue;
8265
8266                 num = (unsigned long long)m->clock * 1000 * 1000;
8267                 den = common_rates[i] * (unsigned long long)m->htotal;
8268                 target_vtotal = div_u64(num, den);
8269                 target_vtotal_diff = target_vtotal - m->vtotal;
8270
8271                 /* Check for illegal modes */
8272                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8273                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8274                     m->vtotal + target_vtotal_diff < m->vsync_end)
8275                         continue;
8276
8277                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8278                 if (!new_mode)
8279                         goto out;
8280
8281                 new_mode->vtotal += (u16)target_vtotal_diff;
8282                 new_mode->vsync_start += (u16)target_vtotal_diff;
8283                 new_mode->vsync_end += (u16)target_vtotal_diff;
8284                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8285                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8286
8287                 if (!is_duplicate_mode(aconnector, new_mode)) {
8288                         drm_mode_probed_add(&aconnector->base, new_mode);
8289                         new_modes_count += 1;
8290                 } else
8291                         drm_mode_destroy(aconnector->base.dev, new_mode);
8292         }
8293  out:
8294         return new_modes_count;
8295 }
8296
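/*
 * Only synthesize freesync modes when the panel reports a usable VRR
 * window; a min/max refresh range of 10 Hz or less is treated as too
 * narrow to bother with.
 */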
8297 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8298                                                    struct edid *edid)
8299 {
8300         struct amdgpu_dm_connector *amdgpu_dm_connector =
8301                 to_amdgpu_dm_connector(connector);
8302
8303         if (!edid)
8304                 return;
8305
8306         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8307                 amdgpu_dm_connector->num_modes +=
8308                         add_fs_modes(amdgpu_dm_connector);
8309 }
8310
8311 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8312 {
8313         struct amdgpu_dm_connector *amdgpu_dm_connector =
8314                         to_amdgpu_dm_connector(connector);
8315         struct drm_encoder *encoder;
8316         struct edid *edid = amdgpu_dm_connector->edid;
8317
8318         encoder = amdgpu_dm_connector_to_encoder(connector);
8319
8320         if (!drm_edid_is_valid(edid)) {
8321                 amdgpu_dm_connector->num_modes =
8322                                 drm_add_modes_noedid(connector, 640, 480);
8323         } else {
8324                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8325                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8326                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8327         }
8328         amdgpu_dm_fbc_init(connector);
8329
8330         return amdgpu_dm_connector->num_modes;
8331 }
8332
8333 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8334                                      struct amdgpu_dm_connector *aconnector,
8335                                      int connector_type,
8336                                      struct dc_link *link,
8337                                      int link_index)
8338 {
8339         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8340
8341         /*
8342          * Some of the properties below require access to state, like bpc.
8343          * Allocate some default initial connector state with our reset helper.
8344          */
8345         if (aconnector->base.funcs->reset)
8346                 aconnector->base.funcs->reset(&aconnector->base);
8347
8348         aconnector->connector_id = link_index;
8349         aconnector->dc_link = link;
8350         aconnector->base.interlace_allowed = false;
8351         aconnector->base.doublescan_allowed = false;
8352         aconnector->base.stereo_allowed = false;
8353         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8354         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8355         aconnector->audio_inst = -1;
8356         mutex_init(&aconnector->hpd_lock);
8357
8358         /*
8359          * Configure HPD hot-plug support: connector->polled defaults to 0,
8360          * which means HPD hot plug is not supported.
8361          */
8362         switch (connector_type) {
8363         case DRM_MODE_CONNECTOR_HDMIA:
8364                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8365                 aconnector->base.ycbcr_420_allowed =
8366                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8367                 break;
8368         case DRM_MODE_CONNECTOR_DisplayPort:
8369                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8370                 link->link_enc = link_enc_cfg_get_link_enc(link);
8371                 ASSERT(link->link_enc);
8372                 if (link->link_enc)
8373                         aconnector->base.ycbcr_420_allowed =
8374                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
8375                 break;
8376         case DRM_MODE_CONNECTOR_DVID:
8377                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8378                 break;
8379         default:
8380                 break;
8381         }
8382
8383         drm_object_attach_property(&aconnector->base.base,
8384                                 dm->ddev->mode_config.scaling_mode_property,
8385                                 DRM_MODE_SCALE_NONE);
8386
8387         drm_object_attach_property(&aconnector->base.base,
8388                                 adev->mode_info.underscan_property,
8389                                 UNDERSCAN_OFF);
8390         drm_object_attach_property(&aconnector->base.base,
8391                                 adev->mode_info.underscan_hborder_property,
8392                                 0);
8393         drm_object_attach_property(&aconnector->base.base,
8394                                 adev->mode_info.underscan_vborder_property,
8395                                 0);
8396
8397         if (!aconnector->mst_port)
8398                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8399
8400         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8401         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8402         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8403
8404         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8405             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8406                 drm_object_attach_property(&aconnector->base.base,
8407                                 adev->mode_info.abm_level_property, 0);
8408         }
8409
8410         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8411             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8412             connector_type == DRM_MODE_CONNECTOR_eDP) {
8413                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8414
8415                 if (!aconnector->mst_port)
8416                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8417
8418 #ifdef CONFIG_DRM_AMD_DC_HDCP
8419                 if (adev->dm.hdcp_workqueue)
8420                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8421 #endif
8422         }
8423 }
8424
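/*
 * i2c_algorithm hook: translate the i2c_msg array from the I2C core
 * into a DC i2c_command (one payload per message, read/write derived
 * from I2C_M_RD) and submit it on this connector's DDC channel.
 * Returns the number of messages transferred on success, -EIO
 * otherwise.
 */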
8425 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8426                               struct i2c_msg *msgs, int num)
8427 {
8428         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8429         struct ddc_service *ddc_service = i2c->ddc_service;
8430         struct i2c_command cmd;
8431         int i;
8432         int result = -EIO;
8433
8434         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8435
8436         if (!cmd.payloads)
8437                 return result;
8438
8439         cmd.number_of_payloads = num;
8440         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8441         cmd.speed = 100;
8442
8443         for (i = 0; i < num; i++) {
8444                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8445                 cmd.payloads[i].address = msgs[i].addr;
8446                 cmd.payloads[i].length = msgs[i].len;
8447                 cmd.payloads[i].data = msgs[i].buf;
8448         }
8449
8450         if (dc_submit_i2c(
8451                         ddc_service->ctx->dc,
8452                         ddc_service->ddc_pin->hw_info.ddc_channel,
8453                         &cmd))
8454                 result = num;
8455
8456         kfree(cmd.payloads);
8457         return result;
8458 }
8459
8460 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8461 {
8462         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8463 }
8464
8465 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8466         .master_xfer = amdgpu_dm_i2c_xfer,
8467         .functionality = amdgpu_dm_i2c_func,
8468 };
8469
8470 static struct amdgpu_i2c_adapter *
8471 create_i2c(struct ddc_service *ddc_service,
8472            int link_index,
8473            int *res)
8474 {
8475         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8476         struct amdgpu_i2c_adapter *i2c;
8477
8478         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8479         if (!i2c)
8480                 return NULL;
8481         i2c->base.owner = THIS_MODULE;
8482         i2c->base.class = I2C_CLASS_DDC;
8483         i2c->base.dev.parent = &adev->pdev->dev;
8484         i2c->base.algo = &amdgpu_dm_i2c_algo;
8485         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8486         i2c_set_adapdata(&i2c->base, i2c);
8487         i2c->ddc_service = ddc_service;
8488         if (i2c->ddc_service->ddc_pin)
8489                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8490
8491         return i2c;
8492 }
8493
8494
8495 /*
8496  * Note: this function assumes that dc_link_detect() was called for the
8497  * dc_link which will be represented by this aconnector.
8498  */
8499 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8500                                     struct amdgpu_dm_connector *aconnector,
8501                                     uint32_t link_index,
8502                                     struct amdgpu_encoder *aencoder)
8503 {
8504         int res = 0;
8505         int connector_type;
8506         struct dc *dc = dm->dc;
8507         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8508         struct amdgpu_i2c_adapter *i2c;
8509
8510         link->priv = aconnector;
8511
8512         DRM_DEBUG_DRIVER("%s()\n", __func__);
8513
8514         i2c = create_i2c(link->ddc, link->link_index, &res);
8515         if (!i2c) {
8516                 DRM_ERROR("Failed to create i2c adapter data\n");
8517                 return -ENOMEM;
8518         }
8519
8520         aconnector->i2c = i2c;
8521         res = i2c_add_adapter(&i2c->base);
8522
8523         if (res) {
8524                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8525                 goto out_free;
8526         }
8527
8528         connector_type = to_drm_connector_type(link->connector_signal);
8529
8530         res = drm_connector_init_with_ddc(
8531                         dm->ddev,
8532                         &aconnector->base,
8533                         &amdgpu_dm_connector_funcs,
8534                         connector_type,
8535                         &i2c->base);
8536
8537         if (res) {
8538                 DRM_ERROR("connector_init failed\n");
8539                 aconnector->connector_id = -1;
8540                 goto out_free;
8541         }
8542
8543         drm_connector_helper_add(
8544                         &aconnector->base,
8545                         &amdgpu_dm_connector_helper_funcs);
8546
8547         amdgpu_dm_connector_init_helper(
8548                 dm,
8549                 aconnector,
8550                 connector_type,
8551                 link,
8552                 link_index);
8553
8554         drm_connector_attach_encoder(
8555                 &aconnector->base, &aencoder->base);
8556
8557         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8558                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8559                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8560
8561 out_free:
8562         if (res) {
8563                 kfree(i2c);
8564                 aconnector->i2c = NULL;
8565         }
8566         return res;
8567 }
8568
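/*
 * Every encoder can drive any CRTC, so possible_crtcs is simply a
 * mask with one bit set per instantiated CRTC, e.g. 0xf for
 * num_crtc == 4.
 */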
8569 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8570 {
8571         switch (adev->mode_info.num_crtc) {
8572         case 1:
8573                 return 0x1;
8574         case 2:
8575                 return 0x3;
8576         case 3:
8577                 return 0x7;
8578         case 4:
8579                 return 0xf;
8580         case 5:
8581                 return 0x1f;
8582         case 6:
8583         default:
8584                 return 0x3f;
8585         }
8586 }
8587
8588 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8589                                   struct amdgpu_encoder *aencoder,
8590                                   uint32_t link_index)
8591 {
8592         struct amdgpu_device *adev = drm_to_adev(dev);
8593
8594         int res = drm_encoder_init(dev,
8595                                    &aencoder->base,
8596                                    &amdgpu_dm_encoder_funcs,
8597                                    DRM_MODE_ENCODER_TMDS,
8598                                    NULL);
8599
8600         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8601
8602         if (!res)
8603                 aencoder->encoder_id = link_index;
8604         else
8605                 aencoder->encoder_id = -1;
8606
8607         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8608
8609         return res;
8610 }
8611
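/*
 * Ordering matters here: the enable path turns vblank on before taking
 * the pageflip (and optional vline0) IRQ references, and the disable
 * path releases them in the reverse order.
 */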
8612 static void manage_dm_interrupts(struct amdgpu_device *adev,
8613                                  struct amdgpu_crtc *acrtc,
8614                                  bool enable)
8615 {
8616         /*
8617          * We have no guarantee that the frontend index maps to the same
8618          * backend index - some even map to more than one.
8619          *
8620          * TODO: Use a different interrupt or check DC itself for the mapping.
8621          */
8622         int irq_type =
8623                 amdgpu_display_crtc_idx_to_irq_type(
8624                         adev,
8625                         acrtc->crtc_id);
8626
8627         if (enable) {
8628                 drm_crtc_vblank_on(&acrtc->base);
8629                 amdgpu_irq_get(
8630                         adev,
8631                         &adev->pageflip_irq,
8632                         irq_type);
8633 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8634                 amdgpu_irq_get(
8635                         adev,
8636                         &adev->vline0_irq,
8637                         irq_type);
8638 #endif
8639         } else {
8640 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8641                 amdgpu_irq_put(
8642                         adev,
8643                         &adev->vline0_irq,
8644                         irq_type);
8645 #endif
8646                 amdgpu_irq_put(
8647                         adev,
8648                         &adev->pageflip_irq,
8649                         irq_type);
8650                 drm_crtc_vblank_off(&acrtc->base);
8651         }
8652 }
8653
8654 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8655                                       struct amdgpu_crtc *acrtc)
8656 {
8657         int irq_type =
8658                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8659
8660         /*
8661          * This reads the current state for the IRQ and forcibly reapplies
8662          * the setting to hardware.
8663          */
8664         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8665 }
8666
8667 static bool
8668 is_scaling_state_different(const struct dm_connector_state *dm_state,
8669                            const struct dm_connector_state *old_dm_state)
8670 {
8671         if (dm_state->scaling != old_dm_state->scaling)
8672                 return true;
8673         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8674                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8675                         return true;
8676         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8677                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8678                         return true;
8679         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8680                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8681                 return true;
8682         return false;
8683 }
8684
8685 #ifdef CONFIG_DRM_AMD_DC_HDCP
8686 static bool is_content_protection_different(struct drm_connector_state *state,
8687                                             const struct drm_connector_state *old_state,
8688                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8689 {
8690         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8691         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8692
8693         /* Handle: Type0/1 change */
8694         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8695             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8696                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8697                 return true;
8698         }
8699
8700         /* CP is being re-enabled, ignore this
8701          *
8702          * Handles:     ENABLED -> DESIRED
8703          */
8704         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8705             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8706                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8707                 return false;
8708         }
8709
8710         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8711          *
8712          * Handles:     UNDESIRED -> ENABLED
8713          */
8714         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8715             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8716                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8717
8718         /* Stream removed and re-enabled
8719          *
8720          * Can sometimes overlap with the HPD case,
8721          * thus set update_hdcp to false to avoid
8722          * setting HDCP multiple times.
8723          *
8724          * Handles:     DESIRED -> DESIRED (Special case)
8725          */
8726         if (!(old_state->crtc && old_state->crtc->enabled) &&
8727                 state->crtc && state->crtc->enabled &&
8728                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8729                 dm_con_state->update_hdcp = false;
8730                 return true;
8731         }
8732
8733         /* Hot-plug, headless s3, dpms
8734          *
8735          * Only start HDCP if the display is connected/enabled.
8736          * update_hdcp flag will be set to false until the next
8737          * HPD comes in.
8738          *
8739          * Handles:     DESIRED -> DESIRED (Special case)
8740          */
8741         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8742             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8743                 dm_con_state->update_hdcp = false;
8744                 return true;
8745         }
8746
8747         /*
8748          * Handles:     UNDESIRED -> UNDESIRED
8749          *              DESIRED -> DESIRED
8750          *              ENABLED -> ENABLED
8751          */
8752         if (old_state->content_protection == state->content_protection)
8753                 return false;
8754
8755         /*
8756          * Handles:     UNDESIRED -> DESIRED
8757          *              DESIRED -> UNDESIRED
8758          *              ENABLED -> UNDESIRED
8759          */
8760         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8761                 return true;
8762
8763         /*
8764          * Handles:     DESIRED -> ENABLED
8765          */
8766         return false;
8767 }
8768
8769 #endif
8770 static void remove_stream(struct amdgpu_device *adev,
8771                           struct amdgpu_crtc *acrtc,
8772                           struct dc_stream_state *stream)
8773 {
8774         /* The update mode case: mark the CRTC's OTG as unassigned and the CRTC as disabled */
8775
8776         acrtc->otg_inst = -1;
8777         acrtc->enabled = false;
8778 }
8779
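/*
 * Convert the DRM cursor plane position into DC coordinates. A cursor
 * hanging off the top/left edge is clamped to 0 and the overhang is
 * expressed through the hotspot instead: e.g. crtc_x = -10 yields
 * x = 0, x_hotspot = 10. A fully off-screen cursor leaves
 * position->enable false so the caller turns the cursor off.
 */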
8780 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8781                                struct dc_cursor_position *position)
8782 {
8783         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8784         int x, y;
8785         int xorigin = 0, yorigin = 0;
8786
8787         if (!crtc || !plane->state->fb)
8788                 return 0;
8789
8790         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8791             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8792                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8793                           __func__,
8794                           plane->state->crtc_w,
8795                           plane->state->crtc_h);
8796                 return -EINVAL;
8797         }
8798
8799         x = plane->state->crtc_x;
8800         y = plane->state->crtc_y;
8801
8802         if (x <= -amdgpu_crtc->max_cursor_width ||
8803             y <= -amdgpu_crtc->max_cursor_height)
8804                 return 0;
8805
8806         if (x < 0) {
8807                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8808                 x = 0;
8809         }
8810         if (y < 0) {
8811                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8812                 y = 0;
8813         }
8814         position->enable = true;
8815         position->translate_by_source = true;
8816         position->x = x;
8817         position->y = y;
8818         position->x_hotspot = xorigin;
8819         position->y_hotspot = yorigin;
8820
8821         return 0;
8822 }
8823
8824 static void handle_cursor_update(struct drm_plane *plane,
8825                                  struct drm_plane_state *old_plane_state)
8826 {
8827         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8828         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8829         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8830         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8831         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8832         uint64_t address = afb ? afb->address : 0;
8833         struct dc_cursor_position position = {0};
8834         struct dc_cursor_attributes attributes;
8835         int ret;
8836
8837         if (!plane->state->fb && !old_plane_state->fb)
8838                 return;
8839
8840         DC_LOG_CURSOR("%s: crtc_id=%d with size %d x %d\n",
8841                       __func__,
8842                       amdgpu_crtc->crtc_id,
8843                       plane->state->crtc_w,
8844                       plane->state->crtc_h);
8845
8846         ret = get_cursor_position(plane, crtc, &position);
8847         if (ret)
8848                 return;
8849
8850         if (!position.enable) {
8851                 /* turn off cursor */
8852                 if (crtc_state && crtc_state->stream) {
8853                         mutex_lock(&adev->dm.dc_lock);
8854                         dc_stream_set_cursor_position(crtc_state->stream,
8855                                                       &position);
8856                         mutex_unlock(&adev->dm.dc_lock);
8857                 }
8858                 return;
8859         }
8860
8861         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8862         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8863
8864         memset(&attributes, 0, sizeof(attributes));
8865         attributes.address.high_part = upper_32_bits(address);
8866         attributes.address.low_part  = lower_32_bits(address);
8867         attributes.width             = plane->state->crtc_w;
8868         attributes.height            = plane->state->crtc_h;
8869         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8870         attributes.rotation_angle    = 0;
8871         attributes.attribute_flags.value = 0;
8872
8873         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8874
8875         if (crtc_state->stream) {
8876                 mutex_lock(&adev->dm.dc_lock);
8877                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8878                                                          &attributes))
8879                         DRM_ERROR("DC failed to set cursor attributes\n");
8880
8881                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8882                                                    &position))
8883                         DRM_ERROR("DC failed to set cursor position\n");
8884                 mutex_unlock(&adev->dm.dc_lock);
8885         }
8886 }
8887
8888 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8889 {
8891         assert_spin_locked(&acrtc->base.dev->event_lock);
8892         WARN_ON(acrtc->event);
8893
8894         acrtc->event = acrtc->base.state->event;
8895
8896         /* Set the flip status */
8897         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8898
8899         /* Mark this event as consumed */
8900         acrtc->base.state->event = NULL;
8901
8902         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8903                      acrtc->crtc_id);
8904 }
8905
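/*
 * Runs in the flip path: feed the flip into the freesync module,
 * rebuild the VRR infopacket and, on families older than
 * AMDGPU_FAMILY_AI, also re-program vmin/vmax before the frame ends.
 * Everything happens under event_lock because the IRQ handlers read
 * dm_irq_params.
 */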
8906 static void update_freesync_state_on_stream(
8907         struct amdgpu_display_manager *dm,
8908         struct dm_crtc_state *new_crtc_state,
8909         struct dc_stream_state *new_stream,
8910         struct dc_plane_state *surface,
8911         u32 flip_timestamp_in_us)
8912 {
8913         struct mod_vrr_params vrr_params;
8914         struct dc_info_packet vrr_infopacket = {0};
8915         struct amdgpu_device *adev = dm->adev;
8916         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8917         unsigned long flags;
8918         bool pack_sdp_v1_3 = false;
8919
8920         if (!new_stream)
8921                 return;
8922
8923         /*
8924          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8925          * For now it's sufficient to just guard against these conditions.
8926          */
8927
8928         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8929                 return;
8930
8931         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8932         vrr_params = acrtc->dm_irq_params.vrr_params;
8933
8934         if (surface) {
8935                 mod_freesync_handle_preflip(
8936                         dm->freesync_module,
8937                         surface,
8938                         new_stream,
8939                         flip_timestamp_in_us,
8940                         &vrr_params);
8941
8942                 if (adev->family < AMDGPU_FAMILY_AI &&
8943                     amdgpu_dm_vrr_active(new_crtc_state)) {
8944                         mod_freesync_handle_v_update(dm->freesync_module,
8945                                                      new_stream, &vrr_params);
8946
8947                         /* Need to call this before the frame ends. */
8948                         dc_stream_adjust_vmin_vmax(dm->dc,
8949                                                    new_crtc_state->stream,
8950                                                    &vrr_params.adjust);
8951                 }
8952         }
8953
8954         mod_freesync_build_vrr_infopacket(
8955                 dm->freesync_module,
8956                 new_stream,
8957                 &vrr_params,
8958                 PACKET_TYPE_VRR,
8959                 TRANSFER_FUNC_UNKNOWN,
8960                 &vrr_infopacket,
8961                 pack_sdp_v1_3);
8962
8963         new_crtc_state->freesync_timing_changed |=
8964                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8965                         &vrr_params.adjust,
8966                         sizeof(vrr_params.adjust)) != 0);
8967
8968         new_crtc_state->freesync_vrr_info_changed |=
8969                 (memcmp(&new_crtc_state->vrr_infopacket,
8970                         &vrr_infopacket,
8971                         sizeof(vrr_infopacket)) != 0);
8972
8973         acrtc->dm_irq_params.vrr_params = vrr_params;
8974         new_crtc_state->vrr_infopacket = vrr_infopacket;
8975
8976         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8977         new_stream->vrr_infopacket = vrr_infopacket;
8978
8979         if (new_crtc_state->freesync_vrr_info_changed)
8980                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8981                               new_crtc_state->base.crtc->base.id,
8982                               (int)new_crtc_state->base.vrr_enabled,
8983                               (int)vrr_params.state);
8984
8985         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8986 }
8987
8988 static void update_stream_irq_parameters(
8989         struct amdgpu_display_manager *dm,
8990         struct dm_crtc_state *new_crtc_state)
8991 {
8992         struct dc_stream_state *new_stream = new_crtc_state->stream;
8993         struct mod_vrr_params vrr_params;
8994         struct mod_freesync_config config = new_crtc_state->freesync_config;
8995         struct amdgpu_device *adev = dm->adev;
8996         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8997         unsigned long flags;
8998
8999         if (!new_stream)
9000                 return;
9001
9002         /*
9003          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9004          * For now it's sufficient to just guard against these conditions.
9005          */
9006         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9007                 return;
9008
9009         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9010         vrr_params = acrtc->dm_irq_params.vrr_params;
9011
9012         if (new_crtc_state->vrr_supported &&
9013             config.min_refresh_in_uhz &&
9014             config.max_refresh_in_uhz) {
9015                 /*
9016                  * if freesync compatible mode was set, config.state will be set
9017                  * in atomic check
9018                  */
9019                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9020                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9021                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9022                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9023                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9024                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9025                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9026                 } else {
9027                         config.state = new_crtc_state->base.vrr_enabled ?
9028                                                      VRR_STATE_ACTIVE_VARIABLE :
9029                                                      VRR_STATE_INACTIVE;
9030                 }
9031         } else {
9032                 config.state = VRR_STATE_UNSUPPORTED;
9033         }
9034
9035         mod_freesync_build_vrr_params(dm->freesync_module,
9036                                       new_stream,
9037                                       &config, &vrr_params);
9038
9039         new_crtc_state->freesync_timing_changed |=
9040                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9041                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9042
9043         new_crtc_state->freesync_config = config;
9044         /* Copy state for access from DM IRQ handler */
9045         acrtc->dm_irq_params.freesync_config = config;
9046         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9047         acrtc->dm_irq_params.vrr_params = vrr_params;
9048         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9049 }
9050
9051 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9052                                             struct dm_crtc_state *new_state)
9053 {
9054         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9055         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9056
9057         if (!old_vrr_active && new_vrr_active) {
9058                 /* Transition VRR inactive -> active:
9059                  * While VRR is active, we must not disable vblank irq, as a
9060                  * reenable after disable would compute bogus vblank/pflip
9061                  * timestamps if it likely happened inside display front-porch.
9062                  *
9063                  * We also need vupdate irq for the actual core vblank handling
9064                  * at end of vblank.
9065                  */
9066                 dm_set_vupdate_irq(new_state->base.crtc, true);
9067                 drm_crtc_vblank_get(new_state->base.crtc);
9068                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9069                                  __func__, new_state->base.crtc->base.id);
9070         } else if (old_vrr_active && !new_vrr_active) {
9071                 /* Transition VRR active -> inactive:
9072                  * Allow vblank irq disable again for fixed refresh rate.
9073                  */
9074                 dm_set_vupdate_irq(new_state->base.crtc, false);
9075                 drm_crtc_vblank_put(new_state->base.crtc);
9076                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9077                                  __func__, new_state->base.crtc->base.id);
9078         }
9079 }
9080
9081 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9082 {
9083         struct drm_plane *plane;
9084         struct drm_plane_state *old_plane_state;
9085         int i;
9086
9087         /*
9088          * TODO: Make this per-stream so we don't issue redundant updates for
9089          * commits with multiple streams.
9090          */
9091         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9092                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9093                         handle_cursor_update(plane, old_plane_state);
9094 }
9095
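/*
 * Program all plane updates for one CRTC as a single DC update bundle:
 * gather per-plane scaling/address/flip info, wait for the target
 * vblank (legacy counter-based throttling for fixed refresh,
 * last-flip-based throttling for VRR), arm the pageflip event, then
 * hand everything to DC. The bundle is heap-allocated because its
 * MAX_SURFACES arrays make it too large for the stack.
 */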
9096 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9097                                     struct dc_state *dc_state,
9098                                     struct drm_device *dev,
9099                                     struct amdgpu_display_manager *dm,
9100                                     struct drm_crtc *pcrtc,
9101                                     bool wait_for_vblank)
9102 {
9103         uint32_t i;
9104         uint64_t timestamp_ns;
9105         struct drm_plane *plane;
9106         struct drm_plane_state *old_plane_state, *new_plane_state;
9107         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9108         struct drm_crtc_state *new_pcrtc_state =
9109                         drm_atomic_get_new_crtc_state(state, pcrtc);
9110         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9111         struct dm_crtc_state *dm_old_crtc_state =
9112                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9113         int planes_count = 0, vpos, hpos;
9114         long r;
9115         unsigned long flags;
9116         struct amdgpu_bo *abo;
9117         uint32_t target_vblank, last_flip_vblank;
9118         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9119         bool pflip_present = false;
9120         struct {
9121                 struct dc_surface_update surface_updates[MAX_SURFACES];
9122                 struct dc_plane_info plane_infos[MAX_SURFACES];
9123                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9124                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9125                 struct dc_stream_update stream_update;
9126         } *bundle;
9127
9128         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9129
9130         if (!bundle) {
9131                 dm_error("Failed to allocate update bundle\n");
9132                 goto cleanup;
9133         }
9134
9135         /*
9136          * Disable the cursor first if we're disabling all the planes.
9137          * It'll remain on the screen after the planes are re-enabled
9138          * if we don't.
9139          */
9140         if (acrtc_state->active_planes == 0)
9141                 amdgpu_dm_commit_cursors(state);
9142
9143         /* update planes when needed */
9144         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9145                 struct drm_crtc *crtc = new_plane_state->crtc;
9146                 struct drm_crtc_state *new_crtc_state;
9147                 struct drm_framebuffer *fb = new_plane_state->fb;
9148                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9149                 bool plane_needs_flip;
9150                 struct dc_plane_state *dc_plane;
9151                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9152
9153                 /* Cursor plane is handled after stream updates */
9154                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9155                         continue;
9156
9157                 if (!fb || !crtc || pcrtc != crtc)
9158                         continue;
9159
9160                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9161                 if (!new_crtc_state->active)
9162                         continue;
9163
9164                 dc_plane = dm_new_plane_state->dc_state;
9165
9166                 bundle->surface_updates[planes_count].surface = dc_plane;
9167                 if (new_pcrtc_state->color_mgmt_changed) {
9168                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9169                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9170                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9171                 }
9172
9173                 fill_dc_scaling_info(dm->adev, new_plane_state,
9174                                      &bundle->scaling_infos[planes_count]);
9175
9176                 bundle->surface_updates[planes_count].scaling_info =
9177                         &bundle->scaling_infos[planes_count];
9178
9179                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9180
9181                 pflip_present = pflip_present || plane_needs_flip;
9182
9183                 if (!plane_needs_flip) {
9184                         planes_count += 1;
9185                         continue;
9186                 }
9187
9188                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9189
9190                 /*
9191                  * Wait for all fences on this FB. Do limited wait to avoid
9192                  * deadlock during GPU reset when this fence will not signal
9193                  * but we hold reservation lock for the BO.
9194                  */
9195                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9196                                           msecs_to_jiffies(5000));
9197                 if (unlikely(r <= 0))
9198                         DRM_ERROR("Waiting for fences timed out!\n");
9199
9200                 fill_dc_plane_info_and_addr(
9201                         dm->adev, new_plane_state,
9202                         afb->tiling_flags,
9203                         &bundle->plane_infos[planes_count],
9204                         &bundle->flip_addrs[planes_count].address,
9205                         afb->tmz_surface, false);
9206
9207                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9208                                  new_plane_state->plane->index,
9209                                  bundle->plane_infos[planes_count].dcc.enable);
9210
9211                 bundle->surface_updates[planes_count].plane_info =
9212                         &bundle->plane_infos[planes_count];
9213
9214                 /*
9215                  * Only allow immediate flips for fast updates that don't
9216                  * change FB pitch, DCC state, rotation or mirroring.
9217                  */
9218                 bundle->flip_addrs[planes_count].flip_immediate =
9219                         crtc->state->async_flip &&
9220                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9221
9222                 timestamp_ns = ktime_get_ns();
9223                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9224                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9225                 bundle->surface_updates[planes_count].surface = dc_plane;
9226
9227                 if (!bundle->surface_updates[planes_count].surface) {
9228                         DRM_ERROR("No surface for CRTC: id=%d\n",
9229                                         acrtc_attach->crtc_id);
9230                         continue;
9231                 }
9232
9233                 if (plane == pcrtc->primary)
9234                         update_freesync_state_on_stream(
9235                                 dm,
9236                                 acrtc_state,
9237                                 acrtc_state->stream,
9238                                 dc_plane,
9239                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9240
9241                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9242                                  __func__,
9243                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9244                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9245
9246                 planes_count += 1;
9247
9248         }
9249
9250         if (pflip_present) {
9251                 if (!vrr_active) {
9252                         /* Use old throttling in non-vrr fixed refresh rate mode
9253                          * to keep flip scheduling based on target vblank counts
9254                          * working in a backwards compatible way, e.g., for
9255                          * clients using the GLX_OML_sync_control extension or
9256                          * DRI3/Present extension with defined target_msc.
9257                          */
9258                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9259                 } else {
9261                         /* For variable refresh rate mode only:
9262                          * Get vblank of last completed flip to avoid > 1 vrr
9263                          * flips per video frame by use of throttling, but allow
9264                          * flip programming anywhere in the possibly large
9265                          * variable vrr vblank interval for fine-grained flip
9266                          * timing control and more opportunity to avoid stutter
9267                          * on late submission of flips.
9268                          */
9269                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9270                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9271                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9272                 }
9273
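                /*
                 * Note: wait_for_vblank is a bool, so target_vblank is either
                 * the vblank following the last completed flip (throttled
                 * case) or that flip's own vblank (async flips, where
                 * wait_for_vblank is false).
                 */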
9274                 target_vblank = last_flip_vblank + wait_for_vblank;
9275
9276                 /*
9277                  * Wait until we're out of the vertical blank period before the one
9278                  * targeted by the flip
9279                  */
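                /*
                 * The signed (int) cast on the vblank counter difference
                 * keeps this comparison well behaved if the counter wraps.
                 */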
9280                 while ((acrtc_attach->enabled &&
9281                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9282                                                             0, &vpos, &hpos, NULL,
9283                                                             NULL, &pcrtc->hwmode)
9284                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9285                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9286                         (int)(target_vblank -
9287                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9288                         usleep_range(1000, 1100);
9289                 }
9290
9291                 /*
9292                  * Prepare the flip event for the pageflip interrupt to handle.
9293                  *
9294                  * This only works in the case where we've already turned on the
9295                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
9296                  * from 0 -> n planes we have to skip a hardware generated event
9297                  * and rely on sending it from software.
9298                  */
9299                 if (acrtc_attach->base.state->event &&
9300                     acrtc_state->active_planes > 0 &&
9301                     !acrtc_state->force_dpms_off) {
9302                         drm_crtc_vblank_get(pcrtc);
9303
9304                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9305
9306                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9307                         prepare_flip_isr(acrtc_attach);
9308
9309                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9310                 }
9311
9312                 if (acrtc_state->stream) {
9313                         if (acrtc_state->freesync_vrr_info_changed)
9314                                 bundle->stream_update.vrr_infopacket =
9315                                         &acrtc_state->stream->vrr_infopacket;
9316                 }
9317         }
9318
9319         /* Update the planes if changed or disable if we don't have any. */
9320         if ((planes_count || acrtc_state->active_planes == 0) &&
9321                 acrtc_state->stream) {
9322 #if defined(CONFIG_DRM_AMD_DC_DCN)
9323                 /*
9324                  * If PSR or idle optimizations are enabled then flush out
9325                  * any pending work before hardware programming.
9326                  */
9327                 if (dm->vblank_control_workqueue)
9328                         flush_workqueue(dm->vblank_control_workqueue);
9329 #endif
9330
9331                 bundle->stream_update.stream = acrtc_state->stream;
9332                 if (new_pcrtc_state->mode_changed) {
9333                         bundle->stream_update.src = acrtc_state->stream->src;
9334                         bundle->stream_update.dst = acrtc_state->stream->dst;
9335                 }
9336
9337                 if (new_pcrtc_state->color_mgmt_changed) {
9338                         /*
9339                          * TODO: This isn't fully correct since we've actually
9340                          * already modified the stream in place.
9341                          */
9342                         bundle->stream_update.gamut_remap =
9343                                 &acrtc_state->stream->gamut_remap_matrix;
9344                         bundle->stream_update.output_csc_transform =
9345                                 &acrtc_state->stream->csc_color_matrix;
9346                         bundle->stream_update.out_transfer_func =
9347                                 acrtc_state->stream->out_transfer_func;
9348                 }
9349
9350                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9351                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9352                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9353
9354                 /*
9355                  * If FreeSync state on the stream has changed then we need to
9356                  * re-adjust the min/max bounds now that DC doesn't handle this
9357                  * as part of commit.
9358                  */
9359                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9360                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9361                         dc_stream_adjust_vmin_vmax(
9362                                 dm->dc, acrtc_state->stream,
9363                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9364                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9365                 }
9366                 mutex_lock(&dm->dc_lock);
9367                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9368                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9369                         amdgpu_dm_psr_disable(acrtc_state->stream);
9370
9371                 dc_commit_updates_for_stream(dm->dc,
9372                                              bundle->surface_updates,
9373                                              planes_count,
9374                                              acrtc_state->stream,
9375                                              &bundle->stream_update,
9376                                              dc_state);
9377
9378                 /*
9379                  * Enable or disable the interrupts on the backend.
9380                  *
9381                  * Most pipes are put into power gating when unused.
9382                  *
9383                  * When a pipe is power gated we lose its interrupt
9384                  * enablement state, and it is not restored automatically
9385                  * when power gating is disabled again.
9385                  *
9386                  * So we need to update the IRQ control state in hardware
9387                  * whenever the pipe turns on (since it could be previously
9388                  * power gated) or off (since some pipes can't be power gated
9389                  * on some ASICs).
9390                  */
9391                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9392                         dm_update_pflip_irq_state(drm_to_adev(dev),
9393                                                   acrtc_attach);
9394
9395                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9396                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9397                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9398                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9399
9400                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9401                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9402                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9403                         struct amdgpu_dm_connector *aconn =
9404                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9405
9406                         if (aconn->psr_skip_count > 0)
9407                                 aconn->psr_skip_count--;
9408
9409                         /* Allow PSR when skip count is 0. */
9410                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9411                 } else {
9412                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9413                 }
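                /*
                 * Sketch of the assumed consumer side (simplified, not shown
                 * in this function): the vblank control work item only enters
                 * PSR once the skip count has drained, roughly:
                 *
                 *      if (stream->link->psr_settings.psr_feature_enabled &&
                 *          acrtc->dm_irq_params.allow_psr_entry)
                 *              amdgpu_dm_psr_enable(stream);
                 */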
9414
9415                 mutex_unlock(&dm->dc_lock);
9416         }
9417
9418         /*
9419          * Update cursor state *after* programming all the planes.
9420          * This avoids redundant programming in the case where we're going
9421          * to be disabling a single plane - those pipes are being disabled.
9422          */
9423         if (acrtc_state->active_planes)
9424                 amdgpu_dm_commit_cursors(state);
9425
9426 cleanup:
9427         kfree(bundle);
9428 }
9429
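/*
 * Notify the audio component driver about endpoint changes: removals are
 * signalled first, with the connector's audio_inst reset to -1 under
 * audio_lock, then additions are signalled using the instance reported in
 * DC's stream status.
 */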
9430 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9431                                    struct drm_atomic_state *state)
9432 {
9433         struct amdgpu_device *adev = drm_to_adev(dev);
9434         struct amdgpu_dm_connector *aconnector;
9435         struct drm_connector *connector;
9436         struct drm_connector_state *old_con_state, *new_con_state;
9437         struct drm_crtc_state *new_crtc_state;
9438         struct dm_crtc_state *new_dm_crtc_state;
9439         const struct dc_stream_status *status;
9440         int i, inst;
9441
9442         /* Notify audio device removals. */
9443         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9444                 if (old_con_state->crtc != new_con_state->crtc) {
9445                         /* CRTC changes require notification. */
9446                         goto notify;
9447                 }
9448
9449                 if (!new_con_state->crtc)
9450                         continue;
9451
9452                 new_crtc_state = drm_atomic_get_new_crtc_state(
9453                         state, new_con_state->crtc);
9454
9455                 if (!new_crtc_state)
9456                         continue;
9457
9458                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9459                         continue;
9460
9461         notify:
9462                 aconnector = to_amdgpu_dm_connector(connector);
9463
9464                 mutex_lock(&adev->dm.audio_lock);
9465                 inst = aconnector->audio_inst;
9466                 aconnector->audio_inst = -1;
9467                 mutex_unlock(&adev->dm.audio_lock);
9468
9469                 amdgpu_dm_audio_eld_notify(adev, inst);
9470         }
9471
9472         /* Notify audio device additions. */
9473         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9474                 if (!new_con_state->crtc)
9475                         continue;
9476
9477                 new_crtc_state = drm_atomic_get_new_crtc_state(
9478                         state, new_con_state->crtc);
9479
9480                 if (!new_crtc_state)
9481                         continue;
9482
9483                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9484                         continue;
9485
9486                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9487                 if (!new_dm_crtc_state->stream)
9488                         continue;
9489
9490                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9491                 if (!status)
9492                         continue;
9493
9494                 aconnector = to_amdgpu_dm_connector(connector);
9495
9496                 mutex_lock(&adev->dm.audio_lock);
9497                 inst = status->audio_inst;
9498                 aconnector->audio_inst = inst;
9499                 mutex_unlock(&adev->dm.audio_lock);
9500
9501                 amdgpu_dm_audio_eld_notify(adev, inst);
9502         }
9503 }
9504
9505 /*
9506  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9507  * @crtc_state: the DRM CRTC state
9508  * @stream_state: the DC stream state.
9509  *
9510  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9511  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9512  */
9513 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9514                                                 struct dc_stream_state *stream_state)
9515 {
9516         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9517 }
9518
9519 /**
9520  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9521  * @state: The atomic state to commit
9522  *
9523  * This will tell DC to commit the constructed DC state from atomic_check,
9524  * programming the hardware. Any failure here implies a hardware failure, since
9525  * atomic check should have filtered out anything non-kosher.
9526  */
9527 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9528 {
9529         struct drm_device *dev = state->dev;
9530         struct amdgpu_device *adev = drm_to_adev(dev);
9531         struct amdgpu_display_manager *dm = &adev->dm;
9532         struct dm_atomic_state *dm_state;
9533         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9534         uint32_t i, j;
9535         struct drm_crtc *crtc;
9536         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9537         unsigned long flags;
9538         bool wait_for_vblank = true;
9539         struct drm_connector *connector;
9540         struct drm_connector_state *old_con_state, *new_con_state;
9541         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9542         int crtc_disable_count = 0;
9543         bool mode_set_reset_required = false;
9544
9545         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9546
9547         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9548
9549         dm_state = dm_atomic_get_new_state(state);
9550         if (dm_state && dm_state->context) {
9551                 dc_state = dm_state->context;
9552         } else {
9553                 /* No state changes, retain current state. */
9554                 dc_state_temp = dc_create_state(dm->dc);
9555                 ASSERT(dc_state_temp);
9556                 dc_state = dc_state_temp;
9557                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9558         }
9559
9560         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9561                                        new_crtc_state, i) {
9562                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9563
9564                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9565
9566                 if (old_crtc_state->active &&
9567                     (!new_crtc_state->active ||
9568                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9569                         manage_dm_interrupts(adev, acrtc, false);
9570                         dc_stream_release(dm_old_crtc_state->stream);
9571                 }
9572         }
9573
9574         drm_atomic_helper_calc_timestamping_constants(state);
9575
9576         /* update changed items */
9577         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9578                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9579
9580                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9581                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9582
9583                 DRM_DEBUG_ATOMIC(
9584                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9585                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9586                         "connectors_changed:%d\n",
9587                         acrtc->crtc_id,
9588                         new_crtc_state->enable,
9589                         new_crtc_state->active,
9590                         new_crtc_state->planes_changed,
9591                         new_crtc_state->mode_changed,
9592                         new_crtc_state->active_changed,
9593                         new_crtc_state->connectors_changed);
9594
9595                 /* Disable cursor if disabling crtc */
9596                 if (old_crtc_state->active && !new_crtc_state->active) {
9597                         struct dc_cursor_position position;
9598
9599                         memset(&position, 0, sizeof(position));
9600                         mutex_lock(&dm->dc_lock);
9601                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9602                         mutex_unlock(&dm->dc_lock);
9603                 }
9604
9605                 /* Copy all transient state flags into dc state */
9606                 if (dm_new_crtc_state->stream) {
9607                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9608                                                             dm_new_crtc_state->stream);
9609                 }
9610
9611                 /* handles headless hotplug case, updating new_state and
9612                  * aconnector as needed
9613                  */
9614
9615                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9616
9617                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9618
9619                         if (!dm_new_crtc_state->stream) {
9620                                 /*
9621                                  * This could happen because of issues with
9622                                  * userspace notification delivery.
9623                                  * In this case userspace tries to set a mode
9624                                  * on a display which is in fact disconnected;
9625                                  * dc_sink is then NULL on the aconnector.
9626                                  * We expect a mode reset to come soon.
9627                                  *
9628                                  * This can also happen when an unplug is done
9629                                  * during the resume sequence.
9630                                  *
9631                                  * In this case, we want to pretend we still
9632                                  * have a sink to keep the pipe running so that
9633                                  * hw state is consistent with the sw state.
9634                                  */
9635                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9636                                                 __func__, acrtc->base.base.id);
9637                                 continue;
9638                         }
9639
9640                         if (dm_old_crtc_state->stream)
9641                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9642
9643                         pm_runtime_get_noresume(dev->dev);
9644
9645                         acrtc->enabled = true;
9646                         acrtc->hw_mode = new_crtc_state->mode;
9647                         crtc->hwmode = new_crtc_state->mode;
9648                         mode_set_reset_required = true;
9649                 } else if (modereset_required(new_crtc_state)) {
9650                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9651                         /* i.e. reset mode */
9652                         if (dm_old_crtc_state->stream)
9653                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9654
9655                         mode_set_reset_required = true;
9656                 }
9657         } /* for_each_crtc_in_state() */
9658
9659         if (dc_state) {
9660                 /* if there is a mode set or reset, disable eDP PSR */
9661                 if (mode_set_reset_required) {
9662 #if defined(CONFIG_DRM_AMD_DC_DCN)
9663                         if (dm->vblank_control_workqueue)
9664                                 flush_workqueue(dm->vblank_control_workqueue);
9665 #endif
9666                         amdgpu_dm_psr_disable_all(dm);
9667                 }
9668
9669                 dm_enable_per_frame_crtc_master_sync(dc_state);
9670                 mutex_lock(&dm->dc_lock);
9671                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9672 #if defined(CONFIG_DRM_AMD_DC_DCN)
9673                 /* Allow idle optimization when vblank count is 0 for display off */
9674                 if (dm->active_vblank_irq_count == 0)
9675                         dc_allow_idle_optimizations(dm->dc, true);
9676 #endif
9677                 mutex_unlock(&dm->dc_lock);
9678         }
9679
9680         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9681                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9682
9683                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9684
9685                 if (dm_new_crtc_state->stream != NULL) {
9686                         const struct dc_stream_status *status =
9687                                         dc_stream_get_status(dm_new_crtc_state->stream);
9688
9689                         if (!status)
9690                                 status = dc_stream_get_status_from_state(dc_state,
9691                                                                          dm_new_crtc_state->stream);
9692                         if (!status)
9693                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9694                         else
9695                                 acrtc->otg_inst = status->primary_otg_inst;
9696                 }
9697         }
9698 #ifdef CONFIG_DRM_AMD_DC_HDCP
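        /*
         * If a protected stream is being torn down while userspace still has
         * content protection ENABLED, reset the display's HDCP state and
         * downgrade the property to DESIRED so protection is renegotiated on
         * a later commit that re-enables the CRTC.
         */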
9699         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9700                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9701                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9702                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9703
9704                 new_crtc_state = NULL;
9705
9706                 if (acrtc)
9707                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9708
9709                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9710
9711                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9712                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9713                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9714                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9715                         dm_new_con_state->update_hdcp = true;
9716                         continue;
9717                 }
9718
9719                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9720                         hdcp_update_display(
9721                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9722                                 new_con_state->hdcp_content_type,
9723                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9724         }
9725 #endif
9726
9727         /* Handle connector state changes */
9728         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9729                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9730                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9731                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9732                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9733                 struct dc_stream_update stream_update;
9734                 struct dc_info_packet hdr_packet;
9735                 struct dc_stream_status *status = NULL;
9736                 bool abm_changed, hdr_changed, scaling_changed;
9737
9738                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9739                 memset(&stream_update, 0, sizeof(stream_update));
9740
9741                 if (acrtc) {
9742                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9743                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9744                 }
9745
9746                 /* Skip any modesets/resets */
9747                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9748                         continue;
9749
9750                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9751                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9752
9753                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9754                                                              dm_old_con_state);
9755
9756                 abm_changed = dm_new_crtc_state->abm_level !=
9757                               dm_old_crtc_state->abm_level;
9758
9759                 hdr_changed =
9760                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9761
9762                 if (!scaling_changed && !abm_changed && !hdr_changed)
9763                         continue;
9764
9765                 stream_update.stream = dm_new_crtc_state->stream;
9766                 if (scaling_changed) {
9767                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9768                                         dm_new_con_state, dm_new_crtc_state->stream);
9769
9770                         stream_update.src = dm_new_crtc_state->stream->src;
9771                         stream_update.dst = dm_new_crtc_state->stream->dst;
9772                 }
9773
9774                 if (abm_changed) {
9775                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9776
9777                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9778                 }
9779
9780                 if (hdr_changed) {
9781                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9782                         stream_update.hdr_static_metadata = &hdr_packet;
9783                 }
9784
9785                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9786
9787                 if (WARN_ON(!status))
9788                         continue;
9789
9790                 WARN_ON(!status->plane_count);
9791
9792                 /*
9793                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9794                  * Here we create an empty update on each plane.
9795                  * To fix this, DC should permit updating only stream properties.
9796                  */
9797                 for (j = 0; j < status->plane_count; j++)
9798                         dummy_updates[j].surface = status->plane_states[0];
9799
9800
9801                 mutex_lock(&dm->dc_lock);
9802                 dc_commit_updates_for_stream(dm->dc,
9803                                              dummy_updates,
9804                                              status->plane_count,
9805                                              dm_new_crtc_state->stream,
9806                                              &stream_update,
9807                                              dc_state);
9808                 mutex_unlock(&dm->dc_lock);
9809         }
9810
9811         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9812         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9813                                       new_crtc_state, i) {
9814                 if (old_crtc_state->active && !new_crtc_state->active)
9815                         crtc_disable_count++;
9816
9817                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9818                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9819
9820                 /* For freesync config update on crtc state and params for irq */
9821                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9822
9823                 /* Handle vrr on->off / off->on transitions */
9824                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9825                                                 dm_new_crtc_state);
9826         }
9827
9828         /*
9829          * Enable interrupts for CRTCs that are newly enabled or went through
9830          * a modeset. This is intentionally deferred until after the front end
9831          * state has been modified, so that the OTG is on and the IRQ handlers
9832          * don't access stale or invalid state.
9833          */
9834         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9835                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9836 #ifdef CONFIG_DEBUG_FS
9837                 bool configure_crc = false;
9838                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9839 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9840                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9841 #endif
9842                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9843                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9844                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9845 #endif
9846                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9847
9848                 if (new_crtc_state->active &&
9849                     (!old_crtc_state->active ||
9850                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9851                         dc_stream_retain(dm_new_crtc_state->stream);
9852                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9853                         manage_dm_interrupts(adev, acrtc, true);
9854
9855 #ifdef CONFIG_DEBUG_FS
9856                         /*
9857                          * Frontend may have changed, so reapply the CRC capture
9858                          * settings for the stream.
9859                          */
9860                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9861
9862                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9863                                 configure_crc = true;
9864 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9865                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9866                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9867                                         acrtc->dm_irq_params.crc_window.update_win = true;
9868                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9869                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9870                                         crc_rd_wrk->crtc = crtc;
9871                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9872                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9873                                 }
9874 #endif
9875                         }
9876
9877                         if (configure_crc)
9878                                 if (amdgpu_dm_crtc_configure_crc_source(
9879                                         crtc, dm_new_crtc_state, cur_crc_src))
9880                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9881 #endif
9882                 }
9883         }
9884
9885         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9886                 if (new_crtc_state->async_flip)
9887                         wait_for_vblank = false;
9888
9889         /* update planes when needed per crtc*/
9890         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9891                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9892
9893                 if (dm_new_crtc_state->stream)
9894                         amdgpu_dm_commit_planes(state, dc_state, dev,
9895                                                 dm, crtc, wait_for_vblank);
9896         }
9897
9898         /* Update audio instances for each connector. */
9899         amdgpu_dm_commit_audio(dev, state);
9900
9901 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9902         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9903         /* restore the backlight level */
9904         for (i = 0; i < dm->num_of_edps; i++) {
9905                 if (dm->backlight_dev[i] &&
9906                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9907                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9908         }
9909 #endif
9910         /*
9911          * Send a vblank event for every CRTC event not handled in the flip
9912          * path, and mark each event consumed for
9913          * drm_atomic_helper_commit_hw_done.
9914          */
9914         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9915         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9916
9917                 if (new_crtc_state->event)
9918                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9919
9920                 new_crtc_state->event = NULL;
9921         }
9922         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9923
9924         /* Signal HW programming completion */
9925         drm_atomic_helper_commit_hw_done(state);
9926
9927         if (wait_for_vblank)
9928                 drm_atomic_helper_wait_for_flip_done(dev, state);
9929
9930         drm_atomic_helper_cleanup_planes(dev, state);
9931
9932         /* Return the stolen VGA memory to VRAM */
9933         if (!adev->mman.keep_stolen_vga_memory)
9934                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9935         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9936
9937         /*
9938          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9939          * so we can put the GPU into runtime suspend if we're not driving any
9940          * displays anymore
9941          */
9942         for (i = 0; i < crtc_disable_count; i++)
9943                 pm_runtime_put_autosuspend(dev->dev);
9944         pm_runtime_mark_last_busy(dev->dev);
9945
9946         if (dc_state_temp)
9947                 dc_release_state(dc_state_temp);
9948 }
9949
9950
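/*
 * Build and commit a minimal atomic state touching only the given connector,
 * its current CRTC and that CRTC's primary plane, with mode_changed forced so
 * the full modeset path restores the previous display configuration.
 */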
9951 static int dm_force_atomic_commit(struct drm_connector *connector)
9952 {
9953         int ret = 0;
9954         struct drm_device *ddev = connector->dev;
9955         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9956         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9957         struct drm_plane *plane = disconnected_acrtc->base.primary;
9958         struct drm_connector_state *conn_state;
9959         struct drm_crtc_state *crtc_state;
9960         struct drm_plane_state *plane_state;
9961
9962         if (!state)
9963                 return -ENOMEM;
9964
9965         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9966
9967         /* Construct an atomic state to restore previous display setting */
9968
9969         /*
9970          * Attach connectors to drm_atomic_state
9971          */
9972         conn_state = drm_atomic_get_connector_state(state, connector);
9973
9974         ret = PTR_ERR_OR_ZERO(conn_state);
9975         if (ret)
9976                 goto out;
9977
9978         /* Attach crtc to drm_atomic_state*/
9979         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9980
9981         ret = PTR_ERR_OR_ZERO(crtc_state);
9982         if (ret)
9983                 goto out;
9984
9985         /* force a restore */
9986         crtc_state->mode_changed = true;
9987
9988         /* Attach plane to drm_atomic_state */
9989         plane_state = drm_atomic_get_plane_state(state, plane);
9990
9991         ret = PTR_ERR_OR_ZERO(plane_state);
9992         if (ret)
9993                 goto out;
9994
9995         /* Call commit internally with the state we just constructed */
9996         ret = drm_atomic_commit(state);
9997
9998 out:
9999         drm_atomic_state_put(state);
10000         if (ret)
10001                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10002
10003         return ret;
10004 }
10005
10006 /*
10007  * This function handles all cases when set mode does not come upon hotplug.
10008  * This includes when a display is unplugged then plugged back into the
10009  * same port and when running without usermode desktop manager support.
10010  */
10011 void dm_restore_drm_connector_state(struct drm_device *dev,
10012                                     struct drm_connector *connector)
10013 {
10014         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10015         struct amdgpu_crtc *disconnected_acrtc;
10016         struct dm_crtc_state *acrtc_state;
10017
10018         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10019                 return;
10020
10021         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10022         if (!disconnected_acrtc)
10023                 return;
10024
10025         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10026         if (!acrtc_state->stream)
10027                 return;
10028
10029         /*
10030          * If the previous sink has not been released and is different from
10031          * the current one, we deduce that we cannot rely on a usermode call
10032          * to turn the display on, so we do it here.
10033          */
10034         if (acrtc_state->stream->sink != aconnector->dc_sink)
10035                 dm_force_atomic_commit(&aconnector->base);
10036 }
10037
10038 /*
10039  * Grabs all modesetting locks to serialize against any blocking commits,
10040  * and waits for completion of all non-blocking commits.
10041  */
10042 static int do_aquire_global_lock(struct drm_device *dev,
10043                                  struct drm_atomic_state *state)
10044 {
10045         struct drm_crtc *crtc;
10046         struct drm_crtc_commit *commit;
10047         long ret;
10048
10049         /*
10050          * Adding all modeset locks to acquire_ctx ensures
10051          * that when the framework releases it, the extra
10052          * locks we are taking here will also get released.
10053          */
10054         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10055         if (ret)
10056                 return ret;
10057
10058         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10059                 spin_lock(&crtc->commit_lock);
10060                 commit = list_first_entry_or_null(&crtc->commit_list,
10061                                 struct drm_crtc_commit, commit_entry);
10062                 if (commit)
10063                         drm_crtc_commit_get(commit);
10064                 spin_unlock(&crtc->commit_lock);
10065
10066                 if (!commit)
10067                         continue;
10068
10069                 /*
10070                  * Make sure all pending HW programming has completed
10071                  * and all page flips are done.
10072                  */
10073                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10074
10075                 if (ret > 0)
10076                         ret = wait_for_completion_interruptible_timeout(
10077                                         &commit->flip_done, 10*HZ);
10078
10079                 if (ret == 0)
10080                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
10081                                   crtc->base.id, crtc->name);
10082
10083                 drm_crtc_commit_put(commit);
10084         }
10085
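        /*
         * A timeout (ret == 0) is only logged; it still results in a return
         * value of 0 so the caller proceeds. Only an interrupted wait
         * (ret < 0) is propagated as an error.
         */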
10086         return ret < 0 ? ret : 0;
10087 }
10088
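/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector's
 * capabilities and the requested mode. As an illustration with assumed panel
 * values: a freesync-capable connector advertising a 48-144 Hz range ends up
 * with min_refresh_in_uhz = 48000000 and max_refresh_in_uhz = 144000000, and
 * vrr_supported is set only for modes whose nominal vrefresh lies inside
 * that range.
 */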
10089 static void get_freesync_config_for_crtc(
10090         struct dm_crtc_state *new_crtc_state,
10091         struct dm_connector_state *new_con_state)
10092 {
10093         struct mod_freesync_config config = {0};
10094         struct amdgpu_dm_connector *aconnector =
10095                         to_amdgpu_dm_connector(new_con_state->base.connector);
10096         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10097         int vrefresh = drm_mode_vrefresh(mode);
10098         bool fs_vid_mode = false;
10099
10100         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10101                                         vrefresh >= aconnector->min_vfreq &&
10102                                         vrefresh <= aconnector->max_vfreq;
10103
10104         if (new_crtc_state->vrr_supported) {
10105                 new_crtc_state->stream->ignore_msa_timing_param = true;
10106                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10107
10108                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10109                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10110                 config.vsif_supported = true;
10111                 config.btr = true;
10112
10113                 if (fs_vid_mode) {
10114                         config.state = VRR_STATE_ACTIVE_FIXED;
10115                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10116                         goto out;
10117                 } else if (new_crtc_state->base.vrr_enabled) {
10118                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10119                 } else {
10120                         config.state = VRR_STATE_INACTIVE;
10121                 }
10122         }
10123 out:
10124         new_crtc_state->freesync_config = config;
10125 }
10126
10127 static void reset_freesync_config_for_crtc(
10128         struct dm_crtc_state *new_crtc_state)
10129 {
10130         new_crtc_state->vrr_supported = false;
10131
10132         memset(&new_crtc_state->vrr_infopacket, 0,
10133                sizeof(new_crtc_state->vrr_infopacket));
10134 }
10135
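/*
 * Returns true when the two modes differ only in vertical blanking: the pixel
 * clock, all horizontal parameters and the vsync pulse width
 * (vsync_end - vsync_start) must match, while vtotal, vsync_start and
 * vsync_end differ. Such a front porch change is handled through the
 * FreeSync fixed-refresh path rather than a full modeset.
 */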
10136 static bool
10137 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10138                                  struct drm_crtc_state *new_crtc_state)
10139 {
10140         struct drm_display_mode old_mode, new_mode;
10141
10142         if (!old_crtc_state || !new_crtc_state)
10143                 return false;
10144
10145         old_mode = old_crtc_state->mode;
10146         new_mode = new_crtc_state->mode;
10147
10148         if (old_mode.clock       == new_mode.clock &&
10149             old_mode.hdisplay    == new_mode.hdisplay &&
10150             old_mode.vdisplay    == new_mode.vdisplay &&
10151             old_mode.htotal      == new_mode.htotal &&
10152             old_mode.vtotal      != new_mode.vtotal &&
10153             old_mode.hsync_start == new_mode.hsync_start &&
10154             old_mode.vsync_start != new_mode.vsync_start &&
10155             old_mode.hsync_end   == new_mode.hsync_end &&
10156             old_mode.vsync_end   != new_mode.vsync_end &&
10157             old_mode.hskew       == new_mode.hskew &&
10158             old_mode.vscan       == new_mode.vscan &&
10159             (old_mode.vsync_end - old_mode.vsync_start) ==
10160             (new_mode.vsync_end - new_mode.vsync_start))
10161                 return true;
10162
10163         return false;
10164 }
10165
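/*
 * Fix the refresh rate at the nominal rate of the current mode, in uHz. As a
 * worked example with assumed (illustrative) timings, a 1920x1080 mode with a
 * 148500 kHz pixel clock, htotal 2200 and vtotal 1125 gives:
 *
 *      num = 148500 * 1000 * 1000000 = 148500000000000
 *      den = 2200 * 1125             = 2475000
 *      res = num / den               = 60000000 uHz    (i.e. 60 Hz)
 */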
10166 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10167         uint64_t num, den, res;
10168         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10169
10170         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10171
10172         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10173         den = (unsigned long long)new_crtc_state->mode.htotal *
10174               (unsigned long long)new_crtc_state->mode.vtotal;
10175
10176         res = div_u64(num, den);
10177         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10178 }
10179
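/*
 * Validate and apply the stream-level changes for a single CRTC into the
 * private dm atomic state: on disable the old stream is removed from the dc
 * context, on enable a new stream is created, validated and added. Stream
 * updates that do not require a full modeset (scaling, ABM, color management,
 * freesync config) are handled after the skip_modeset label. Sets
 * *lock_and_validation_needed whenever the global dc state must be
 * revalidated.
 */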
10180 int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10181                          struct drm_atomic_state *state,
10182                          struct drm_crtc *crtc,
10183                          struct drm_crtc_state *old_crtc_state,
10184                          struct drm_crtc_state *new_crtc_state,
10185                          bool enable,
10186                          bool *lock_and_validation_needed)
10187 {
10188         struct dm_atomic_state *dm_state = NULL;
10189         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10190         struct dc_stream_state *new_stream;
10191         int ret = 0;
10192
10193         /*
10194          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10195          * update changed items
10196          */
10197         struct amdgpu_crtc *acrtc = NULL;
10198         struct amdgpu_dm_connector *aconnector = NULL;
10199         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10200         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10201
10202         new_stream = NULL;
10203
10204         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10205         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10206         acrtc = to_amdgpu_crtc(crtc);
10207         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10208
10209         /* TODO This hack should go away */
10210         if (aconnector && enable) {
10211                 /* Make sure fake sink is created in plug-in scenario */
10212                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10213                                                             &aconnector->base);
10214                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10215                                                             &aconnector->base);
10216
10217                 if (IS_ERR(drm_new_conn_state)) {
10218                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10219                         goto fail;
10220                 }
10221
10222                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10223                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10224
10225                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10226                         goto skip_modeset;
10227
10228                 new_stream = create_validate_stream_for_sink(aconnector,
10229                                                              &new_crtc_state->mode,
10230                                                              dm_new_conn_state,
10231                                                              dm_old_crtc_state->stream);
10232
10233                 /*
10234                  * We can have no stream on ACTION_SET if a display
10235                  * was disconnected during S3; in this case it is not an
10236                  * error, as the OS will be updated after detection and
10237                  * will do the right thing on the next atomic commit.
10238                  */
10239
10240                 if (!new_stream) {
10241                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10242                                         __func__, acrtc->base.base.id);
10243                         ret = -ENOMEM;
10244                         goto fail;
10245                 }
10246
10247                 /*
10248                  * TODO: Check VSDB bits to decide whether this should
10249                  * be enabled or not.
10250                  */
10251                 new_stream->triggered_crtc_reset.enabled =
10252                         dm->force_timing_sync;
10253
10254                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10255
10256                 ret = fill_hdr_info_packet(drm_new_conn_state,
10257                                            &new_stream->hdr_static_metadata);
10258                 if (ret)
10259                         goto fail;
10260
10261                 /*
10262                  * If we already removed the old stream from the context
10263                  * (and set the new stream to NULL) then we can't reuse
10264                  * the old stream even if the stream and scaling are unchanged.
10265                  * We'll hit the BUG_ON and black screen.
10266                  *
10267                  * TODO: Refactor this function to allow this check to work
10268                  * in all conditions.
10269                  */
10270                 if (dm_new_crtc_state->stream &&
10271                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10272                         goto skip_modeset;
10273
10274                 if (dm_new_crtc_state->stream &&
10275                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10276                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10277                         new_crtc_state->mode_changed = false;
10278                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
10279                                          new_crtc_state->mode_changed);
10280                 }
10281         }
10282
10283         /* mode_changed flag may get updated above, need to check again */
10284         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10285                 goto skip_modeset;
10286
10287         DRM_DEBUG_ATOMIC(
10288                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10289                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10290                 "connectors_changed:%d\n",
10291                 acrtc->crtc_id,
10292                 new_crtc_state->enable,
10293                 new_crtc_state->active,
10294                 new_crtc_state->planes_changed,
10295                 new_crtc_state->mode_changed,
10296                 new_crtc_state->active_changed,
10297                 new_crtc_state->connectors_changed);
10298
10299         /* Remove stream for any changed/disabled CRTC */
10300         if (!enable) {
10301
10302                 if (!dm_old_crtc_state->stream)
10303                         goto skip_modeset;
10304
10305                 if (dm_new_crtc_state->stream &&
10306                     is_timing_unchanged_for_freesync(new_crtc_state,
10307                                                      old_crtc_state)) {
10308                         new_crtc_state->mode_changed = false;
10309                         DRM_DEBUG_DRIVER(
10310                                 "Mode change not required for front porch change, "
10311                                 "setting mode_changed to %d\n",
10312                                 new_crtc_state->mode_changed);
10313
10314                         set_freesync_fixed_config(dm_new_crtc_state);
10315
10316                         goto skip_modeset;
10317                 } else if (aconnector &&
10318                            is_freesync_video_mode(&new_crtc_state->mode,
10319                                                   aconnector)) {
10320                         struct drm_display_mode *high_mode;
10321
10322                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10323                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10324                                 set_freesync_fixed_config(dm_new_crtc_state);
10325                         }
10326                 }
10327
10328                 ret = dm_atomic_get_state(state, &dm_state);
10329                 if (ret)
10330                         goto fail;
10331
10332                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10333                                 crtc->base.id);
10334
10335                 /* i.e. reset mode */
10336                 if (dc_remove_stream_from_ctx(
10337                                 dm->dc,
10338                                 dm_state->context,
10339                                 dm_old_crtc_state->stream) != DC_OK) {
10340                         ret = -EINVAL;
10341                         goto fail;
10342                 }
10343
10344                 dc_stream_release(dm_old_crtc_state->stream);
10345                 dm_new_crtc_state->stream = NULL;
10346
10347                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10348
10349                 *lock_and_validation_needed = true;
10350
10351         } else {/* Add stream for any updated/enabled CRTC */
10352                 /*
10353                  * Quick fix to prevent a NULL pointer dereference on new_stream
10354                  * when added MST connectors are not found in the existing
10355                  * crtc_state in chained mode.
10356                  * TODO: need to dig out the root cause of that
10357                  */
10357                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10358                         goto skip_modeset;
10359
10360                 if (modereset_required(new_crtc_state))
10361                         goto skip_modeset;
10362
10363                 if (modeset_required(new_crtc_state, new_stream,
10364                                      dm_old_crtc_state->stream)) {
10365
10366                         WARN_ON(dm_new_crtc_state->stream);
10367
10368                         ret = dm_atomic_get_state(state, &dm_state);
10369                         if (ret)
10370                                 goto fail;
10371
10372                         dm_new_crtc_state->stream = new_stream;
10373
10374                         dc_stream_retain(new_stream);
10375
10376                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10377                                          crtc->base.id);
10378
10379                         if (dc_add_stream_to_ctx(
10380                                         dm->dc,
10381                                         dm_state->context,
10382                                         dm_new_crtc_state->stream) != DC_OK) {
10383                                 ret = -EINVAL;
10384                                 goto fail;
10385                         }
10386
10387                         *lock_and_validation_needed = true;
10388                 }
10389         }
10390
10391 skip_modeset:
10392         /* Release extra reference */
10393         if (new_stream)
10394                 dc_stream_release(new_stream);
10395
10396         /*
10397          * We want to do dc stream updates that do not require a
10398          * full modeset below.
10399          */
10400         if (!(enable && aconnector && new_crtc_state->active))
10401                 return 0;
10402         /*
10403          * Given above conditions, the dc state cannot be NULL because:
10404          * 1. We're in the process of enabling CRTCs (the stream has just been
10405          *    added to the dc context, or is already in it),
10406          * 2. Has a valid connector attached, and
10407          * 3. Is currently active and enabled.
10408          * => The dc stream state currently exists.
10409          */
10410         BUG_ON(dm_new_crtc_state->stream == NULL);
10411
10412         /* Scaling or underscan settings */
10413         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10414                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10415                 update_stream_scaling_settings(
10416                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10417
10418         /* ABM settings */
10419         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10420
10421         /*
10422          * Color management settings. We also update color properties
10423          * when a modeset is needed, to ensure it gets reprogrammed.
10424          */
10425         if (dm_new_crtc_state->base.color_mgmt_changed ||
10426             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10427                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10428                 if (ret)
10429                         goto fail;
10430         }
10431
10432         /* Update Freesync settings. */
10433         get_freesync_config_for_crtc(dm_new_crtc_state,
10434                                      dm_new_conn_state);
10435
10436         return ret;
10437
10438 fail:
10439         if (new_stream)
10440                 dc_stream_release(new_stream);
10441         return ret;
10442 }
10443
10444 static bool should_reset_plane(struct drm_atomic_state *state,
10445                                struct drm_plane *plane,
10446                                struct drm_plane_state *old_plane_state,
10447                                struct drm_plane_state *new_plane_state)
10448 {
10449         struct drm_plane *other;
10450         struct drm_plane_state *old_other_state, *new_other_state;
10451         struct drm_crtc_state *new_crtc_state;
10452         int i;
10453
10454         /*
10455          * TODO: Remove this hack once the checks below are sufficient
10456          * to determine when we need to reset all the planes on the
10457          * stream.
10458          */
10459         if (state->allow_modeset)
10460                 return true;
10461
10462         /* Exit early if we know that we're adding or removing the plane. */
10463         if (old_plane_state->crtc != new_plane_state->crtc)
10464                 return true;
10465
10466         /* old_crtc == new_crtc == NULL, plane not in context. */
10467         if (!new_plane_state->crtc)
10468                 return false;
10469
10470         new_crtc_state =
10471                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10472
10473         if (!new_crtc_state)
10474                 return true;
10475
10476         /* CRTC Degamma changes currently require us to recreate planes. */
10477         if (new_crtc_state->color_mgmt_changed)
10478                 return true;
10479
10480         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10481                 return true;
10482
10483         /*
10484          * If there are any new primary or overlay planes being added or
10485          * removed then the z-order can potentially change. To ensure
10486          * correct z-order and pipe acquisition the current DC architecture
10487          * requires us to remove and recreate all existing planes.
10488          *
10489          * TODO: Come up with a more elegant solution for this.
10490          */
10491         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10492                 struct amdgpu_framebuffer *old_afb, *new_afb;

10493                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10494                         continue;
10495
10496                 if (old_other_state->crtc != new_plane_state->crtc &&
10497                     new_other_state->crtc != new_plane_state->crtc)
10498                         continue;
10499
10500                 if (old_other_state->crtc != new_other_state->crtc)
10501                         return true;
10502
10503                 /* Src/dst size and scaling updates. */
10504                 if (old_other_state->src_w != new_other_state->src_w ||
10505                     old_other_state->src_h != new_other_state->src_h ||
10506                     old_other_state->crtc_w != new_other_state->crtc_w ||
10507                     old_other_state->crtc_h != new_other_state->crtc_h)
10508                         return true;
10509
10510                 /* Rotation / mirroring updates. */
10511                 if (old_other_state->rotation != new_other_state->rotation)
10512                         return true;
10513
10514                 /* Blending updates. */
10515                 if (old_other_state->pixel_blend_mode !=
10516                     new_other_state->pixel_blend_mode)
10517                         return true;
10518
10519                 /* Alpha updates. */
10520                 if (old_other_state->alpha != new_other_state->alpha)
10521                         return true;
10522
10523                 /* Colorspace changes. */
10524                 if (old_other_state->color_range != new_other_state->color_range ||
10525                     old_other_state->color_encoding != new_other_state->color_encoding)
10526                         return true;
10527
10528                 /* Framebuffer checks come last. */
10529                 if (!old_other_state->fb || !new_other_state->fb)
10530                         continue;
10531
10532                 /* Pixel format changes can require bandwidth updates. */
10533                 if (old_other_state->fb->format != new_other_state->fb->format)
10534                         return true;
10535
10536                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10537                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10538
10539                 /* Tiling and DCC changes also require bandwidth updates. */
10540                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10541                     old_afb->base.modifier != new_afb->base.modifier)
10542                         return true;
10543         }
10544
10545         return false;
10546 }
10547
10548 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10549                               struct drm_plane_state *new_plane_state,
10550                               struct drm_framebuffer *fb)
10551 {
10552         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10553         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10554         unsigned int pitch;
10555         bool linear;
10556
10557         if (fb->width > new_acrtc->max_cursor_width ||
10558             fb->height > new_acrtc->max_cursor_height) {
10559                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10560                                  new_plane_state->fb->width,
10561                                  new_plane_state->fb->height);
10562                 return -EINVAL;
10563         }
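              /*
               * Plane src coordinates are 16.16 fixed point, so fb->width << 16
               * is the full framebuffer width; any difference would mean
               * cropping, which the cursor plane does not support.
               */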
10564         if (new_plane_state->src_w != fb->width << 16 ||
10565             new_plane_state->src_h != fb->height << 16) {
10566                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10567                 return -EINVAL;
10568         }
10569
10570         /* Pitch in pixels */
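              /*
               * For example, a 64-pixel-wide XRGB8888 cursor (4 bytes per
               * pixel) has pitches[0] == 256 bytes and thus a 64 pixel pitch.
               */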
10571         pitch = fb->pitches[0] / fb->format->cpp[0];
10572
10573         if (fb->width != pitch) {
10574                         DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10575                                  fb->width, pitch);
10576                 return -EINVAL;
10577         }
10578
10579         switch (pitch) {
10580         case 64:
10581         case 128:
10582         case 256:
10583                 /* FB pitch is supported by cursor plane */
10584                 break;
10585         default:
10586                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10587                 return -EINVAL;
10588         }
10589
10590         /* Core DRM takes care of checking FB modifiers, so we only need to
10591          * check tiling flags when the FB doesn't have a modifier. */
10592         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10593                 if (adev->family < AMDGPU_FAMILY_AI) {
10594                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10595                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10596                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10597                 } else {
10598                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10599                 }
10600                 if (!linear) {
10601                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10602                         return -EINVAL;
10603                 }
10604         }
10605
10606         return 0;
10607 }
10608
10609 static int dm_update_plane_state(struct dc *dc,
10610                                  struct drm_atomic_state *state,
10611                                  struct drm_plane *plane,
10612                                  struct drm_plane_state *old_plane_state,
10613                                  struct drm_plane_state *new_plane_state,
10614                                  bool enable,
10615                                  bool *lock_and_validation_needed)
10616 {
10618         struct dm_atomic_state *dm_state = NULL;
10619         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10620         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10621         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10622         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10623         struct amdgpu_crtc *new_acrtc;
10624         bool needs_reset;
10625         int ret = 0;
10626
10628         new_plane_crtc = new_plane_state->crtc;
10629         old_plane_crtc = old_plane_state->crtc;
10630         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10631         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10632
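              /*
               * Cursor planes are not backed by a dc_plane_state, so only basic
               * position and FB checks are done here; cursor scaling against
               * the underlying planes is validated in dm_check_crtc_cursor().
               */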
10633         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10634                 if (!enable || !new_plane_crtc ||
10635                     drm_atomic_plane_disabling(plane->state, new_plane_state))
10636                         return 0;
10637
10638                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10639
10640                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10641                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10642                         return -EINVAL;
10643                 }
10644
10645                 if (new_plane_state->fb) {
10646                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10647                                                  new_plane_state->fb);
10648                         if (ret)
10649                                 return ret;
10650                 }
10651
10652                 return 0;
10653         }
10654
10655         needs_reset = should_reset_plane(state, plane, old_plane_state,
10656                                          new_plane_state);
10657
10658         /* Remove any changed/removed planes */
10659         if (!enable) {
10660                 if (!needs_reset)
10661                         return 0;
10662
10663                 if (!old_plane_crtc)
10664                         return 0;
10665
10666                 old_crtc_state = drm_atomic_get_old_crtc_state(
10667                                 state, old_plane_crtc);
10668                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10669
10670                 if (!dm_old_crtc_state->stream)
10671                         return 0;
10672
10673                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10674                                 plane->base.id, old_plane_crtc->base.id);
10675
10676                 ret = dm_atomic_get_state(state, &dm_state);
10677                 if (ret)
10678                         return ret;
10679
10680                 if (!dc_remove_plane_from_context(
10681                                 dc,
10682                                 dm_old_crtc_state->stream,
10683                                 dm_old_plane_state->dc_state,
10684                                 dm_state->context)) {
10686                         return -EINVAL;
10687                 }
10688
10690                 dc_plane_state_release(dm_old_plane_state->dc_state);
10691                 dm_new_plane_state->dc_state = NULL;
10692
10693                 *lock_and_validation_needed = true;
10694
10695         } else { /* Add new planes */
10696                 struct dc_plane_state *dc_new_plane_state;
10697
10698                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10699                         return 0;
10700
10701                 if (!new_plane_crtc)
10702                         return 0;
10703
10704                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10705                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10706
10707                 if (!dm_new_crtc_state->stream)
10708                         return 0;
10709
10710                 if (!needs_reset)
10711                         return 0;
10712
10713                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10714                 if (ret)
10715                         return ret;
10716
10717                 WARN_ON(dm_new_plane_state->dc_state);
10718
10719                 dc_new_plane_state = dc_create_plane_state(dc);
10720                 if (!dc_new_plane_state)
10721                         return -ENOMEM;
10722
10723                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10724                                  plane->base.id, new_plane_crtc->base.id);
10725
10726                 ret = fill_dc_plane_attributes(
10727                         drm_to_adev(new_plane_crtc->dev),
10728                         dc_new_plane_state,
10729                         new_plane_state,
10730                         new_crtc_state);
10731                 if (ret) {
10732                         dc_plane_state_release(dc_new_plane_state);
10733                         return ret;
10734                 }
10735
10736                 ret = dm_atomic_get_state(state, &dm_state);
10737                 if (ret) {
10738                         dc_plane_state_release(dc_new_plane_state);
10739                         return ret;
10740                 }
10741
10742                 /*
10743                  * Any atomic check errors that occur after this will
10744                  * not need a release. The plane state will be attached
10745                  * to the stream, and therefore part of the atomic
10746                  * state. It'll be released when the atomic state is
10747                  * cleaned.
10748                  */
10749                 if (!dc_add_plane_to_context(
10750                                 dc,
10751                                 dm_new_crtc_state->stream,
10752                                 dc_new_plane_state,
10753                                 dm_state->context)) {
10755                         dc_plane_state_release(dc_new_plane_state);
10756                         return -EINVAL;
10757                 }
10758
10759                 dm_new_plane_state->dc_state = dc_new_plane_state;
10760
10761                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10762
10763                 /* Tell DC to do a full surface update every time there
10764                  * is a plane change. Inefficient, but works for now.
10765                  */
10766                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10767
10768                 *lock_and_validation_needed = true;
10769         }
10770
10772         return ret;
10773 }
10774
10775 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10776                                        int *src_w, int *src_h)
10777 {
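              /* src_w/src_h are 16.16 fixed point; 90/270 rotation swaps them */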
10778         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10779         case DRM_MODE_ROTATE_90:
10780         case DRM_MODE_ROTATE_270:
10781                 *src_w = plane_state->src_h >> 16;
10782                 *src_h = plane_state->src_w >> 16;
10783                 break;
10784         case DRM_MODE_ROTATE_0:
10785         case DRM_MODE_ROTATE_180:
10786         default:
10787                 *src_w = plane_state->src_w >> 16;
10788                 *src_h = plane_state->src_h >> 16;
10789                 break;
10790         }
10791 }
10792
10793 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10794                                 struct drm_crtc *crtc,
10795                                 struct drm_crtc_state *new_crtc_state)
10796 {
10797         struct drm_plane *cursor = crtc->cursor, *underlying;
10798         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10799         int i;
10800         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10801         int cursor_src_w, cursor_src_h;
10802         int underlying_src_w, underlying_src_h;
10803
10804         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10805          * cursor per pipe but it inherits its scaling and positioning from the
10806          * underlying pipe. Check that the cursor plane's blending properties
10807          * match the underlying planes'. */
10808
10809         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10810         if (!new_cursor_state || !new_cursor_state->fb)
10811                 return 0;
10813
10814         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
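              /*
               * Scale factors are compared in 1/1000th units to stay in integer
               * arithmetic, e.g. a 64x64 cursor displayed at 64x64 has a scale
               * of 1000 on both axes.
               */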
10815         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10816         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10817
10818         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10819                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10820                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10821                         continue;
10822
10823                 /* Ignore disabled planes */
10824                 if (!new_underlying_state->fb)
10825                         continue;
10826
10827                 dm_get_oriented_plane_size(new_underlying_state,
10828                                            &underlying_src_w, &underlying_src_h);
10829                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10830                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10831
10832                 if (cursor_scale_w != underlying_scale_w ||
10833                     cursor_scale_h != underlying_scale_h) {
10834                         drm_dbg_atomic(crtc->dev,
10835                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10836                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10837                         return -EINVAL;
10838                 }
10839
10840                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10841                 if (new_underlying_state->crtc_x <= 0 &&
10842                     new_underlying_state->crtc_y <= 0 &&
10843                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10844                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10845                         break;
10846         }
10847
10848         return 0;
10849 }
10850
10851 #if defined(CONFIG_DRM_AMD_DC_DCN)
10852 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10853 {
10854         struct drm_connector *connector;
10855         struct drm_connector_state *conn_state, *old_conn_state;
10856         struct amdgpu_dm_connector *aconnector = NULL;
10857         int i;

10858         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10859                 if (!conn_state->crtc)
10860                         conn_state = old_conn_state;
10861
10862                 if (conn_state->crtc != crtc)
10863                         continue;
10864
10865                 aconnector = to_amdgpu_dm_connector(connector);
10866                 if (!aconnector->port || !aconnector->mst_port)
10867                         aconnector = NULL;
10868                 else
10869                         break;
10870         }
10871
10872         if (!aconnector)
10873                 return 0;
10874
10875         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10876 }
10877 #endif
10878
10879 /**
10880  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10881  * @dev: The DRM device
10882  * @state: The atomic state to validate
10883  *
10884  * Validate that the given atomic state is programmable by DC into hardware.
10885  * This involves constructing a &struct dc_state reflecting the new hardware
10886  * state we wish to commit, then querying DC to see if it is programmable. It's
10887  * important not to modify the existing DC state. Otherwise, atomic_check
10888  * may unexpectedly commit hardware changes.
10889  *
10890  * When validating the DC state, it's important that the right locks are
10891  * acquired. For full updates case which removes/adds/updates streams on one
10892  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10893  * that any such full update commit will wait for completion of any outstanding
10894  * flip using DRMs synchronization events.
10895  *
10896  * Note that DM adds the affected connectors for all CRTCs in state, when that
10897  * might not seem necessary. This is because DC stream creation requires the
10898  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10899  * be possible but non-trivial - a possible TODO item.
10900  *
10901  * Return: 0 on success, or a negative error code if validation failed.
10902  */
10903 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10904                                   struct drm_atomic_state *state)
10905 {
10906         struct amdgpu_device *adev = drm_to_adev(dev);
10907         struct dm_atomic_state *dm_state = NULL;
10908         struct dc *dc = adev->dm.dc;
10909         struct drm_connector *connector;
10910         struct drm_connector_state *old_con_state, *new_con_state;
10911         struct drm_crtc *crtc;
10912         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10913         struct drm_plane *plane;
10914         struct drm_plane_state *old_plane_state, *new_plane_state;
10915         enum dc_status status;
10916         int ret, i;
10917         bool lock_and_validation_needed = false;
10918         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10919 #if defined(CONFIG_DRM_AMD_DC_DCN)
10920         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10921         struct drm_dp_mst_topology_state *mst_state;
10922         struct drm_dp_mst_topology_mgr *mgr;
10923 #endif
10924
10925         trace_amdgpu_dm_atomic_check_begin(state);
10926
10927         ret = drm_atomic_helper_check_modeset(dev, state);
10928         if (ret) {
10929                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10930                 goto fail;
10931         }
10932
10933         /* Check connector changes */
10934         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10935                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10936                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10937
10938                 /* Skip connectors that are disabled or part of modeset already. */
10939                 if (!old_con_state->crtc && !new_con_state->crtc)
10940                         continue;
10941
10942                 if (!new_con_state->crtc)
10943                         continue;
10944
10945                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10946                 if (IS_ERR(new_crtc_state)) {
10947                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10948                         ret = PTR_ERR(new_crtc_state);
10949                         goto fail;
10950                 }
10951
10952                 if (dm_old_con_state->abm_level !=
10953                     dm_new_con_state->abm_level)
10954                         new_crtc_state->connectors_changed = true;
10955         }
10956
10957 #if defined(CONFIG_DRM_AMD_DC_DCN)
10958         if (dc_resource_is_dsc_encoding_supported(dc)) {
10959                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10960                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10961                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10962                                 if (ret) {
10963                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10964                                         goto fail;
10965                                 }
10966                         }
10967                 }
10968                 pre_validate_dsc(state, &dm_state, vars);
10969         }
10970 #endif
10971         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10972                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10973
10974                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10975                     !new_crtc_state->color_mgmt_changed &&
10976                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10977                     !dm_old_crtc_state->dsc_force_changed)
10978                         continue;
10979
10980                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10981                 if (ret) {
10982                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10983                         goto fail;
10984                 }
10985
10986                 if (!new_crtc_state->enable)
10987                         continue;
10988
10989                 ret = drm_atomic_add_affected_connectors(state, crtc);
10990                 if (ret) {
10991                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10992                         goto fail;
10993                 }
10994
10995                 ret = drm_atomic_add_affected_planes(state, crtc);
10996                 if (ret) {
10997                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10998                         goto fail;
10999                 }
11000
11001                 if (dm_old_crtc_state->dsc_force_changed)
11002                         new_crtc_state->mode_changed = true;
11003         }
11004
11005         /*
11006          * Add all primary and overlay planes on the CRTC to the state
11007          * whenever a plane is enabled to maintain correct z-ordering
11008          * and to enable fast surface updates.
11009          */
11010         drm_for_each_crtc(crtc, dev) {
11011                 bool modified = false;
11012
11013                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11014                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11015                                 continue;
11016
11017                         if (new_plane_state->crtc == crtc ||
11018                             old_plane_state->crtc == crtc) {
11019                                 modified = true;
11020                                 break;
11021                         }
11022                 }
11023
11024                 if (!modified)
11025                         continue;
11026
11027                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11028                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11029                                 continue;
11030
11031                         new_plane_state =
11032                                 drm_atomic_get_plane_state(state, plane);
11033
11034                         if (IS_ERR(new_plane_state)) {
11035                                 ret = PTR_ERR(new_plane_state);
11036                                 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11037                                 goto fail;
11038                         }
11039                 }
11040         }
11041
11042         /* Remove existing planes if they are modified */
11043         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11044                 ret = dm_update_plane_state(dc, state, plane,
11045                                             old_plane_state,
11046                                             new_plane_state,
11047                                             false,
11048                                             &lock_and_validation_needed);
11049                 if (ret) {
11050                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11051                         goto fail;
11052                 }
11053         }
11054
11055         /* Disable all crtcs which require disable */
11056         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11057                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11058                                            old_crtc_state,
11059                                            new_crtc_state,
11060                                            false,
11061                                            &lock_and_validation_needed);
11062                 if (ret) {
11063                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11064                         goto fail;
11065                 }
11066         }
11067
11068         /* Enable all crtcs which require enable */
11069         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11070                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11071                                            old_crtc_state,
11072                                            new_crtc_state,
11073                                            true,
11074                                            &lock_and_validation_needed);
11075                 if (ret) {
11076                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11077                         goto fail;
11078                 }
11079         }
11080
11081         /* Add new/modified planes */
11082         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11083                 ret = dm_update_plane_state(dc, state, plane,
11084                                             old_plane_state,
11085                                             new_plane_state,
11086                                             true,
11087                                             &lock_and_validation_needed);
11088                 if (ret) {
11089                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11090                         goto fail;
11091                 }
11092         }
11093
11094         /* Run this here since we want to validate the streams we created */
11095         ret = drm_atomic_helper_check_planes(dev, state);
11096         if (ret) {
11097                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11098                 goto fail;
11099         }
11100
11101         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11102                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11103                 if (dm_new_crtc_state->mpo_requested)
11104                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11105         }
11106
11107         /* Check cursor planes scaling */
11108         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11109                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11110                 if (ret) {
11111                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11112                         goto fail;
11113                 }
11114         }
11115
11116         if (state->legacy_cursor_update) {
11117                 /*
11118                  * This is a fast cursor update coming from the plane update
11119                  * helper, check if it can be done asynchronously for better
11120                  * performance.
11121                  */
11122                 state->async_update =
11123                         !drm_atomic_helper_async_check(dev, state);
11124
11125                 /*
11126                  * Skip the remaining global validation if this is an async
11127                  * update. Cursor updates can be done without affecting
11128                  * state or bandwidth calcs and this avoids the performance
11129                  * penalty of locking the private state object and
11130                  * allocating a new dc_state.
11131                  */
11132                 if (state->async_update)
11133                         return 0;
11134         }
11135
11136         /* Check scaling and underscan changes */
11137         /* TODO: Scaling-change validation was removed because a new stream
11138          * cannot be committed into the context w/o causing a full reset.
11139          * Need to decide how to handle this.
11140          */
11141         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11142                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11143                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11144                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11145
11146                 /* Skip any modesets/resets */
11147                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11148                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11149                         continue;
11150
11151                 /* Skip anything that is not a scaling or underscan change */
11152                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11153                         continue;
11154
11155                 lock_and_validation_needed = true;
11156         }
11157
11158 #if defined(CONFIG_DRM_AMD_DC_DCN)
11159         /* Set the slot info for each mst_state based on the link encoding format */
11160         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11161                 struct amdgpu_dm_connector *aconnector;
11162                 struct drm_connector *connector;
11163                 struct drm_connector_list_iter iter;
11164                 u8 link_coding_cap;
11165
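                      /*
                       * 8b/10b links expose 63 usable MTP slots (slot 0 carries
                       * the MTP header) while 128b/132b links expose all 64, so
                       * the topology state needs the link's channel coding
                       * before VCPI allocation.
                       */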
11166                 if (!mgr->mst_state)
11167                         continue;
11168
11169                 drm_connector_list_iter_begin(dev, &iter);
11170                 drm_for_each_connector_iter(connector, &iter) {
11171                         int id = connector->index;
11172
11173                         if (id == mst_state->mgr->conn_base_id) {
11174                                 aconnector = to_amdgpu_dm_connector(connector);
11175                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11176                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11177
11178                                 break;
11179                         }
11180                 }
11181                 drm_connector_list_iter_end(&iter);
11183         }
11184 #endif
11185         /*
11186          * Streams and planes are reset when there are changes that affect
11187          * bandwidth. Anything that affects bandwidth needs to go through
11188          * DC global validation to ensure that the configuration can be applied
11189          * to hardware.
11190          *
11191          * We have to currently stall out here in atomic_check for outstanding
11192          * commits to finish in this case because our IRQ handlers reference
11193          * DRM state directly - we can end up disabling interrupts too early
11194          * if we don't.
11195          *
11196          * TODO: Remove this stall and drop DM state private objects.
11197          */
11198         if (lock_and_validation_needed) {
11199                 ret = dm_atomic_get_state(state, &dm_state);
11200                 if (ret) {
11201                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11202                         goto fail;
11203                 }
11204
11205                 ret = do_aquire_global_lock(dev, state);
11206                 if (ret) {
11207                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11208                         goto fail;
11209                 }
11210
11211 #if defined(CONFIG_DRM_AMD_DC_DCN)
11212                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11213                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                        ret = -EINVAL;
11214                         goto fail;
11215                 }
11216
11217                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11218                 if (ret) {
11219                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11220                         goto fail;
11221                 }
11222 #endif
11223
11224                 /*
11225                  * Perform validation of MST topology in the state:
11226                  * We need to perform MST atomic check before calling
11227                  * dc_validate_global_state(), or there is a chance
11228                  * to get stuck in an infinite loop and hang eventually.
11229                  */
11230                 ret = drm_dp_mst_atomic_check(state);
11231                 if (ret) {
11232                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11233                         goto fail;
11234                 }
11235                 status = dc_validate_global_state(dc, dm_state->context, true);
11236                 if (status != DC_OK) {
11237                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11238                                          dc_status_to_str(status), status);
11239                         ret = -EINVAL;
11240                         goto fail;
11241                 }
11242         } else {
11243                 /*
11244                  * The commit is a fast update. Fast updates shouldn't change
11245                  * the DC context, affect global validation, and can have their
11246                  * commit work done in parallel with other commits not touching
11247                  * the same resource. If we have a new DC context as part of
11248                  * the DM atomic state from validation we need to free it and
11249                  * retain the existing one instead.
11250                  *
11251                  * Furthermore, since the DM atomic state only contains the DC
11252                  * context and can safely be annulled, we can free the state
11253                  * and clear the associated private object now to free
11254                  * some memory and avoid a possible use-after-free later.
11255                  */
11256
11257                 for (i = 0; i < state->num_private_objs; i++) {
11258                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11259
11260                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11261                                 int j = state->num_private_objs - 1;
11262
11263                                 dm_atomic_destroy_state(obj,
11264                                                 state->private_objs[i].state);
11265
11266                                 /* If i is not at the end of the array then the
11267                                  * last element needs to be moved to where i was
11268                                  * before the array can safely be truncated.
11269                                  */
11270                                 if (i != j)
11271                                         state->private_objs[i] =
11272                                                 state->private_objs[j];
11273
11274                                 state->private_objs[j].ptr = NULL;
11275                                 state->private_objs[j].state = NULL;
11276                                 state->private_objs[j].old_state = NULL;
11277                                 state->private_objs[j].new_state = NULL;
11278
11279                                 state->num_private_objs = j;
11280                                 break;
11281                         }
11282                 }
11283         }
11284
11285         /* Store the overall update type for use later during commit. */
11286         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11287                 struct dm_crtc_state *dm_new_crtc_state =
11288                         to_dm_crtc_state(new_crtc_state);
11289
11290                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11291                                                          UPDATE_TYPE_FULL :
11292                                                          UPDATE_TYPE_FAST;
11293         }
11294
11295         /* Must be success */
11296         WARN_ON(ret);
11297
11298         trace_amdgpu_dm_atomic_check_finish(state, ret);
11299
11300         return ret;
11301
11302 fail:
11303         if (ret == -EDEADLK)
11304                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11305         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11306                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11307         else
11308                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11309
11310         trace_amdgpu_dm_atomic_check_finish(state, ret);
11311
11312         return ret;
11313 }
11314
11315 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11316                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11317 {
11318         uint8_t dpcd_data;
11319         bool capable = false;
11320
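              /*
               * The MSA_TIMING_PAR_IGNORED bit lives in DP_DOWN_STREAM_PORT_COUNT
               * (DPCD 0x0007); a sink that sets it can ignore the MSA timing
               * parameters, which is a prerequisite for variable refresh.
               */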
11321         if (amdgpu_dm_connector->dc_link &&
11322                 dm_helpers_dp_read_dpcd(
11323                                 NULL,
11324                                 amdgpu_dm_connector->dc_link,
11325                                 DP_DOWN_STREAM_PORT_COUNT,
11326                                 &dpcd_data,
11327                                 sizeof(dpcd_data))) {
11328                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
11329         }
11330
11331         return capable;
11332 }
11333
11334 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11335                 unsigned int offset,
11336                 unsigned int total_length,
11337                 uint8_t *data,
11338                 unsigned int length,
11339                 struct amdgpu_hdmi_vsdb_info *vsdb)
11340 {
11341         bool res;
11342         union dmub_rb_cmd cmd;
11343         struct dmub_cmd_send_edid_cea *input;
11344         struct dmub_cmd_edid_cea_output *output;
11345
11346         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11347                 return false;
11348
11349         memset(&cmd, 0, sizeof(cmd));
11350
11351         input = &cmd.edid_cea.data.input;
11352
11353         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11354         cmd.edid_cea.header.sub_type = 0;
11355         cmd.edid_cea.header.payload_bytes =
11356                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11357         input->offset = offset;
11358         input->length = length;
11359         input->cea_total_length = total_length;
11360         memcpy(input->payload, data, length);
11361
11362         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11363         if (!res) {
11364                 DRM_ERROR("EDID CEA parser failed\n");
11365                 return false;
11366         }
11367
11368         output = &cmd.edid_cea.data.output;
11369
11370         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11371                 if (!output->ack.success) {
11372                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11373                                         output->ack.offset);
11374                 }
11375         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11376                 if (!output->amd_vsdb.vsdb_found)
11377                         return false;
11378
11379                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11380                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11381                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11382                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11383         } else {
11384                 DRM_WARN("Unknown EDID CEA parser results\n");
11385                 return false;
11386         }
11387
11388         return true;
11389 }
11390
11391 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11392                 uint8_t *edid_ext, int len,
11393                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11394 {
11395         int i;
11396
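              /*
               * The DMCU parser consumes the block in 8-byte chunks, so a
               * standard 128-byte CEA extension takes 16 round trips, with the
               * AMD VSDB result only expected after the final chunk.
               */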
11397         /* send extension block to DMCU for parsing */
11398         for (i = 0; i < len; i += 8) {
11399                 bool res;
11400                 int offset;
11401
11402                 /* send 8 bytes at a time */
11403                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11404                         return false;
11405
11406                 if (i + 8 == len) {
11407                         /* whole EDID block sent, expect the parse result */
11408                         int version, min_rate, max_rate;
11409
11410                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11411                         if (res) {
11412                                 /* amd vsdb found */
11413                                 vsdb_info->freesync_supported = 1;
11414                                 vsdb_info->amd_vsdb_version = version;
11415                                 vsdb_info->min_refresh_rate_hz = min_rate;
11416                                 vsdb_info->max_refresh_rate_hz = max_rate;
11417                                 return true;
11418                         }
11419                         /* not amd vsdb */
11420                         return false;
11421                 }
11422
11423                 /* check for ack */
11424                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11425                 if (!res)
11426                         return false;
11427         }
11428
11429         return false;
11430 }
11431
11432 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11433                 uint8_t *edid_ext, int len,
11434                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11435 {
11436         int i;
11437
11438         /* send extension block to DMUB for parsing */
11439         for (i = 0; i < len; i += 8) {
11440                 /* send 8 bytes at a time */
11441                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11442                         return false;
11443         }
11444
11445         return vsdb_info->freesync_supported;
11446 }
11447
11448 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11449                 uint8_t *edid_ext, int len,
11450                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11451 {
11452         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11453
11454         if (adev->dm.dmub_srv)
11455                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11456         else
11457                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11458 }
11459
11460 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11461                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11462 {
11463         uint8_t *edid_ext = NULL;
11464         int i;
11465         bool valid_vsdb_found = false;
11466
11467         /*----- drm_find_cea_extension() -----*/
11468         /* No EDID or EDID extensions */
11469         if (edid == NULL || edid->extensions == 0)
11470                 return -ENODEV;
11471
11472         /* Find CEA extension */
11473         for (i = 0; i < edid->extensions; i++) {
11474                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11475                 if (edid_ext[0] == CEA_EXT)
11476                         break;
11477         }
11478
11479         if (i == edid->extensions)
11480                 return -ENODEV;
11481
11482         /*----- cea_db_offsets() -----*/
11483         if (edid_ext[0] != CEA_EXT)
11484                 return -ENODEV;
11485
11486         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11487
11488         return valid_vsdb_found ? i : -ENODEV;
11489 }
11490
11491 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11492                                         struct edid *edid)
11493 {
11494         int i = 0;
11495         struct detailed_timing *timing;
11496         struct detailed_non_pixel *data;
11497         struct detailed_data_monitor_range *range;
11498         struct amdgpu_dm_connector *amdgpu_dm_connector =
11499                         to_amdgpu_dm_connector(connector);
11500         struct dm_connector_state *dm_con_state = NULL;
11501         struct dc_sink *sink;
11502
11503         struct drm_device *dev = connector->dev;
11504         struct amdgpu_device *adev = drm_to_adev(dev);
11505         bool freesync_capable = false;
11506         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11507
11508         if (!connector->state) {
11509                 DRM_ERROR("%s - Connector has no state\n", __func__);
11510                 goto update;
11511         }
11512
11513         sink = amdgpu_dm_connector->dc_sink ?
11514                 amdgpu_dm_connector->dc_sink :
11515                 amdgpu_dm_connector->dc_em_sink;
11516
11517         if (!edid || !sink) {
11518                 dm_con_state = to_dm_connector_state(connector->state);
11519
11520                 amdgpu_dm_connector->min_vfreq = 0;
11521                 amdgpu_dm_connector->max_vfreq = 0;
11522                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11523                 connector->display_info.monitor_range.min_vfreq = 0;
11524                 connector->display_info.monitor_range.max_vfreq = 0;
11525                 freesync_capable = false;
11526
11527                 goto update;
11528         }
11529
11530         dm_con_state = to_dm_connector_state(connector->state);
11531
11532         if (!adev->dm.freesync_module)
11533                 goto update;
11534
11536         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
11537             sink->sink_signal == SIGNAL_TYPE_EDP) {
11538                 bool edid_check_required = false;
11539
11540                 if (edid) {
11541                         edid_check_required = is_dp_capable_without_timing_msa(
11542                                                 adev->dm.dc,
11543                                                 amdgpu_dm_connector);
11544                 }
11545
11546                 if (edid_check_required && (edid->version > 1 ||
11547                     (edid->version == 1 && edid->revision > 1))) {
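                              /*
                               * The EDID base block carries four 18-byte detailed
                               * timing/display descriptors; scan them for a
                               * monitor range descriptor.
                               */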
11548                         for (i = 0; i < 4; i++) {
11550                                 timing  = &edid->detailed_timings[i];
11551                                 data    = &timing->data.other_data;
11552                                 range   = &data->data.range;
11553                                 /*
11554                                  * Check if monitor has continuous frequency mode
11555                                  */
11556                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11557                                         continue;
11558                                 /*
11559                                  * Check for the 'range limits only' flag. If flags == 1,
11560                                  * no additional timing information is provided.
11561                                  * Default GTF, secondary GTF curve and CVT are not
11562                                  * supported.
11563                                  */
11564                                 if (range->flags != 1)
11565                                         continue;
11566
11567                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11568                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
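                                      /* EDID stores the max pixel clock in 10 MHz units */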
11569                                 amdgpu_dm_connector->pixel_clock_mhz =
11570                                         range->pixel_clock_mhz * 10;
11571
11572                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11573                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11574
11575                                 break;
11576                         }
11577
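                              /*
                               * Only advertise FreeSync when the monitor range spans
                               * more than 10 Hz, presumably because a narrower window
                               * is of little practical use for VRR.
                               */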
11578                         if (amdgpu_dm_connector->max_vfreq -
11579                             amdgpu_dm_connector->min_vfreq > 10)
11580                                 freesync_capable = true;
11583                 }
11584         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11585                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11586                 if (i >= 0 && vsdb_info.freesync_supported) {
11587                         timing  = &edid->detailed_timings[i];
11588                         data    = &timing->data.other_data;
11589
11590                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11591                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11592                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11593                                 freesync_capable = true;
11594
11595                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11596                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11597                 }
11598         }
11599
11600 update:
11601         if (dm_con_state)
11602                 dm_con_state->freesync_capable = freesync_capable;
11603
11604         if (connector->vrr_capable_property)
11605                 drm_connector_set_vrr_capable_property(connector,
11606                                                        freesync_capable);
11607 }
11608
11609 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11610 {
11611         struct amdgpu_device *adev = drm_to_adev(dev);
11612         struct dc *dc = adev->dm.dc;
11613         int i;
11614
11615         mutex_lock(&adev->dm.dc_lock);
11616         if (dc->current_state) {
11617                 for (i = 0; i < dc->current_state->stream_count; ++i)
11618                         dc->current_state->streams[i]
11619                                 ->triggered_crtc_reset.enabled =
11620                                 adev->dm.force_timing_sync;
11621
11622                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11623                 dc_trigger_sync(dc, dc->current_state);
11624         }
11625         mutex_unlock(&adev->dm.dc_lock);
11626 }
11627
11628 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11629                        uint32_t value, const char *func_name)
11630 {
11631 #ifdef DM_CHECK_ADDR_0
11632         if (address == 0) {
11633                 DC_ERR("invalid register write. address = 0\n");
11634                 return;
11635         }
11636 #endif
11637         cgs_write_register(ctx->cgs_device, address, value);
11638         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11639 }
11640
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

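	/*
	 * A direct read while the DMUB register helper is gathering an
	 * offload sequence (other than a burst write) is unexpected:
	 * flag it and return 0.
	 */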
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

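/**
 * amdgpu_dm_set_dmub_async_sync_status - translate a DMUB async-to-sync
 * access status into a return value and an operation result
 * @is_cmd_aux: true for an AUX transfer, false for a SET_CONFIG request
 * @ctx: DC context
 * @status_type: one of the DMUB_ASYNC_TO_SYNC_ACCESS_* codes
 * @operation_result: filled with an AUX_RET_* or SET_CONFIG_* code
 *
 * Returns the AUX reply length for a successful AUX access, 0 for a
 * successful SET_CONFIG request, or -1 on failure.
 */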
static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
						struct dc_context *ctx,
						uint8_t status_type,
						uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

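/**
 * amdgpu_dm_process_dmub_aux_transfer_sync - run a DMUB AUX or SET_CONFIG
 * request synchronously
 * @is_cmd_aux: true for an AUX transfer, false for a SET_CONFIG request
 * @ctx: DC context
 * @link_index: index of the link the request is sent on
 * @cmd_payload: struct aux_payload or struct set_config_cmd_payload to send
 * @operation_result: filled with an AUX_RET_* or SET_CONFIG_* code
 *
 * Submits the request to the DMUB firmware asynchronously and then waits
 * (up to 10 seconds) on the dmub_aux_transfer_done completion. For a
 * successful AUX read, the reply data is copied back into the payload.
 */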
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

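	/* Wait (up to 10s) for the DMUB notification to signal completion. */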
	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

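	/*
	 * On a successful, ACKed AUX read, copy the reply data from the
	 * DMUB notification back into the caller's payload.
	 */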
	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on CHIP_VANGOGH.
 * If everything goes well, we may consider expanding
 * seamless boot to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
		if (!adev->mman.keep_stolen_vga_memory)
			return true;
		break;
	default:
		break;
	}

	return false;
}