/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
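
/*
 * Illustrative sketch only (not compiled): the liaison pattern described
 * above, reduced to its shape. A DRM-facing hook unpacks DRM state, issues
 * the corresponding DC request, and maps the DC result back onto a DRM
 * return code. The helper name and body are hypothetical.
 */
#if 0
static int dm_example_liaison(struct drm_crtc_state *drm_state,
			      struct dc_stream_state *stream)
{
	if (!stream)
		return -EINVAL;		/* DC failure -> DRM errno */

	/* DRM request -> DC request */
	stream->dst.width = drm_state->mode.hdisplay;
	stream->dst.height = drm_state->mode.vdisplay;

	return 0;			/* DC success -> DRM success */
}
#endif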

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
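
/*
 * Illustrative sketch only (not compiled): unpacking the reg-format values
 * produced by dm_crtc_get_scanoutpos() above. The 16-bit fields mirror the
 * packing; the helper name is hypothetical.
 */
#if 0
static void dm_example_unpack_scanoutpos(u32 position, u32 vbl,
					 u32 *hpos, u32 *vpos,
					 u32 *vbl_start, u32 *vbl_end)
{
	*vpos = position & 0xffff;	/* low word: vertical position */
	*hpos = position >> 16;		/* high word: horizontal position */
	*vbl_start = vbl & 0xffff;	/* low word: vblank start line */
	*vbl_end = vbl >> 16;		/* high word: vblank end line */
}
#endif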

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	return new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED ||
	       amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: pointer to &struct common_irq_params carrying the
 *                    device and the IRQ source of the pageflip interrupt
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}
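
/*
 * Illustrative sketch only (not compiled): the equivalent queue-for-later
 * step using the generic DRM helper. It is deliberately *not* used above,
 * because drm_crtc_arm_vblank_event() samples the accurate vblank count,
 * which must not be read while inside the front-porch in VRR mode.
 */
#if 0
	/* Caller must hold dev->event_lock, as dm_pflip_high_irq() does. */
	drm_crtc_arm_vblank_event(&amdgpu_crtc->base, e);
#endif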

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only now that scanout is past the front-porch. This will also
		 * deliver page-flip completion events that have been queued to
		 * us if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
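
/*
 * Illustrative sketch only (not compiled): the refresh-rate arithmetic fed
 * to trace_amdgpu_refresh_rate_track() above. A 60 Hz frame lasts roughly
 * 16666667 ns, so dividing NSEC_PER_SEC by the measured frame duration
 * recovers the refresh rate in Hz.
 */
#if 0
	ktime_t frame_duration_ns = 16666667;	/* measured between vupdates */
	s64 refresh_rate_hz = ktime_divns(NSEC_PER_SEC, frame_duration_ns); /* 60 */
#endif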

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX-command
 * issuing thread, and signals the event to wake up that thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
				   dmub_notify_interrupt_callback_t callback,
				   bool dmub_int_thread_offload)
{
	if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
		return false;

	adev->dm.dmub_callback[type] = callback;
	adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

	return true;
}

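/*
 * Illustrative sketch only (not compiled): registering a handler with the
 * API above. dmub_hpd_callback() is the handler defined earlier in this
 * file; offloading is requested so the handler may run in thread context.
 */
#if 0
	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
					   dmub_hpd_callback, true))
		DRM_ERROR("DMUB HPD callback already registered\n");
#endif
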
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		kfree(dmub_hpd_wrk);
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by draining pending DMUB notifications
 * and DMUB trace-buffer entries.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type]) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink)
						plink->hpd_status = notify.hpd_status == DP_HPD_PLUG;
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (!dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry))
			break;

		trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
						 entry.param0, entry.param1);

		DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
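
/*
 * Illustrative numbers only: the FBC buffer sized above is the largest
 * htotal * vtotal over all modes, times 4 bytes per pixel. For a 1080p
 * mode with htotal 2200 and vtotal 1125 (assumed example timings) that is
 * 2200 * 1125 * 4 = 9900000 bytes, rounded up to PAGE_SIZE by
 * amdgpu_bo_create_kernel().
 */
#if 0
	unsigned long max_size = 2200 * 1125;	/* example htotal * vtotal */
	unsigned long fbc_bytes = max_size * 4;	/* 9900000 bytes, ~9.5 MiB */
#endif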

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
		}
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
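
/*
 * Illustrative sketch only (not compiled): the firmware-blob layout that
 * dm_dmub_hw_init() assumes, relative to ucode_array_offset_bytes:
 *
 *   [ PSP header (0x100) | inst_const payload | PSP footer (0x100) | bss/data ]
 *
 * inst_const_bytes covers header + payload + footer, hence the size math.
 */
#if 0
	const u8 *ucode = dmub_fw->data +
			  le32_to_cpu(hdr->header.ucode_array_offset_bytes);
	const u8 *inst = ucode + PSP_HEADER_BYTES;
	u32 inst_size = le32_to_cpu(hdr->inst_const_bytes) -
			PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	const u8 *bss = ucode + le32_to_cpu(hdr->inst_const_bytes);
#endif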

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
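
/*
 * Illustrative sketch only (not compiled): the shift arithmetic used above.
 * System-aperture registers hold 256 KiB units (addr >> 18), AGP registers
 * hold 16 MiB units (addr >> 24), and GART page-table addresses are split
 * into a 32-bit low part (addr >> 12) plus a 4-bit high part (addr >> 44).
 */
#if 0
	u64 addr = 0x0000008123456000ULL;	/* example GPU address */
	u32 aperture_unit = addr >> 18;		/* 256 KiB granularity */
	u32 agp_unit = addr >> 24;		/* 16 MiB granularity */
	u32 pt_low = (u32)(addr >> 12);		/* bits 43..12 */
	u32 pt_high = (u32)(addr >> 44) & 0xF;	/* bits 47..44 */
#endif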
static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/* Control PSR based on vblank requirements from OS */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}
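
/*
 * Illustrative sketch only (not compiled): how work for the handler above
 * is typically produced. The producer takes a dc_stream reference that
 * vblank_control_worker() drops via dc_stream_release(); the workqueue
 * name (dm->vblank_control_workqueue) is assumed here.
 */
#if 0
	struct vblank_control_work *work = kzalloc(sizeof(*work), GFP_ATOMIC);

	if (work) {
		INIT_WORK(&work->work, vblank_control_worker);
		work->dm = dm;
		work->acrtc = acrtc;
		work->enable = enable;
		if (stream) {
			dc_stream_retain(stream);	/* released by the worker */
			work->stream = stream;
		}
		queue_work(dm->vblank_control_workqueue, &work->work);
	}
#endif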

#endif

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
	int i = 0;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	/* Unwind: destroy the workqueues created so far and free the array. */
	while (--i >= 0)
		destroy_workqueue(hpd_rx_offload_wq[i].wq);
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}
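
/*
 * Illustrative sketch only: what a new amdgpu_stutter_quirk_list entry
 * would look like. The IDs below are placeholders, not a real quirked
 * system; a real entry should cite a bug report like the existing one.
 */
#if 0
	/* { chip_vendor, chip_device, subsys_vendor, subsys_device, revision } */
	{ 0x1002, 0x15d8, 0x1043, 0x876b, 0xc1 },
#endif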
1357
1358 static int amdgpu_dm_init(struct amdgpu_device *adev)
1359 {
1360         struct dc_init_data init_data;
1361 #ifdef CONFIG_DRM_AMD_DC_HDCP
1362         struct dc_callback_init init_params;
1363 #endif
1364         int r;
1365
1366         adev->dm.ddev = adev_to_drm(adev);
1367         adev->dm.adev = adev;
1368
1369         /* Zero all the fields */
1370         memset(&init_data, 0, sizeof(init_data));
1371 #ifdef CONFIG_DRM_AMD_DC_HDCP
1372         memset(&init_params, 0, sizeof(init_params));
1373 #endif
1374
1375         mutex_init(&adev->dm.dc_lock);
1376         mutex_init(&adev->dm.audio_lock);
1377 #if defined(CONFIG_DRM_AMD_DC_DCN)
1378         spin_lock_init(&adev->dm.vblank_lock);
1379 #endif
1380
        if (amdgpu_dm_irq_init(adev)) {
1382                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1383                 goto error;
1384         }
1385
1386         init_data.asic_id.chip_family = adev->family;
1387
1388         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1389         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1390         init_data.asic_id.chip_id = adev->pdev->device;
1391
1392         init_data.asic_id.vram_width = adev->gmc.vram_width;
1393         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1394         init_data.asic_id.atombios_base_address =
1395                 adev->mode_info.atom_context->bios;
1396
1397         init_data.driver = adev;
1398
1399         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1400
1401         if (!adev->dm.cgs_device) {
1402                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1403                 goto error;
1404         }
1405
1406         init_data.cgs_device = adev->dm.cgs_device;
1407
1408         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1409
1410         switch (adev->asic_type) {
1411         case CHIP_CARRIZO:
1412         case CHIP_STONEY:
1413                 init_data.flags.gpu_vm_support = true;
1414                 break;
1415         default:
1416                 switch (adev->ip_versions[DCE_HWIP][0]) {
1417                 case IP_VERSION(2, 1, 0):
1418                         init_data.flags.gpu_vm_support = true;
1419                         switch (adev->dm.dmcub_fw_version) {
1420                         case 0: /* development */
1421                         case 0x1: /* linux-firmware.git hash 6d9f399 */
1422                         case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1423                                 init_data.flags.disable_dmcu = false;
1424                                 break;
1425                         default:
1426                                 init_data.flags.disable_dmcu = true;
1427                         }
1428                         break;
1429                 case IP_VERSION(1, 0, 0):
1430                 case IP_VERSION(1, 0, 1):
1431                 case IP_VERSION(3, 0, 1):
1432                 case IP_VERSION(3, 1, 2):
1433                 case IP_VERSION(3, 1, 3):
1434                         init_data.flags.gpu_vm_support = true;
1435                         break;
1436                 case IP_VERSION(2, 0, 3):
1437                         init_data.flags.disable_dmcu = true;
1438                         break;
1439                 default:
1440                         break;
1441                 }
1442                 break;
1443         }
1444
1445         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1446                 init_data.flags.fbc_support = true;
1447
1448         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1449                 init_data.flags.multi_mon_pp_mclk_switch = true;
1450
1451         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1452                 init_data.flags.disable_fractional_pwm = true;
1453
1454         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1455                 init_data.flags.edp_no_power_sequencing = true;
1456
1457         init_data.flags.power_down_display_on_boot = true;
1458
1459         INIT_LIST_HEAD(&adev->dm.da_list);
1460         /* Display Core create. */
1461         adev->dm.dc = dc_create(&init_data);
1462
        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }
1469
1470         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1471                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1472                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1473         }
1474
        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter =
                        !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1477         if (dm_should_disable_stutter(adev->pdev))
1478                 adev->dm.dc->debug.disable_stutter = true;
1479
1480         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1481                 adev->dm.dc->debug.disable_stutter = true;
1482
1483         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1484                 adev->dm.dc->debug.disable_dsc = true;
1485
1486         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1487                 adev->dm.dc->debug.disable_clock_gate = true;
1488
1489         r = dm_dmub_hw_init(adev);
1490         if (r) {
1491                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1492                 goto error;
1493         }
1494
1495         dc_hardware_init(adev->dm.dc);
1496
1497         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1498         if (!adev->dm.hpd_rx_offload_wq) {
1499                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1500                 goto error;
1501         }
1502
1503 #if defined(CONFIG_DRM_AMD_DC_DCN)
1504         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1505                 struct dc_phy_addr_space_config pa_config;
1506
1507                 mmhub_read_system_context(adev, &pa_config);
1508
1509                 // Call the DC init_memory func
1510                 dc_setup_system_context(adev->dm.dc, &pa_config);
1511         }
1512 #endif
1513
        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module)
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                 adev->dm.freesync_module);
1521
1522         amdgpu_dm_init_color_mod();
1523
1524 #if defined(CONFIG_DRM_AMD_DC_DCN)
1525         if (adev->dm.dc->caps.max_links > 0) {
1526                 adev->dm.vblank_control_workqueue =
1527                         create_singlethread_workqueue("dm_vblank_control_workqueue");
                if (!adev->dm.vblank_control_workqueue)
                        DRM_ERROR("amdgpu: failed to create vblank_control_workqueue.\n");
1530         }
1531 #endif
1532
1533 #ifdef CONFIG_DRM_AMD_DC_HDCP
1534         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1535                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1536
1537                 if (!adev->dm.hdcp_workqueue)
1538                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1539                 else
1540                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1541
1542                 dc_init_callbacks(adev->dm.dc, &init_params);
1543         }
1544 #endif
1545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1546         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1547 #endif
1548         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1549                 init_completion(&adev->dm.dmub_aux_transfer_done);
1550                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
                        DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
                        goto error;
                }
1555
1556                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1557                 if (!adev->dm.delayed_hpd_wq) {
1558                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1559                         goto error;
1560                 }
1561
1562                 amdgpu_dm_outbox_init(adev);
1563 #if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
                        dmub_aux_setconfig_callback, false)) {
                        DRM_ERROR("amdgpu: failed to register DMUB AUX callback\n");
                        goto error;
                }
                if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
                        DRM_ERROR("amdgpu: failed to register DMUB HPD callback\n");
                        goto error;
                }
                if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
                        DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback\n");
                        goto error;
                }
1577 #endif /* CONFIG_DRM_AMD_DC_DCN */
1578         }
1579
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }
1585
1586         /* create fake encoders for MST */
1587         dm_dp_create_fake_mst_encoders(adev);
1588
1589         /* TODO: Add_display_info? */
1590
1591         /* TODO use dynamic cursor width */
1592         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1593         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1594
        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

1602         DRM_DEBUG_DRIVER("KMS initialized.\n");
1603
1604         return 0;
1605 error:
1606         amdgpu_dm_fini(adev);
1607
1608         return -EINVAL;
1609 }
1610
1611 static int amdgpu_dm_early_fini(void *handle)
1612 {
1613         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1614
1615         amdgpu_dm_audio_fini(adev);
1616
1617         return 0;
1618 }
1619
1620 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1621 {
1622         int i;
1623
1624 #if defined(CONFIG_DRM_AMD_DC_DCN)
1625         if (adev->dm.vblank_control_workqueue) {
1626                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1627                 adev->dm.vblank_control_workqueue = NULL;
1628         }
1629 #endif
1630
1631         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1632                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1633         }
1634
1635         amdgpu_dm_destroy_drm_device(&adev->dm);
1636
1637 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1638         if (adev->dm.crc_rd_wrk) {
1639                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1640                 kfree(adev->dm.crc_rd_wrk);
1641                 adev->dm.crc_rd_wrk = NULL;
1642         }
1643 #endif
1644 #ifdef CONFIG_DRM_AMD_DC_HDCP
1645         if (adev->dm.hdcp_workqueue) {
1646                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1647                 adev->dm.hdcp_workqueue = NULL;
1648         }
1649
1650         if (adev->dm.dc)
1651                 dc_deinit_callbacks(adev->dm.dc);
1652 #endif
1653
        /* amdgpu_dm_fini() is also reached from the amdgpu_dm_init() error
         * path, where adev->dm.dc may still be NULL, so guard the accesses.
         */
        if (adev->dm.dc)
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

        if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1657                 kfree(adev->dm.dmub_notify);
1658                 adev->dm.dmub_notify = NULL;
1659                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1660                 adev->dm.delayed_hpd_wq = NULL;
1661         }
1662
1663         if (adev->dm.dmub_bo)
1664                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1665                                       &adev->dm.dmub_bo_gpu_addr,
1666                                       &adev->dm.dmub_bo_cpu_addr);
1667
1668         if (adev->dm.hpd_rx_offload_wq) {
1669                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1670                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1671                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1672                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1673                         }
1674                 }
1675
1676                 kfree(adev->dm.hpd_rx_offload_wq);
1677                 adev->dm.hpd_rx_offload_wq = NULL;
1678         }
1679
1680         /* DC Destroy TODO: Replace destroy DAL */
1681         if (adev->dm.dc)
1682                 dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */
1688
1689         if (adev->dm.cgs_device) {
1690                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1691                 adev->dm.cgs_device = NULL;
1692         }
1693         if (adev->dm.freesync_module) {
1694                 mod_freesync_destroy(adev->dm.freesync_module);
1695                 adev->dm.freesync_module = NULL;
1696         }
1697
        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}
1703
1704 static int load_dmcu_fw(struct amdgpu_device *adev)
1705 {
1706         const char *fw_name_dmcu = NULL;
1707         int r;
1708         const struct dmcu_firmware_header_v1_0 *hdr;
1709
        switch (adev->asic_type) {
1711 #if defined(CONFIG_DRM_AMD_DC_SI)
1712         case CHIP_TAHITI:
1713         case CHIP_PITCAIRN:
1714         case CHIP_VERDE:
1715         case CHIP_OLAND:
1716 #endif
1717         case CHIP_BONAIRE:
1718         case CHIP_HAWAII:
1719         case CHIP_KAVERI:
1720         case CHIP_KABINI:
1721         case CHIP_MULLINS:
1722         case CHIP_TONGA:
1723         case CHIP_FIJI:
1724         case CHIP_CARRIZO:
1725         case CHIP_STONEY:
1726         case CHIP_POLARIS11:
1727         case CHIP_POLARIS10:
1728         case CHIP_POLARIS12:
1729         case CHIP_VEGAM:
1730         case CHIP_VEGA10:
1731         case CHIP_VEGA12:
1732         case CHIP_VEGA20:
1733                 return 0;
1734         case CHIP_NAVI12:
1735                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1736                 break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
1745         default:
1746                 switch (adev->ip_versions[DCE_HWIP][0]) {
1747                 case IP_VERSION(2, 0, 2):
1748                 case IP_VERSION(2, 0, 3):
1749                 case IP_VERSION(2, 0, 0):
1750                 case IP_VERSION(2, 1, 0):
1751                 case IP_VERSION(3, 0, 0):
1752                 case IP_VERSION(3, 0, 2):
1753                 case IP_VERSION(3, 0, 3):
1754                 case IP_VERSION(3, 0, 1):
1755                 case IP_VERSION(3, 1, 2):
1756                 case IP_VERSION(3, 1, 3):
1757                         return 0;
1758                 default:
1759                         break;
1760                 }
1761                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1762                 return -EINVAL;
1763         }
1764
1765         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1766                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1767                 return 0;
1768         }
1769
1770         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1771         if (r == -ENOENT) {
1772                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1773                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1774                 adev->dm.fw_dmcu = NULL;
1775                 return 0;
1776         }
1777         if (r) {
1778                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1779                         fw_name_dmcu);
1780                 return r;
1781         }
1782
1783         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1784         if (r) {
1785                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1786                         fw_name_dmcu);
1787                 release_firmware(adev->dm.fw_dmcu);
1788                 adev->dm.fw_dmcu = NULL;
1789                 return r;
1790         }
1791
1792         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1793         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1794         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1795         adev->firmware.fw_size +=
1796                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1797
1798         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1799         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1800         adev->firmware.fw_size +=
1801                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1802
1803         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1804
1805         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1806
1807         return 0;
1808 }
1809
1810 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1811 {
1812         struct amdgpu_device *adev = ctx;
1813
1814         return dm_read_reg(adev->dm.dc->ctx, address);
1815 }
1816
1817 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1818                                      uint32_t value)
1819 {
1820         struct amdgpu_device *adev = ctx;
1821
        dm_write_reg(adev->dm.dc->ctx, address, value);
1823 }
1824
1825 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1826 {
1827         struct dmub_srv_create_params create_params;
1828         struct dmub_srv_region_params region_params;
1829         struct dmub_srv_region_info region_info;
1830         struct dmub_srv_fb_params fb_params;
1831         struct dmub_srv_fb_info *fb_info;
1832         struct dmub_srv *dmub_srv;
1833         const struct dmcub_firmware_header_v1_0 *hdr;
1834         const char *fw_name_dmub;
1835         enum dmub_asic dmub_asic;
1836         enum dmub_status status;
1837         int r;
1838
1839         switch (adev->ip_versions[DCE_HWIP][0]) {
1840         case IP_VERSION(2, 1, 0):
1841                 dmub_asic = DMUB_ASIC_DCN21;
1842                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1843                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1844                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1845                 break;
1846         case IP_VERSION(3, 0, 0):
1847                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1848                         dmub_asic = DMUB_ASIC_DCN30;
1849                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1850                 } else {
1851                         dmub_asic = DMUB_ASIC_DCN30;
1852                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1853                 }
1854                 break;
1855         case IP_VERSION(3, 0, 1):
1856                 dmub_asic = DMUB_ASIC_DCN301;
1857                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1858                 break;
1859         case IP_VERSION(3, 0, 2):
1860                 dmub_asic = DMUB_ASIC_DCN302;
1861                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1862                 break;
1863         case IP_VERSION(3, 0, 3):
1864                 dmub_asic = DMUB_ASIC_DCN303;
1865                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1866                 break;
1867         case IP_VERSION(3, 1, 2):
1868         case IP_VERSION(3, 1, 3):
1869                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1870                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
                break;
        default:
1874                 /* ASIC doesn't support DMUB. */
1875                 return 0;
1876         }
1877
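        /*
         * DMUB firmware is optional: on a load or validation failure below we
         * return 0 and leave adev->dm.dmub_srv NULL, and later hardware init
         * is expected to treat the missing DMUB service as "not supported"
         * rather than as a fatal error.
         */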
1878         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1879         if (r) {
1880                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1881                 return 0;
1882         }
1883
1884         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1885         if (r) {
1886                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1887                 return 0;
1888         }
1889
1890         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1891         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1892
1893         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1894                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1895                         AMDGPU_UCODE_ID_DMCUB;
1896                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1897                         adev->dm.dmub_fw;
1898                 adev->firmware.fw_size +=
1899                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1900
1901                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1902                          adev->dm.dmcub_fw_version);
        }

        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1907         dmub_srv = adev->dm.dmub_srv;
1908
1909         if (!dmub_srv) {
1910                 DRM_ERROR("Failed to allocate DMUB service!\n");
1911                 return -ENOMEM;
1912         }
1913
1914         memset(&create_params, 0, sizeof(create_params));
1915         create_params.user_ctx = adev;
1916         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1917         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1918         create_params.asic = dmub_asic;
1919
1920         /* Create the DMUB service. */
1921         status = dmub_srv_create(dmub_srv, &create_params);
1922         if (status != DMUB_STATUS_OK) {
1923                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1924                 return -EINVAL;
1925         }
1926
1927         /* Calculate the size of all the regions for the DMUB service. */
1928         memset(&region_params, 0, sizeof(region_params));
1929
1930         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1931                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1932         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1933         region_params.vbios_size = adev->bios_size;
1934         region_params.fw_bss_data = region_params.bss_data_size ?
1935                 adev->dm.dmub_fw->data +
1936                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1937                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1938         region_params.fw_inst_const =
1939                 adev->dm.dmub_fw->data +
1940                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1941                 PSP_HEADER_BYTES;
1942
1943         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1944                                            &region_info);
1945
1946         if (status != DMUB_STATUS_OK) {
1947                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1948                 return -EINVAL;
1949         }
1950
1951         /*
1952          * Allocate a framebuffer based on the total size of all the regions.
1953          * TODO: Move this into GART.
1954          */
1955         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1956                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1957                                     &adev->dm.dmub_bo_gpu_addr,
1958                                     &adev->dm.dmub_bo_cpu_addr);
1959         if (r)
1960                 return r;
1961
1962         /* Rebase the regions on the framebuffer address. */
1963         memset(&fb_params, 0, sizeof(fb_params));
1964         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1965         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1966         fb_params.region_info = &region_info;
1967
1968         adev->dm.dmub_fb_info =
1969                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1970         fb_info = adev->dm.dmub_fb_info;
1971
1972         if (!fb_info) {
1973                 DRM_ERROR(
1974                         "Failed to allocate framebuffer info for DMUB service!\n");
1975                 return -ENOMEM;
1976         }
1977
1978         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1979         if (status != DMUB_STATUS_OK) {
1980                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1981                 return -EINVAL;
1982         }
1983
1984         return 0;
1985 }
1986
1987 static int dm_sw_init(void *handle)
1988 {
1989         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1990         int r;
1991
1992         r = dm_dmub_sw_init(adev);
1993         if (r)
1994                 return r;
1995
1996         return load_dmcu_fw(adev);
1997 }
1998
1999 static int dm_sw_fini(void *handle)
2000 {
2001         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2002
2003         kfree(adev->dm.dmub_fb_info);
2004         adev->dm.dmub_fb_info = NULL;
2005
2006         if (adev->dm.dmub_srv) {
2007                 dmub_srv_destroy(adev->dm.dmub_srv);
2008                 adev->dm.dmub_srv = NULL;
2009         }
2010
2011         release_firmware(adev->dm.dmub_fw);
2012         adev->dm.dmub_fw = NULL;
2013
2014         release_firmware(adev->dm.fw_dmcu);
2015         adev->dm.fw_dmcu = NULL;
2016
2017         return 0;
2018 }
2019
2020 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2021 {
2022         struct amdgpu_dm_connector *aconnector;
2023         struct drm_connector *connector;
2024         struct drm_connector_list_iter iter;
2025         int ret = 0;
2026
2027         drm_connector_list_iter_begin(dev, &iter);
2028         drm_for_each_connector_iter(connector, &iter) {
2029                 aconnector = to_amdgpu_dm_connector(connector);
2030                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2031                     aconnector->mst_mgr.aux) {
2032                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2033                                          aconnector,
2034                                          aconnector->base.base.id);
2035
2036                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2037                         if (ret < 0) {
2038                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2039                                 aconnector->dc_link->type =
2040                                         dc_connection_single;
2041                                 break;
2042                         }
2043                 }
2044         }
2045         drm_connector_list_iter_end(&iter);
2046
2047         return ret;
2048 }
2049
2050 static int dm_late_init(void *handle)
2051 {
2052         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2053
2054         struct dmcu_iram_parameters params;
2055         unsigned int linear_lut[16];
2056         int i;
2057         struct dmcu *dmcu = NULL;
2058
2059         dmcu = adev->dm.dc->res_pool->dmcu;
2060
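        /* Build a 16-entry linear LUT spanning 0..0xFFFF (entry 15 is full scale). */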
2061         for (i = 0; i < 16; i++)
2062                 linear_lut[i] = 0xFFFF * i / 15;
2063
2064         params.set = 0;
2065         params.backlight_ramping_override = false;
2066         params.backlight_ramping_start = 0xCCCC;
2067         params.backlight_ramping_reduction = 0xCCCCCCCC;
2068         params.backlight_lut_array_size = 16;
2069         params.backlight_lut_array = linear_lut;
2070
        /* Min backlight level after ABM reduction; don't allow below 1%:
         * 0xFFFF * 0.01 = 0x28F
         */
2074         params.min_abm_backlight = 0x28F;
        /* In the case where abm is implemented on dmcub,
         * the dmcu object will be NULL.
         * ABM 2.4 and up are implemented on dmcub.
         */
2079         if (dmcu) {
2080                 if (!dmcu_load_iram(dmcu, params))
2081                         return -EINVAL;
2082         } else if (adev->dm.dc->ctx->dmub_srv) {
2083                 struct dc_link *edp_links[MAX_NUM_EDP];
2084                 int edp_num;
2085
2086                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2087                 for (i = 0; i < edp_num; i++) {
2088                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2089                                 return -EINVAL;
2090                 }
2091         }
2092
2093         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2094 }
2095
2096 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2097 {
2098         struct amdgpu_dm_connector *aconnector;
2099         struct drm_connector *connector;
2100         struct drm_connector_list_iter iter;
2101         struct drm_dp_mst_topology_mgr *mgr;
2102         int ret;
2103         bool need_hotplug = false;
2104
2105         drm_connector_list_iter_begin(dev, &iter);
2106         drm_for_each_connector_iter(connector, &iter) {
2107                 aconnector = to_amdgpu_dm_connector(connector);
2108                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2109                     aconnector->mst_port)
2110                         continue;
2111
2112                 mgr = &aconnector->mst_mgr;
2113
2114                 if (suspend) {
2115                         drm_dp_mst_topology_mgr_suspend(mgr);
2116                 } else {
2117                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2118                         if (ret < 0) {
2119                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2120                                 need_hotplug = true;
2121                         }
2122                 }
2123         }
2124         drm_connector_list_iter_end(&iter);
2125
2126         if (need_hotplug)
2127                 drm_kms_helper_hotplug_event(dev);
2128 }
2129
2130 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2131 {
2132         struct smu_context *smu = &adev->smu;
2133         int ret = 0;
2134
2135         if (!is_support_sw_smu(adev))
2136                 return 0;
2137
        /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the Windows driver's dc implementation.
         * For Navi1x, the clock settings of the dcn watermarks are fixed and
         * should be passed to smu during boot up and on resume from S3.
         * Boot up: dc calculates the dcn watermark clock settings within
         * dc_create (dcn20_resource_construct), then calls the pplib
         * functions below to pass the settings to smu:
         *      smu_set_watermarks_for_clock_ranges
         *      smu_set_watermarks_table
         *      navi10_set_watermarks_table
         *      smu_write_watermarks_table
         *
         * For Renoir, the clock settings of the dcn watermarks are also fixed
         * values. dc has implemented a different flow for the Windows driver:
         *      dc_hardware_init / dc_set_power_state
         *      dcn10_init_hw
         *      notify_wm_ranges
         *      set_wm_ranges
         * and for Linux:
         *      smu_set_watermarks_for_clock_ranges
         *      renoir_set_watermarks_table
         *      smu_write_watermarks_table
         *
         * For Linux,
         *      dc_hardware_init -> amdgpu_dm_init
         *      dc_set_power_state -> dm_resume
         *
         * Therefore, this function applies to Navi10/12/14 but not to Renoir.
         */
2168         switch (adev->ip_versions[DCE_HWIP][0]) {
2169         case IP_VERSION(2, 0, 2):
2170         case IP_VERSION(2, 0, 0):
2171                 break;
2172         default:
2173                 return 0;
2174         }
2175
2176         ret = smu_write_watermarks_table(smu);
2177         if (ret) {
2178                 DRM_ERROR("Failed to update WMTABLE!\n");
2179                 return ret;
2180         }
2181
2182         return 0;
2183 }
2184
2185 /**
2186  * dm_hw_init() - Initialize DC device
2187  * @handle: The base driver device containing the amdgpu_dm device.
2188  *
2189  * Initialize the &struct amdgpu_display_manager device. This involves calling
2190  * the initializers of each DM component, then populating the struct with them.
2191  *
2192  * Although the function implies hardware initialization, both hardware and
2193  * software are initialized here. Splitting them out to their relevant init
2194  * hooks is a future TODO item.
2195  *
2196  * Some notable things that are initialized here:
2197  *
2198  * - Display Core, both software and hardware
2199  * - DC modules that we need (freesync and color management)
2200  * - DRM software states
2201  * - Interrupt sources and handlers
2202  * - Vblank support
2203  * - Debug FS entries, if enabled
2204  */
2205 static int dm_hw_init(void *handle)
2206 {
2207         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2208         /* Create DAL display manager */
2209         amdgpu_dm_init(adev);
2210         amdgpu_dm_hpd_init(adev);
2211
2212         return 0;
2213 }
2214
2215 /**
2216  * dm_hw_fini() - Teardown DC device
2217  * @handle: The base driver device containing the amdgpu_dm device.
2218  *
2219  * Teardown components within &struct amdgpu_display_manager that require
2220  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2221  * were loaded. Also flush IRQ workqueues and disable them.
2222  */
2223 static int dm_hw_fini(void *handle)
2224 {
2225         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2226
2227         amdgpu_dm_hpd_fini(adev);
2228
2229         amdgpu_dm_irq_fini(adev);
2230         amdgpu_dm_fini(adev);
2231         return 0;
2232 }
2233
2234
2235 static int dm_enable_vblank(struct drm_crtc *crtc);
2236 static void dm_disable_vblank(struct drm_crtc *crtc);
2237
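/*
 * Enable or disable the pageflip and vblank interrupts for every CRTC in
 * @state that currently drives at least one plane; used to quiesce display
 * interrupts around a GPU reset.
 */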
2238 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2239                                  struct dc_state *state, bool enable)
2240 {
2241         enum dc_irq_source irq_source;
2242         struct amdgpu_crtc *acrtc;
2243         int rc = -EBUSY;
2244         int i = 0;
2245
2246         for (i = 0; i < state->stream_count; i++) {
2247                 acrtc = get_crtc_by_otg_inst(
2248                                 adev, state->stream_status[i].primary_otg_inst);
2249
2250                 if (acrtc && state->stream_status[i].plane_count != 0) {
2251                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2252                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
                                      acrtc->crtc_id, enable ? "en" : "dis", rc);
2255                         if (rc)
2256                                 DRM_WARN("Failed to %s pflip interrupts\n",
2257                                          enable ? "enable" : "disable");
2258
2259                         if (enable) {
2260                                 rc = dm_enable_vblank(&acrtc->base);
2261                                 if (rc)
2262                                         DRM_WARN("Failed to enable vblank interrupts\n");
2263                         } else {
2264                                 dm_disable_vblank(&acrtc->base);
2265                         }
2266
2267                 }
2268         }
2269
2270 }
2271
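/*
 * Commit a copy of the current DC state with every stream (and its planes)
 * removed, effectively blanking all displays; used while suspending for a
 * GPU reset.
 */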
2272 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2273 {
2274         struct dc_state *context = NULL;
2275         enum dc_status res = DC_ERROR_UNEXPECTED;
2276         int i;
2277         struct dc_stream_state *del_streams[MAX_PIPES];
2278         int del_streams_count = 0;
2279
2280         memset(del_streams, 0, sizeof(del_streams));
2281
2282         context = dc_create_state(dc);
2283         if (context == NULL)
2284                 goto context_alloc_fail;
2285
2286         dc_resource_state_copy_construct_current(dc, context);
2287
2288         /* First remove from context all streams */
2289         for (i = 0; i < context->stream_count; i++) {
2290                 struct dc_stream_state *stream = context->streams[i];
2291
2292                 del_streams[del_streams_count++] = stream;
2293         }
2294
2295         /* Remove all planes for removed streams and then remove the streams */
2296         for (i = 0; i < del_streams_count; i++) {
2297                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2298                         res = DC_FAIL_DETACH_SURFACES;
2299                         goto fail;
2300                 }
2301
2302                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2303                 if (res != DC_OK)
2304                         goto fail;
2305         }
2306
        res = dc_validate_global_state(dc, context, false);
2309
2310         if (res != DC_OK) {
2311                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2312                 goto fail;
2313         }
2314
2315         res = dc_commit_state(dc, context);
2316
2317 fail:
2318         dc_release_state(context);
2319
2320 context_alloc_fail:
2321         return res;
2322 }
2323
2324 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2325 {
2326         int i;
2327
2328         if (dm->hpd_rx_offload_wq) {
2329                 for (i = 0; i < dm->dc->caps.max_links; i++)
2330                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2331         }
2332 }
2333
2334 static int dm_suspend(void *handle)
2335 {
2336         struct amdgpu_device *adev = handle;
2337         struct amdgpu_display_manager *dm = &adev->dm;
2338         int ret = 0;
2339
2340         if (amdgpu_in_reset(adev)) {
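                /*
                 * Note: dc_lock is intentionally left held on this path; the
                 * matching unlock is at the end of the GPU-reset branch of
                 * dm_resume().
                 */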
2341                 mutex_lock(&dm->dc_lock);
2342
2343 #if defined(CONFIG_DRM_AMD_DC_DCN)
2344                 dc_allow_idle_optimizations(adev->dm.dc, false);
2345 #endif
2346
2347                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2348
2349                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2350
2351                 amdgpu_dm_commit_zero_streams(dm->dc);
2352
2353                 amdgpu_dm_irq_suspend(adev);
2354
2355                 hpd_rx_irq_work_suspend(dm);
2356
2357                 return ret;
2358         }
2359
2360         WARN_ON(adev->dm.cached_state);
2361         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2362
2363         s3_handle_mst(adev_to_drm(adev), true);
2364
2365         amdgpu_dm_irq_suspend(adev);
2366
2367         hpd_rx_irq_work_suspend(dm);
2368
2369         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2370
2371         return 0;
2372 }
2373
2374 static struct amdgpu_dm_connector *
2375 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2376                                              struct drm_crtc *crtc)
2377 {
2378         uint32_t i;
2379         struct drm_connector_state *new_con_state;
2380         struct drm_connector *connector;
2381         struct drm_crtc *crtc_from_state;
2382
2383         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2384                 crtc_from_state = new_con_state->crtc;
2385
2386                 if (crtc_from_state == crtc)
2387                         return to_amdgpu_dm_connector(connector);
2388         }
2389
2390         return NULL;
2391 }
2392
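/*
 * Emulate sink detection for a connector that was force-enabled but on which
 * no physical sink was found: create a virtual sink matching the connector
 * signal and try to read an EDID for it.
 */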
2393 static void emulated_link_detect(struct dc_link *link)
2394 {
2395         struct dc_sink_init_data sink_init_data = { 0 };
2396         struct display_sink_capability sink_caps = { 0 };
2397         enum dc_edid_status edid_status;
2398         struct dc_context *dc_ctx = link->ctx;
2399         struct dc_sink *sink = NULL;
2400         struct dc_sink *prev_sink = NULL;
2401
2402         link->type = dc_connection_none;
2403         prev_sink = link->local_sink;
2404
2405         if (prev_sink)
2406                 dc_sink_release(prev_sink);
2407
2408         switch (link->connector_signal) {
2409         case SIGNAL_TYPE_HDMI_TYPE_A: {
2410                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2411                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2412                 break;
2413         }
2414
2415         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2416                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2417                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2418                 break;
2419         }
2420
2421         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2422                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2423                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2424                 break;
2425         }
2426
2427         case SIGNAL_TYPE_LVDS: {
2428                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2429                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2430                 break;
2431         }
2432
2433         case SIGNAL_TYPE_EDP: {
2434                 sink_caps.transaction_type =
2435                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2436                 sink_caps.signal = SIGNAL_TYPE_EDP;
2437                 break;
2438         }
2439
2440         case SIGNAL_TYPE_DISPLAY_PORT: {
2441                 sink_caps.transaction_type =
2442                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
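                /* Emulated DP sinks are reported as virtual. */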
2443                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2444                 break;
2445         }
2446
2447         default:
2448                 DC_ERROR("Invalid connector type! signal:%d\n",
2449                         link->connector_signal);
2450                 return;
2451         }
2452
2453         sink_init_data.link = link;
2454         sink_init_data.sink_signal = sink_caps.signal;
2455
2456         sink = dc_sink_create(&sink_init_data);
2457         if (!sink) {
2458                 DC_ERROR("Failed to create sink!\n");
2459                 return;
2460         }
2461
2462         /* dc_sink_create returns a new reference */
2463         link->local_sink = sink;
2464
2465         edid_status = dm_helpers_read_local_edid(
2466                         link->ctx,
2467                         link,
2468                         sink);
2469
        if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID\n");
}
2474
2475 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2476                                      struct amdgpu_display_manager *dm)
2477 {
2478         struct {
2479                 struct dc_surface_update surface_updates[MAX_SURFACES];
2480                 struct dc_plane_info plane_infos[MAX_SURFACES];
2481                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2482                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2483                 struct dc_stream_update stream_update;
        } *bundle;
2485         int k, m;
2486
2487         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2488
2489         if (!bundle) {
2490                 dm_error("Failed to allocate update bundle\n");
2491                 goto cleanup;
2492         }
2493
2494         for (k = 0; k < dc_state->stream_count; k++) {
2495                 bundle->stream_update.stream = dc_state->streams[k];
2496
                /* Index the status of stream k, not always the first entry. */
                for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status[k].plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status[k].plane_count,
                        dc_state->streams[k], &bundle->stream_update, dc_state);
2507         }
2508
cleanup:
        kfree(bundle);
}
2514
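/*
 * Force the stream currently driven over @link to DPMS off via a minimal
 * stream update, and record the forced-off state on the CRTC state.
 */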
2515 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2516 {
2517         struct dc_stream_state *stream_state;
2518         struct amdgpu_dm_connector *aconnector = link->priv;
2519         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2520         struct dc_stream_update stream_update;
2521         bool dpms_off = true;
2522
2523         memset(&stream_update, 0, sizeof(stream_update));
2524         stream_update.dpms_off = &dpms_off;
2525
2526         mutex_lock(&adev->dm.dc_lock);
2527         stream_state = dc_stream_find_from_link(link);
2528
2529         if (stream_state == NULL) {
2530                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2531                 mutex_unlock(&adev->dm.dc_lock);
2532                 return;
2533         }
2534
2535         stream_update.stream = stream_state;
2536         acrtc_state->force_dpms_off = true;
2537         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2538                                      stream_state, &stream_update,
2539                                      stream_state->ctx->dc->current_state);
2540         mutex_unlock(&adev->dm.dc_lock);
2541 }
2542
2543 static int dm_resume(void *handle)
2544 {
2545         struct amdgpu_device *adev = handle;
2546         struct drm_device *ddev = adev_to_drm(adev);
2547         struct amdgpu_display_manager *dm = &adev->dm;
2548         struct amdgpu_dm_connector *aconnector;
2549         struct drm_connector *connector;
2550         struct drm_connector_list_iter iter;
2551         struct drm_crtc *crtc;
2552         struct drm_crtc_state *new_crtc_state;
2553         struct dm_crtc_state *dm_new_crtc_state;
2554         struct drm_plane *plane;
2555         struct drm_plane_state *new_plane_state;
2556         struct dm_plane_state *dm_new_plane_state;
2557         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2558         enum dc_connection_type new_connection_type = dc_connection_none;
2559         struct dc_state *dc_state;
2560         int i, r, j;
2561
2562         if (amdgpu_in_reset(adev)) {
2563                 dc_state = dm->cached_dc_state;
2564
2565                 /*
2566                  * The dc->current_state is backed up into dm->cached_dc_state
2567                  * before we commit 0 streams.
2568                  *
2569                  * DC will clear link encoder assignments on the real state
2570                  * but the changes won't propagate over to the copy we made
2571                  * before the 0 streams commit.
2572                  *
2573                  * DC expects that link encoder assignments are *not* valid
2574                  * when committing a state, so as a workaround it needs to be
2575                  * cleared here.
2576                  */
2577                 link_enc_cfg_init(dm->dc, dc_state);
2578
2579                 amdgpu_dm_outbox_init(adev);
2580
2581                 r = dm_dmub_hw_init(adev);
2582                 if (r)
2583                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2584
2585                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2586                 dc_resume(dm->dc);
2587
2588                 amdgpu_dm_irq_resume_early(adev);
2589
2590                 for (i = 0; i < dc_state->stream_count; i++) {
2591                         dc_state->streams[i]->mode_changed = true;
2592                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2593                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2594                                         = 0xffffffff;
2595                         }
2596                 }
2597 #if defined(CONFIG_DRM_AMD_DC_DCN)
2598                 /*
2599                  * Resource allocation happens for link encoders for newer ASIC in
2600                  * dc_validate_global_state, so we need to revalidate it.
2601                  *
2602                  * This shouldn't fail (it passed once before), so warn if it does.
2603                  */
2604                 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2605 #endif
2606
2607                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2608
2609                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2610
2611                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2612
2613                 dc_release_state(dm->cached_dc_state);
2614                 dm->cached_dc_state = NULL;
2615
2616                 amdgpu_dm_irq_resume_late(adev);
2617
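                /* Drop the dc_lock taken in the reset branch of dm_suspend(). */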
2618                 mutex_unlock(&dm->dc_lock);
2619
2620                 return 0;
2621         }
2622         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2623         dc_release_state(dm_state->context);
2624         dm_state->context = dc_create_state(dm->dc);
2625         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2626         dc_resource_state_construct(dm->dc, dm_state->context);
2627
2628         /* Before powering on DC we need to re-initialize DMUB. */
2629         r = dm_dmub_hw_init(adev);
2630         if (r)
2631                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2632
2633         /* power on hardware */
2634         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2635
2636         /* program HPD filter */
2637         dc_resume(dm->dc);
2638
2639         /*
2640          * early enable HPD Rx IRQ, should be done before set mode as short
2641          * pulse interrupts are used for MST
2642          */
2643         amdgpu_dm_irq_resume_early(adev);
2644
        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /* Do detection */
2649         drm_connector_list_iter_begin(ddev, &iter);
2650         drm_for_each_connector_iter(connector, &iter) {
2651                 aconnector = to_amdgpu_dm_connector(connector);
2652
                /*
                 * Skip connectors already created for MST ports; detection
                 * on them is handled through the MST topology manager.
                 */
2657                 if (aconnector->mst_port)
2658                         continue;
2659
2660                 mutex_lock(&aconnector->hpd_lock);
2661                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2662                         DRM_ERROR("KMS: Failed to detect connector\n");
2663
2664                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2665                         emulated_link_detect(aconnector->dc_link);
2666                 else
2667                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2668
2669                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2670                         aconnector->fake_enable = false;
2671
2672                 if (aconnector->dc_sink)
2673                         dc_sink_release(aconnector->dc_sink);
2674                 aconnector->dc_sink = NULL;
2675                 amdgpu_dm_update_connector_after_detect(aconnector);
2676                 mutex_unlock(&aconnector->hpd_lock);
2677         }
2678         drm_connector_list_iter_end(&iter);
2679
2680         /* Force mode set in atomic commit */
2681         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2682                 new_crtc_state->active_changed = true;
2683
2684         /*
2685          * atomic_check is expected to create the dc states. We need to release
2686          * them here, since they were duplicated as part of the suspend
2687          * procedure.
2688          */
2689         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2690                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2691                 if (dm_new_crtc_state->stream) {
2692                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2693                         dc_stream_release(dm_new_crtc_state->stream);
2694                         dm_new_crtc_state->stream = NULL;
2695                 }
2696         }
2697
2698         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2699                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2700                 if (dm_new_plane_state->dc_state) {
2701                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2702                         dc_plane_state_release(dm_new_plane_state->dc_state);
2703                         dm_new_plane_state->dc_state = NULL;
2704                 }
2705         }
2706
2707         drm_atomic_helper_resume(ddev, dm->cached_state);
2708
2709         dm->cached_state = NULL;
2710
2711         amdgpu_dm_irq_resume_late(adev);
2712
2713         amdgpu_dm_smu_write_watermarks_table(adev);
2714
2715         return 0;
2716 }
2717
2718 /**
2719  * DOC: DM Lifecycle
2720  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2722  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2723  * the base driver's device list to be initialized and torn down accordingly.
2724  *
2725  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2726  */
2727
2728 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2729         .name = "dm",
2730         .early_init = dm_early_init,
2731         .late_init = dm_late_init,
2732         .sw_init = dm_sw_init,
2733         .sw_fini = dm_sw_fini,
2734         .early_fini = amdgpu_dm_early_fini,
2735         .hw_init = dm_hw_init,
2736         .hw_fini = dm_hw_fini,
2737         .suspend = dm_suspend,
2738         .resume = dm_resume,
2739         .is_idle = dm_is_idle,
2740         .wait_for_idle = dm_wait_for_idle,
2741         .check_soft_reset = dm_check_soft_reset,
2742         .soft_reset = dm_soft_reset,
2743         .set_clockgating_state = dm_set_clockgating_state,
2744         .set_powergating_state = dm_set_powergating_state,
2745 };
2746
const struct amdgpu_ip_block_version dm_ip_block = {
2749         .type = AMD_IP_BLOCK_TYPE_DCE,
2750         .major = 1,
2751         .minor = 0,
2752         .rev = 0,
2753         .funcs = &amdgpu_dm_funcs,
2754 };
2755
2756
2757 /**
2758  * DOC: atomic
2759  *
2760  * *WIP*
2761  */
2762
2763 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2764         .fb_create = amdgpu_display_user_framebuffer_create,
2765         .get_format_info = amd_get_format_info,
2766         .output_poll_changed = drm_fb_helper_output_poll_changed,
2767         .atomic_check = amdgpu_dm_atomic_check,
2768         .atomic_commit = drm_atomic_helper_commit,
2769 };
2770
2771 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2772         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2773 };
2774
2775 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2776 {
2777         u32 max_cll, min_cll, max, min, q, r;
2778         struct amdgpu_dm_backlight_caps *caps;
2779         struct amdgpu_display_manager *dm;
2780         struct drm_connector *conn_base;
2781         struct amdgpu_device *adev;
2782         struct dc_link *link = NULL;
2783         static const u8 pre_computed_values[] = {
2784                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2785                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2786         int i;
2787
2788         if (!aconnector || !aconnector->dc_link)
2789                 return;
2790
2791         link = aconnector->dc_link;
2792         if (link->connector_signal != SIGNAL_TYPE_EDP)
2793                 return;
2794
2795         conn_base = &aconnector->base;
2796         adev = drm_to_adev(conn_base->dev);
2797         dm = &adev->dm;
2798         for (i = 0; i < dm->num_of_edps; i++) {
2799                 if (link == dm->backlight_link[i])
2800                         break;
2801         }
2802         if (i >= dm->num_of_edps)
2803                 return;
2804         caps = &dm->backlight_caps[i];
2805         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2806         caps->aux_support = false;
2807         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2808         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2809
2810         if (caps->ext_caps->bits.oled == 1 /*||
2811             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2812             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2813                 caps->aux_support = true;
2814
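        /* module parameter override: amdgpu_backlight=0 forces PWM, =1 forces AUX */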
2815         if (amdgpu_backlight == 0)
2816                 caps->aux_support = false;
2817         else if (amdgpu_backlight == 1)
2818                 caps->aux_support = true;
2819
        /* From the specification (CTA-861-G), the maximum luminance is
         * calculated as:
         *      Luminance = 50*2**(CV/32)
         * where CV is a one-byte value.
         * Evaluating this expression directly would require floating-point
         * precision; to avoid that complexity, we take advantage of the
         * fact that CV is divided by a constant. From Euclid's division
         * algorithm, we know that CV can be written as CV = 32*q + r.
         * Substituting CV in the luminance expression gives
         * 50*(2**q)*(2**(r/32)), so we only need to pre-compute the values
         * of 50*2**(r/32). The table was generated with the following Ruby
         * line:
         *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
         * and the results are stored in pre_computed_values.
         */
2835         q = max_cll >> 5;
2836         r = max_cll % 32;
2837         max = (1 << q) * pre_computed_values[r];
2838
        /*
         * min luminance: maxLum * (CV/255)^2 / 100, evaluated in a single
         * integer expression so the squared ratio is not rounded down to
         * zero before the multiplication.
         */
        min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
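        /* e.g. max = 440, min_cll = 200: min = round(440 * (200/255)^2 / 100) = 3 */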
2842
2843         caps->aux_max_input_signal = max;
2844         caps->aux_min_input_signal = min;
2845 }
2846
2847 void amdgpu_dm_update_connector_after_detect(
2848                 struct amdgpu_dm_connector *aconnector)
2849 {
2850         struct drm_connector *connector = &aconnector->base;
2851         struct drm_device *dev = connector->dev;
2852         struct dc_sink *sink;
2853
2854         /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state)
2856                 return;
2857
2858         sink = aconnector->dc_link->local_sink;
2859         if (sink)
2860                 dc_sink_retain(sink);
2861
2862         /*
2863          * Edid mgmt connector gets first update only in mode_valid hook and then
2864          * the connector sink is set to either fake or physical sink depends on link status.
2865          * Skip if already done during boot.
2866          */
2867         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2868                         && aconnector->dc_em_sink) {
2869
2870                 /*
2871                  * For S3 resume with headless use eml_sink to fake stream
2872                  * because on resume connector->sink is set to NULL
2873                  */
2874                 mutex_lock(&dev->mode_config.mutex);
2875
2876                 if (sink) {
2877                         if (aconnector->dc_sink) {
2878                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2879                                 /*
2880                                  * retain and release below are used to
2881                                  * bump up refcount for sink because the link doesn't point
2882                                  * to it anymore after disconnect, so on next crtc to connector
2883                                  * reshuffle by UMD we will get into unwanted dc_sink release
2884                                  */
2885                                 dc_sink_release(aconnector->dc_sink);
2886                         }
2887                         aconnector->dc_sink = sink;
2888                         dc_sink_retain(aconnector->dc_sink);
2889                         amdgpu_dm_update_freesync_caps(connector,
2890                                         aconnector->edid);
2891                 } else {
2892                         amdgpu_dm_update_freesync_caps(connector, NULL);
2893                         if (!aconnector->dc_sink) {
2894                                 aconnector->dc_sink = aconnector->dc_em_sink;
2895                                 dc_sink_retain(aconnector->dc_sink);
2896                         }
2897                 }
2898
2899                 mutex_unlock(&dev->mode_config.mutex);
2900
2901                 if (sink)
2902                         dc_sink_release(sink);
2903                 return;
2904         }
2905
2906         /*
2907          * TODO: temporary guard to look for proper fix
2908          * if this sink is MST sink, we should not do anything
2909          */
2910         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2911                 dc_sink_release(sink);
2912                 return;
2913         }
2914
2915         if (aconnector->dc_sink == sink) {
2916                 /*
2917                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2918                  * Do nothing!!
2919                  */
2920                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2921                                 aconnector->connector_id);
2922                 if (sink)
2923                         dc_sink_release(sink);
2924                 return;
2925         }
2926
2927         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2928                 aconnector->connector_id, aconnector->dc_sink, sink);
2929
2930         mutex_lock(&dev->mode_config.mutex);
2931
2932         /*
2933          * 1. Update status of the drm connector
2934          * 2. Send an event and let userspace tell us what to do
2935          */
2936         if (sink) {
2937                 /*
2938                  * TODO: check if we still need the S3 mode update workaround.
2939                  * If yes, put it here.
2940                  */
2941                 if (aconnector->dc_sink) {
2942                         amdgpu_dm_update_freesync_caps(connector, NULL);
2943                         dc_sink_release(aconnector->dc_sink);
2944                 }
2945
2946                 aconnector->dc_sink = sink;
2947                 dc_sink_retain(aconnector->dc_sink);
2948                 if (sink->dc_edid.length == 0) {
2949                         aconnector->edid = NULL;
2950                         if (aconnector->dc_link->aux_mode) {
2951                                 drm_dp_cec_unset_edid(
2952                                         &aconnector->dm_dp_aux.aux);
2953                         }
2954                 } else {
2955                         aconnector->edid =
2956                                 (struct edid *)sink->dc_edid.raw_edid;
2957
2958                         drm_connector_update_edid_property(connector,
2959                                                            aconnector->edid);
2960                         if (aconnector->dc_link->aux_mode)
2961                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2962                                                     aconnector->edid);
2963                 }
2964
2965                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2966                 update_connector_ext_caps(aconnector);
2967         } else {
2968                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2969                 amdgpu_dm_update_freesync_caps(connector, NULL);
2970                 drm_connector_update_edid_property(connector, NULL);
2971                 aconnector->num_modes = 0;
2972                 dc_sink_release(aconnector->dc_sink);
2973                 aconnector->dc_sink = NULL;
2974                 aconnector->edid = NULL;
2975 #ifdef CONFIG_DRM_AMD_DC_HDCP
2976                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2977                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2978                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2979 #endif
2980         }
2981
2982         mutex_unlock(&dev->mode_config.mutex);
2983
2984         update_subconnector_property(aconnector);
2985
2986         if (sink)
2987                 dc_sink_release(sink);
2988 }
2989
2990 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2991 {
2992         struct drm_connector *connector = &aconnector->base;
2993         struct drm_device *dev = connector->dev;
2994         enum dc_connection_type new_connection_type = dc_connection_none;
2995         struct amdgpu_device *adev = drm_to_adev(dev);
2996         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2997         struct dm_crtc_state *dm_crtc_state = NULL;
2998
2999         if (adev->dm.disable_hpd_irq)
3000                 return;
3001
3002         if (dm_con_state->base.state && dm_con_state->base.crtc)
3003                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3004                                         dm_con_state->base.state,
3005                                         dm_con_state->base.crtc));
3006         /*
3007          * In case of failure or MST no need to update connector status or notify the OS
3008          * since (for MST case) MST does this in its own context.
3009          */
3010         mutex_lock(&aconnector->hpd_lock);
3011
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
3013         if (adev->dm.hdcp_workqueue) {
3014                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3015                 dm_con_state->update_hdcp = true;
3016         }
3017 #endif
3018         if (aconnector->fake_enable)
3019                 aconnector->fake_enable = false;
3020
3021         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3022                 DRM_ERROR("KMS: Failed to detect connector\n");
3023
3024         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3025                 emulated_link_detect(aconnector->dc_link);
3026
3027                 drm_modeset_lock_all(dev);
3028                 dm_restore_drm_connector_state(dev, connector);
3029                 drm_modeset_unlock_all(dev);
3030
3031                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3032                         drm_kms_helper_hotplug_event(dev);
3033
3034         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3035                 if (new_connection_type == dc_connection_none &&
3036                     aconnector->dc_link->type == dc_connection_none &&
3037                     dm_crtc_state)
3038                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3039
3040                 amdgpu_dm_update_connector_after_detect(aconnector);
3041
3042                 drm_modeset_lock_all(dev);
3043                 dm_restore_drm_connector_state(dev, connector);
3044                 drm_modeset_unlock_all(dev);
3045
3046                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3047                         drm_kms_helper_hotplug_event(dev);
3048         }
        mutex_unlock(&aconnector->hpd_lock);
}
3052
3053 static void handle_hpd_irq(void *param)
3054 {
3055         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3056
        handle_hpd_irq_helper(aconnector);
}
3060
3061 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3062 {
3063         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3064         uint8_t dret;
3065         bool new_irq_handled = false;
3066         int dpcd_addr;
3067         int dpcd_bytes_to_read;
3068
3069         const int max_process_count = 30;
3070         int process_count = 0;
3071
3072         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3073
3074         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3075                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3076                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3077                 dpcd_addr = DP_SINK_COUNT;
3078         } else {
3079                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3080                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3081                 dpcd_addr = DP_SINK_COUNT_ESI;
3082         }
3083
3084         dret = drm_dp_dpcd_read(
3085                 &aconnector->dm_dp_aux.aux,
3086                 dpcd_addr,
3087                 esi,
3088                 dpcd_bytes_to_read);
3089
3090         while (dret == dpcd_bytes_to_read &&
3091                 process_count < max_process_count) {
3092                 uint8_t retry;
3093                 dret = 0;
3094
3095                 process_count++;
3096
3097                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3098                 /* handle HPD short pulse irq */
3099                 if (aconnector->mst_mgr.mst_state)
3100                         drm_dp_mst_hpd_irq(
3101                                 &aconnector->mst_mgr,
3102                                 esi,
3103                                 &new_irq_handled);
3104
3105                 if (new_irq_handled) {
                        /*
                         * ACK at DPCD to notify the downstream device; the
                         * leading SINK_COUNT byte is skipped, so the ACK is
                         * written starting at dpcd_addr + 1.
                         */
                        const int ack_dpcd_bytes_to_write =
                                dpcd_bytes_to_read - 1;
3109
3110                         for (retry = 0; retry < 3; retry++) {
3111                                 uint8_t wret;
3112
3113                                 wret = drm_dp_dpcd_write(
3114                                         &aconnector->dm_dp_aux.aux,
3115                                         dpcd_addr + 1,
3116                                         &esi[1],
3117                                         ack_dpcd_bytes_to_write);
3118                                 if (wret == ack_dpcd_bytes_to_write)
3119                                         break;
3120                         }
3121
                        /* check if there is a new irq to be handled */
3123                         dret = drm_dp_dpcd_read(
3124                                 &aconnector->dm_dp_aux.aux,
3125                                 dpcd_addr,
3126                                 esi,
3127                                 dpcd_bytes_to_read);
3128
3129                         new_irq_handled = false;
3130                 } else {
3131                         break;
3132                 }
3133         }
3134
3135         if (process_count == max_process_count)
3136                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3137 }
3138
3139 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3140                                                         union hpd_irq_data hpd_irq_data)
3141 {
3142         struct hpd_rx_irq_offload_work *offload_work =
3143                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3144
3145         if (!offload_work) {
3146                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3147                 return;
3148         }
3149
3150         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3151         offload_work->data = hpd_irq_data;
3152         offload_work->offload_wq = offload_wq;
3153
3154         queue_work(offload_wq->wq, &offload_work->work);
        DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3156 }
3157
3158 static void handle_hpd_rx_irq(void *param)
3159 {
3160         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3161         struct drm_connector *connector = &aconnector->base;
3162         struct drm_device *dev = connector->dev;
3163         struct dc_link *dc_link = aconnector->dc_link;
3164         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3165         bool result = false;
3166         enum dc_connection_type new_connection_type = dc_connection_none;
3167         struct amdgpu_device *adev = drm_to_adev(dev);
3168         union hpd_irq_data hpd_irq_data;
3169         bool link_loss = false;
3170         bool has_left_work = false;
3171         int idx = aconnector->base.index;
3172         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
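        /* one hpd_rx offload queue per connector, indexed by the drm connector index */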
3173
3174         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3175
3176         if (adev->dm.disable_hpd_irq)
3177                 return;
3178
3179         /*
3180          * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3181          * conflict, after implement i2c helper, this mutex should be
3182          * retired.
3183          */
3184         mutex_lock(&aconnector->hpd_lock);
3185
3186         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3187                                                 &link_loss, true, &has_left_work);
3188
3189         if (!has_left_work)
3190                 goto out;
3191
3192         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3193                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3194                 goto out;
3195         }
3196
3197         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3198                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3199                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3200                         dm_handle_mst_sideband_msg(aconnector);
3201                         goto out;
3202                 }
3203
3204                 if (link_loss) {
3205                         bool skip = false;
3206
3207                         spin_lock(&offload_wq->offload_lock);
3208                         skip = offload_wq->is_handling_link_loss;
3209
3210                         if (!skip)
3211                                 offload_wq->is_handling_link_loss = true;
3212
3213                         spin_unlock(&offload_wq->offload_lock);
3214
3215                         if (!skip)
3216                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3217
3218                         goto out;
3219                 }
3220         }
3221
3222 out:
3223         if (result && !is_mst_root_connector) {
3224                 /* Downstream Port status changed. */
3225                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3226                         DRM_ERROR("KMS: Failed to detect connector\n");
3227
3228                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3229                         emulated_link_detect(dc_link);
3230
3231                         if (aconnector->fake_enable)
3232                                 aconnector->fake_enable = false;
3233
                        amdgpu_dm_update_connector_after_detect(aconnector);

3237                         drm_modeset_lock_all(dev);
3238                         dm_restore_drm_connector_state(dev, connector);
3239                         drm_modeset_unlock_all(dev);
3240
3241                         drm_kms_helper_hotplug_event(dev);
3242                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3243
3244                         if (aconnector->fake_enable)
3245                                 aconnector->fake_enable = false;
3246
                        amdgpu_dm_update_connector_after_detect(aconnector);

3250                         drm_modeset_lock_all(dev);
3251                         dm_restore_drm_connector_state(dev, connector);
3252                         drm_modeset_unlock_all(dev);
3253
3254                         drm_kms_helper_hotplug_event(dev);
3255                 }
3256         }
3257 #ifdef CONFIG_DRM_AMD_DC_HDCP
3258         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3259                 if (adev->dm.hdcp_workqueue)
                        hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3261         }
3262 #endif
3263
3264         if (dc_link->type != dc_connection_mst_branch)
3265                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3266
3267         mutex_unlock(&aconnector->hpd_lock);
3268 }
3269
3270 static void register_hpd_handlers(struct amdgpu_device *adev)
3271 {
3272         struct drm_device *dev = adev_to_drm(adev);
3273         struct drm_connector *connector;
3274         struct amdgpu_dm_connector *aconnector;
3275         const struct dc_link *dc_link;
3276         struct dc_interrupt_params int_params = {0};
3277
3278         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3279         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3280
3281         list_for_each_entry(connector,
3282                         &dev->mode_config.connector_list, head) {
3283
3284                 aconnector = to_amdgpu_dm_connector(connector);
3285                 dc_link = aconnector->dc_link;
3286
                if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3288                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3289                         int_params.irq_source = dc_link->irq_source_hpd;
3290
3291                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3292                                         handle_hpd_irq,
3293                                         (void *) aconnector);
3294                 }
3295
                if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3298                         /* Also register for DP short pulse (hpd_rx). */
3299                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3300                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3301
3302                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3303                                         handle_hpd_rx_irq,
3304                                         (void *) aconnector);
3305
3306                         if (adev->dm.hpd_rx_offload_wq)
3307                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3308                                         aconnector;
3309                 }
3310         }
3311 }
3312
3313 #if defined(CONFIG_DRM_AMD_DC_SI)
3314 /* Register IRQ sources and initialize IRQ callbacks */
3315 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3316 {
3317         struct dc *dc = adev->dm.dc;
3318         struct common_irq_params *c_irq_params;
3319         struct dc_interrupt_params int_params = {0};
3320         int r;
3321         int i;
        unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3323
3324         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3325         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3326
3327         /*
3328          * Actions of amdgpu_irq_add_id():
3329          * 1. Register a set() function with base driver.
3330          *    Base driver will call set() function to enable/disable an
3331          *    interrupt in DC hardware.
3332          * 2. Register amdgpu_dm_irq_handler().
3333          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3334          *    coming from DC hardware.
3335          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */
3337
3338         /* Use VBLANK interrupt */
3339         for (i = 0; i < adev->mode_info.num_crtc; i++) {
                r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3341                 if (r) {
3342                         DRM_ERROR("Failed to add crtc irq id!\n");
3343                         return r;
3344                 }
3345
3346                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3347                 int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i + 1, 0);
3349
3350                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3351
3352                 c_irq_params->adev = adev;
3353                 c_irq_params->irq_src = int_params.irq_source;
3354
3355                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3356                                 dm_crtc_high_irq, c_irq_params);
3357         }
3358
3359         /* Use GRPH_PFLIP interrupt */
3360         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3361                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3362                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3363                 if (r) {
3364                         DRM_ERROR("Failed to add page flip irq id!\n");
3365                         return r;
3366                 }
3367
3368                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3369                 int_params.irq_source =
3370                         dc_interrupt_to_irq_source(dc, i, 0);
3371
3372                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3373
3374                 c_irq_params->adev = adev;
3375                 c_irq_params->irq_src = int_params.irq_source;
3376
3377                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3378                                 dm_pflip_high_irq, c_irq_params);
3379
3380         }
3381
3382         /* HPD */
3383         r = amdgpu_irq_add_id(adev, client_id,
3384                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3385         if (r) {
3386                 DRM_ERROR("Failed to add hpd irq id!\n");
3387                 return r;
3388         }
3389
3390         register_hpd_handlers(adev);
3391
3392         return 0;
3393 }
3394 #endif
3395
3396 /* Register IRQ sources and initialize IRQ callbacks */
3397 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3398 {
3399         struct dc *dc = adev->dm.dc;
3400         struct common_irq_params *c_irq_params;
3401         struct dc_interrupt_params int_params = {0};
3402         int r;
3403         int i;
        unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3405
3406         if (adev->family >= AMDGPU_FAMILY_AI)
3407                 client_id = SOC15_IH_CLIENTID_DCE;
3408
3409         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3410         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3411
3412         /*
3413          * Actions of amdgpu_irq_add_id():
3414          * 1. Register a set() function with base driver.
3415          *    Base driver will call set() function to enable/disable an
3416          *    interrupt in DC hardware.
3417          * 2. Register amdgpu_dm_irq_handler().
3418          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3419          *    coming from DC hardware.
3420          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */
3422
3423         /* Use VBLANK interrupt */
3424         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3425                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3426                 if (r) {
3427                         DRM_ERROR("Failed to add crtc irq id!\n");
3428                         return r;
3429                 }
3430
3431                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3432                 int_params.irq_source =
3433                         dc_interrupt_to_irq_source(dc, i, 0);
3434
3435                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3436
3437                 c_irq_params->adev = adev;
3438                 c_irq_params->irq_src = int_params.irq_source;
3439
3440                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3441                                 dm_crtc_high_irq, c_irq_params);
3442         }
3443
3444         /* Use VUPDATE interrupt */
3445         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3446                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3447                 if (r) {
3448                         DRM_ERROR("Failed to add vupdate irq id!\n");
3449                         return r;
3450                 }
3451
3452                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3453                 int_params.irq_source =
3454                         dc_interrupt_to_irq_source(dc, i, 0);
3455
3456                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3457
3458                 c_irq_params->adev = adev;
3459                 c_irq_params->irq_src = int_params.irq_source;
3460
3461                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3462                                 dm_vupdate_high_irq, c_irq_params);
3463         }
3464
3465         /* Use GRPH_PFLIP interrupt */
3466         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3467                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3468                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3469                 if (r) {
3470                         DRM_ERROR("Failed to add page flip irq id!\n");
3471                         return r;
3472                 }
3473
3474                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3475                 int_params.irq_source =
3476                         dc_interrupt_to_irq_source(dc, i, 0);
3477
3478                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3479
3480                 c_irq_params->adev = adev;
3481                 c_irq_params->irq_src = int_params.irq_source;
3482
3483                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);
        }
3487
3488         /* HPD */
3489         r = amdgpu_irq_add_id(adev, client_id,
3490                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3491         if (r) {
3492                 DRM_ERROR("Failed to add hpd irq id!\n");
3493                 return r;
3494         }
3495
3496         register_hpd_handlers(adev);
3497
3498         return 0;
3499 }
3500
3501 #if defined(CONFIG_DRM_AMD_DC_DCN)
3502 /* Register IRQ sources and initialize IRQ callbacks */
3503 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3504 {
3505         struct dc *dc = adev->dm.dc;
3506         struct common_irq_params *c_irq_params;
3507         struct dc_interrupt_params int_params = {0};
3508         int r;
3509         int i;
3510 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3511         static const unsigned int vrtl_int_srcid[] = {
3512                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3513                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3514                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3515                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3516                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3517                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3518         };
3519 #endif
3520
3521         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3522         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3523
3524         /*
3525          * Actions of amdgpu_irq_add_id():
3526          * 1. Register a set() function with base driver.
3527          *    Base driver will call set() function to enable/disable an
3528          *    interrupt in DC hardware.
3529          * 2. Register amdgpu_dm_irq_handler().
3530          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3531          *    coming from DC hardware.
3532          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3533          *    for acknowledging and handling.
3534          */
3535
3536         /* Use VSTARTUP interrupt */
3537         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3538                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3539                         i++) {
3540                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3541
3542                 if (r) {
3543                         DRM_ERROR("Failed to add crtc irq id!\n");
3544                         return r;
3545                 }
3546
3547                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3548                 int_params.irq_source =
3549                         dc_interrupt_to_irq_source(dc, i, 0);
3550
3551                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3552
3553                 c_irq_params->adev = adev;
3554                 c_irq_params->irq_src = int_params.irq_source;
3555
3556                 amdgpu_dm_irq_register_interrupt(
3557                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3558         }
3559
3560         /* Use otg vertical line interrupt */
3561 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3562         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3563                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3564                                 vrtl_int_srcid[i], &adev->vline0_irq);
3565
3566                 if (r) {
3567                         DRM_ERROR("Failed to add vline0 irq id!\n");
3568                         return r;
3569                 }
3570
3571                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3572                 int_params.irq_source =
3573                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3574
3575                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3576                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3577                         break;
3578                 }
3579
3580                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3581                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3582
3583                 c_irq_params->adev = adev;
3584                 c_irq_params->irq_src = int_params.irq_source;
3585
3586                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3587                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3588         }
3589 #endif
3590
3591         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3592          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3593          * to trigger at end of each vblank, regardless of state of the lock,
3594          * matching DCE behaviour.
3595          */
3596         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3597              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3598              i++) {
3599                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3600
3601                 if (r) {
3602                         DRM_ERROR("Failed to add vupdate irq id!\n");
3603                         return r;
3604                 }
3605
3606                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3607                 int_params.irq_source =
3608                         dc_interrupt_to_irq_source(dc, i, 0);
3609
3610                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3611
3612                 c_irq_params->adev = adev;
3613                 c_irq_params->irq_src = int_params.irq_source;
3614
3615                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3616                                 dm_vupdate_high_irq, c_irq_params);
3617         }
3618
3619         /* Use GRPH_PFLIP interrupt */
3620         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3621                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3622                         i++) {
3623                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3624                 if (r) {
3625                         DRM_ERROR("Failed to add page flip irq id!\n");
3626                         return r;
3627                 }
3628
3629                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3630                 int_params.irq_source =
3631                         dc_interrupt_to_irq_source(dc, i, 0);
3632
3633                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3634
3635                 c_irq_params->adev = adev;
3636                 c_irq_params->irq_src = int_params.irq_source;
3637
3638                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);
        }
3642
3643         /* HPD */
3644         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3645                         &adev->hpd_irq);
3646         if (r) {
3647                 DRM_ERROR("Failed to add hpd irq id!\n");
3648                 return r;
3649         }
3650
3651         register_hpd_handlers(adev);
3652
3653         return 0;
3654 }
3655 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3656 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3657 {
3658         struct dc *dc = adev->dm.dc;
3659         struct common_irq_params *c_irq_params;
3660         struct dc_interrupt_params int_params = {0};
3661         int r, i;
3662
3663         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3664         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3665
3666         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3667                         &adev->dmub_outbox_irq);
3668         if (r) {
3669                 DRM_ERROR("Failed to add outbox irq id!\n");
3670                 return r;
3671         }
3672
3673         if (dc->ctx->dmub_srv) {
3674                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3675                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);
3678
3679                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3680
3681                 c_irq_params->adev = adev;
3682                 c_irq_params->irq_src = int_params.irq_source;
3683
3684                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3685                                 dm_dmub_outbox1_low_irq, c_irq_params);
3686         }
3687
3688         return 0;
3689 }
3690 #endif
3691
3692 /*
3693  * Acquires the lock for the atomic state object and returns
3694  * the new atomic state.
3695  *
3696  * This should only be called during atomic check.
3697  */
3698 static int dm_atomic_get_state(struct drm_atomic_state *state,
3699                                struct dm_atomic_state **dm_state)
3700 {
3701         struct drm_device *dev = state->dev;
3702         struct amdgpu_device *adev = drm_to_adev(dev);
3703         struct amdgpu_display_manager *dm = &adev->dm;
3704         struct drm_private_state *priv_state;
3705
3706         if (*dm_state)
3707                 return 0;
3708
3709         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3710         if (IS_ERR(priv_state))
3711                 return PTR_ERR(priv_state);
3712
3713         *dm_state = to_dm_atomic_state(priv_state);
3714
3715         return 0;
3716 }
3717
3718 static struct dm_atomic_state *
3719 dm_atomic_get_new_state(struct drm_atomic_state *state)
3720 {
3721         struct drm_device *dev = state->dev;
3722         struct amdgpu_device *adev = drm_to_adev(dev);
3723         struct amdgpu_display_manager *dm = &adev->dm;
3724         struct drm_private_obj *obj;
3725         struct drm_private_state *new_obj_state;
3726         int i;
3727
3728         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3729                 if (obj->funcs == dm->atomic_obj.funcs)
3730                         return to_dm_atomic_state(new_obj_state);
3731         }
3732
3733         return NULL;
3734 }
3735
3736 static struct drm_private_state *
3737 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3738 {
3739         struct dm_atomic_state *old_state, *new_state;
3740
3741         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3742         if (!new_state)
3743                 return NULL;
3744
3745         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3746
3747         old_state = to_dm_atomic_state(obj->state);
3748
3749         if (old_state && old_state->context)
3750                 new_state->context = dc_copy_state(old_state->context);
3751
3752         if (!new_state->context) {
3753                 kfree(new_state);
3754                 return NULL;
3755         }
3756
3757         return &new_state->base;
3758 }
3759
3760 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3761                                     struct drm_private_state *state)
3762 {
3763         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3764
3765         if (dm_state && dm_state->context)
3766                 dc_release_state(dm_state->context);
3767
3768         kfree(dm_state);
3769 }
3770
3771 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3772         .atomic_duplicate_state = dm_atomic_duplicate_state,
3773         .atomic_destroy_state = dm_atomic_destroy_state,
3774 };
3775
3776 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3777 {
3778         struct dm_atomic_state *state;
3779         int r;
3780
3781         adev->mode_info.mode_config_initialized = true;
3782
3783         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3784         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3785
3786         adev_to_drm(adev)->mode_config.max_width = 16384;
3787         adev_to_drm(adev)->mode_config.max_height = 16384;
3788
3789         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3790         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3791         /* indicates support for immediate flip */
3792         adev_to_drm(adev)->mode_config.async_page_flip = true;
3793
3794         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3795
3796         state = kzalloc(sizeof(*state), GFP_KERNEL);
3797         if (!state)
3798                 return -ENOMEM;
3799
3800         state->context = dc_create_state(adev->dm.dc);
3801         if (!state->context) {
3802                 kfree(state);
3803                 return -ENOMEM;
3804         }
3805
3806         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3807
3808         drm_atomic_private_obj_init(adev_to_drm(adev),
3809                                     &adev->dm.atomic_obj,
3810                                     &state->base,
3811                                     &dm_atomic_state_funcs);
3812
3813         r = amdgpu_display_modeset_create_props(adev);
3814         if (r) {
3815                 dc_release_state(state->context);
3816                 kfree(state);
3817                 return r;
3818         }
3819
3820         r = amdgpu_dm_audio_init(adev);
3821         if (r) {
3822                 dc_release_state(state->context);
3823                 kfree(state);
3824                 return r;
3825         }
3826
3827         return 0;
3828 }
3829
3830 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3831 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3832 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3833
3834 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3835         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3836
3837 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3838                                             int bl_idx)
3839 {
3840 #if defined(CONFIG_ACPI)
3841         struct amdgpu_dm_backlight_caps caps;
3842
3843         memset(&caps, 0, sizeof(caps));
3844
3845         if (dm->backlight_caps[bl_idx].caps_valid)
3846                 return;
3847
3848         amdgpu_acpi_get_backlight_caps(&caps);
3849         if (caps.caps_valid) {
3850                 dm->backlight_caps[bl_idx].caps_valid = true;
3851                 if (caps.aux_support)
3852                         return;
3853                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3854                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3855         } else {
3856                 dm->backlight_caps[bl_idx].min_input_signal =
3857                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3858                 dm->backlight_caps[bl_idx].max_input_signal =
3859                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3860         }
3861 #else
3862         if (dm->backlight_caps[bl_idx].aux_support)
3863                 return;
3864
3865         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3866         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3867 #endif
3868 }
3869
3870 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3871                                 unsigned *min, unsigned *max)
3872 {
3873         if (!caps)
3874                 return 0;
3875
3876         if (caps->aux_support) {
3877                 // Firmware limits are in nits, DC API wants millinits.
3878                 *max = 1000 * caps->aux_max_input_signal;
3879                 *min = 1000 * caps->aux_min_input_signal;
3880         } else {
3881                 // Firmware limits are 8-bit, PWM control is 16-bit.
3882                 *max = 0x101 * caps->max_input_signal;
3883                 *min = 0x101 * caps->min_input_signal;
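                // e.g. 0x101 * 0xFF = 0xFFFF, so 8-bit full scale maps to 16-bit full scale.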
3884         }
3885         return 1;
3886 }
3887
3888 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3889                                         uint32_t brightness)
3890 {
3891         unsigned min, max;
3892
3893         if (!get_brightness_range(caps, &min, &max))
3894                 return brightness;
3895
3896         // Rescale 0..255 to min..max
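        // e.g. brightness = 128 with min = 0, max = 0xFFFF returns 32896.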
3897         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3898                                        AMDGPU_MAX_BL_LEVEL);
3899 }
3900
3901 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3902                                       uint32_t brightness)
3903 {
3904         unsigned min, max;
3905
3906         if (!get_brightness_range(caps, &min, &max))
3907                 return brightness;
3908
3909         if (brightness < min)
3910                 return 0;
3911         // Rescale min..max to 0..255
3912         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3913                                  max - min);
3914 }
3915
3916 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3917                                          int bl_idx,
3918                                          u32 user_brightness)
3919 {
3920         struct amdgpu_dm_backlight_caps caps;
3921         struct dc_link *link;
3922         u32 brightness;
3923         bool rc;
3924
3925         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3926         caps = dm->backlight_caps[bl_idx];
3927
3928         dm->brightness[bl_idx] = user_brightness;
3929         /* update scratch register */
3930         if (bl_idx == 0)
3931                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3932         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3933         link = (struct dc_link *)dm->backlight_link[bl_idx];
3934
3935         /* Change brightness based on AUX property */
3936         if (caps.aux_support) {
3937                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3938                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3939                 if (!rc)
3940                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3941         } else {
3942                 rc = dc_link_set_backlight_level(link, brightness, 0);
3943                 if (!rc)
3944                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3945         }
3946
3947         return rc ? 0 : 1;
3948 }
3949
3950 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3951 {
3952         struct amdgpu_display_manager *dm = bl_get_data(bd);
3953         int i;
3954
3955         for (i = 0; i < dm->num_of_edps; i++) {
3956                 if (bd == dm->backlight_dev[i])
3957                         break;
3958         }
3959         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3960                 i = 0;
3961         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3962
3963         return 0;
3964 }
3965
3966 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3967                                          int bl_idx)
3968 {
3969         struct amdgpu_dm_backlight_caps caps;
3970         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3971
3972         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3973         caps = dm->backlight_caps[bl_idx];
3974
3975         if (caps.aux_support) {
3976                 u32 avg, peak;
3977                 bool rc;
3978
3979                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3980                 if (!rc)
3981                         return dm->brightness[bl_idx];
3982                 return convert_brightness_to_user(&caps, avg);
3983         } else {
3984                 int ret = dc_link_get_backlight_level(link);
3985
3986                 if (ret == DC_ERROR_UNEXPECTED)
3987                         return dm->brightness[bl_idx];
3988                 return convert_brightness_to_user(&caps, ret);
3989         }
3990 }
3991
3992 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3993 {
3994         struct amdgpu_display_manager *dm = bl_get_data(bd);
3995         int i;
3996
3997         for (i = 0; i < dm->num_of_edps; i++) {
3998                 if (bd == dm->backlight_dev[i])
3999                         break;
4000         }
4001         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4002                 i = 0;
4003         return amdgpu_dm_backlight_get_level(dm, i);
4004 }
4005
4006 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4007         .options = BL_CORE_SUSPENDRESUME,
4008         .get_brightness = amdgpu_dm_backlight_get_brightness,
4009         .update_status  = amdgpu_dm_backlight_update_status,
4010 };
4011
4012 static void
4013 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4014 {
4015         char bl_name[16];
4016         struct backlight_properties props = { 0 };
4017
4018         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4019         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4020
4021         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4022         props.brightness = AMDGPU_MAX_BL_LEVEL;
4023         props.type = BACKLIGHT_RAW;
4024
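        /* e.g. the first eDP on DRM minor 0 is registered as amdgpu_bl0 */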
4025         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4026                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4027
4028         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4029                                                                        adev_to_drm(dm->adev)->dev,
4030                                                                        dm,
4031                                                                        &amdgpu_dm_backlight_ops,
4032                                                                        &props);
4033
4034         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4035                 DRM_ERROR("DM: Backlight registration failed!\n");
4036         else
4037                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4038 }
4039 #endif
4040
4041 static int initialize_plane(struct amdgpu_display_manager *dm,
4042                             struct amdgpu_mode_info *mode_info, int plane_id,
4043                             enum drm_plane_type plane_type,
4044                             const struct dc_plane_cap *plane_cap)
4045 {
4046         struct drm_plane *plane;
4047         unsigned long possible_crtcs;
4048         int ret = 0;
4049
4050         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4051         if (!plane) {
4052                 DRM_ERROR("KMS: Failed to allocate plane\n");
4053                 return -ENOMEM;
4054         }
4055         plane->type = plane_type;
4056
4057         /*
4058          * HACK: IGT tests expect that the primary plane for a CRTC
4059          * can only have one possible CRTC. Only expose support for
4060          * any CRTC if they're not going to be used as a primary plane
4061          * for a CRTC - like overlay or underlay planes.
4062          */
4063         possible_crtcs = 1 << plane_id;
4064         if (plane_id >= dm->dc->caps.max_streams)
4065                 possible_crtcs = 0xff;
4066
4067         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4068
4069         if (ret) {
4070                 DRM_ERROR("KMS: Failed to initialize plane\n");
4071                 kfree(plane);
4072                 return ret;
4073         }
4074
4075         if (mode_info)
4076                 mode_info->planes[plane_id] = plane;
4077
4078         return ret;
4079 }
4080
4081
4082 static void register_backlight_device(struct amdgpu_display_manager *dm,
4083                                       struct dc_link *link)
4084 {
4085 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4086         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4087
4088         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4089             link->type != dc_connection_none) {
4090                 /*
4091                  * Event if registration failed, we should continue with
4092                  * DM initialization because not having a backlight control
4093                  * is better then a black screen.
4094                  */
4095                 if (!dm->backlight_dev[dm->num_of_edps])
4096                         amdgpu_dm_register_backlight_device(dm);
4097
4098                 if (dm->backlight_dev[dm->num_of_edps]) {
4099                         dm->backlight_link[dm->num_of_edps] = link;
4100                         dm->num_of_edps++;
4101                 }
4102         }
4103 #endif
4104 }
4105
4106
4107 /*
4108  * In this architecture, the association
4109  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
4112  *
4113  * Returns 0 on success
4114  */
4115 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4116 {
4117         struct amdgpu_display_manager *dm = &adev->dm;
4118         int32_t i;
4119         struct amdgpu_dm_connector *aconnector = NULL;
4120         struct amdgpu_encoder *aencoder = NULL;
4121         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4122         uint32_t link_cnt;
4123         int32_t primary_planes;
4124         enum dc_connection_type new_connection_type = dc_connection_none;
4125         const struct dc_plane_cap *plane;
4126         bool psr_feature_enabled = false;
4127
4128         dm->display_indexes_num = dm->dc->caps.max_streams;
4129         /* Update the actual number of CRTCs in use */
4130         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4131
4132         link_cnt = dm->dc->caps.max_links;
4133         if (amdgpu_dm_mode_config_init(dm->adev)) {
4134                 DRM_ERROR("DM: Failed to initialize mode config\n");
4135                 return -EINVAL;
4136         }
4137
4138         /* There is one primary plane per CRTC */
4139         primary_planes = dm->dc->caps.max_streams;
4140         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4141
4142         /*
4143          * Initialize primary planes, implicit planes for legacy IOCTLs.
4144          * Order is reversed to match iteration order in atomic check.
4145          */
4146         for (i = (primary_planes - 1); i >= 0; i--) {
4147                 plane = &dm->dc->caps.planes[i];
4148
4149                 if (initialize_plane(dm, mode_info, i,
4150                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4151                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4152                         goto fail;
4153                 }
4154         }
4155
4156         /*
4157          * Initialize overlay planes, index starting after primary planes.
4158          * These planes have a higher DRM index than the primary planes since
4159          * they should be considered as having a higher z-order.
4160          * Order is reversed to match iteration order in atomic check.
4161          *
4162          * Only support DCN for now, and only expose one so we don't encourage
4163          * userspace to use up all the pipes.
4164          */
4165         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4166                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4167
4168                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4169                         continue;
4170
4171                 if (!plane->blends_with_above || !plane->blends_with_below)
4172                         continue;
4173
4174                 if (!plane->pixel_format_support.argb8888)
4175                         continue;
4176
4177                 if (initialize_plane(dm, NULL, primary_planes + i,
4178                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4179                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4180                         goto fail;
4181                 }
4182
4183                 /* Only create one overlay plane. */
4184                 break;
4185         }
4186
4187         for (i = 0; i < dm->dc->caps.max_streams; i++)
4188                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4189                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4190                         goto fail;
4191                 }
4192
4193 #if defined(CONFIG_DRM_AMD_DC_DCN)
4194         /* Use Outbox interrupt */
4195         switch (adev->ip_versions[DCE_HWIP][0]) {
4196         case IP_VERSION(3, 0, 0):
4197         case IP_VERSION(3, 1, 2):
4198         case IP_VERSION(3, 1, 3):
4199         case IP_VERSION(2, 1, 0):
4200                 if (register_outbox_irq_handlers(dm->adev)) {
4201                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4202                         goto fail;
4203                 }
4204                 break;
4205         default:
4206                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4207                               adev->ip_versions[DCE_HWIP][0]);
4208         }
4209
4210         /* Determine whether to enable PSR support by default. */
4211         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4212                 switch (adev->ip_versions[DCE_HWIP][0]) {
4213                 case IP_VERSION(3, 1, 2):
4214                 case IP_VERSION(3, 1, 3):
4215                         psr_feature_enabled = true;
4216                         break;
4217                 default:
4218                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4219                         break;
4220                 }
4221         }
4222 #endif
4223
4224         /* Loop over all connectors on the board */
4225         for (i = 0; i < link_cnt; i++) {
4226                 struct dc_link *link = NULL;
4227
4228                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4229                         DRM_ERROR(
4230                                 "KMS: Cannot support more than %d display indexes\n",
4231                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4232                         continue;
4233                 }
4234
4235                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4236                 if (!aconnector)
4237                         goto fail;
4238
4239                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4240                 if (!aencoder)
4241                         goto fail;
4242
4243                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4244                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4245                         goto fail;
4246                 }
4247
4248                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4249                         DRM_ERROR("KMS: Failed to initialize connector\n");
4250                         goto fail;
4251                 }
4252
4253                 link = dc_get_link_at_index(dm->dc, i);
4254
4255                 if (!dc_link_detect_sink(link, &new_connection_type))
4256                         DRM_ERROR("KMS: Failed to detect connector\n");
4257
4258                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4259                         emulated_link_detect(link);
4260                         amdgpu_dm_update_connector_after_detect(aconnector);
4261
4262                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4263                         amdgpu_dm_update_connector_after_detect(aconnector);
4264                         register_backlight_device(dm, link);
4265                         if (dm->num_of_edps)
4266                                 update_connector_ext_caps(aconnector);
4267                         if (psr_feature_enabled)
4268                                 amdgpu_dm_set_psr_caps(link);
4269                 }
4270
4272         }
4273
4274         /* Software is initialized. Now we can register interrupt handlers. */
4275         switch (adev->asic_type) {
4276 #if defined(CONFIG_DRM_AMD_DC_SI)
4277         case CHIP_TAHITI:
4278         case CHIP_PITCAIRN:
4279         case CHIP_VERDE:
4280         case CHIP_OLAND:
4281                 if (dce60_register_irq_handlers(dm->adev)) {
4282                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4283                         goto fail;
4284                 }
4285                 break;
4286 #endif
4287         case CHIP_BONAIRE:
4288         case CHIP_HAWAII:
4289         case CHIP_KAVERI:
4290         case CHIP_KABINI:
4291         case CHIP_MULLINS:
4292         case CHIP_TONGA:
4293         case CHIP_FIJI:
4294         case CHIP_CARRIZO:
4295         case CHIP_STONEY:
4296         case CHIP_POLARIS11:
4297         case CHIP_POLARIS10:
4298         case CHIP_POLARIS12:
4299         case CHIP_VEGAM:
4300         case CHIP_VEGA10:
4301         case CHIP_VEGA12:
4302         case CHIP_VEGA20:
4303                 if (dce110_register_irq_handlers(dm->adev)) {
4304                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4305                         goto fail;
4306                 }
4307                 break;
4308         default:
4309 #if defined(CONFIG_DRM_AMD_DC_DCN)
4310                 switch (adev->ip_versions[DCE_HWIP][0]) {
4311                 case IP_VERSION(1, 0, 0):
4312                 case IP_VERSION(1, 0, 1):
4313                 case IP_VERSION(2, 0, 2):
4314                 case IP_VERSION(2, 0, 3):
4315                 case IP_VERSION(2, 0, 0):
4316                 case IP_VERSION(2, 1, 0):
4317                 case IP_VERSION(3, 0, 0):
4318                 case IP_VERSION(3, 0, 2):
4319                 case IP_VERSION(3, 0, 3):
4320                 case IP_VERSION(3, 0, 1):
4321                 case IP_VERSION(3, 1, 2):
4322                 case IP_VERSION(3, 1, 3):
4323                         if (dcn10_register_irq_handlers(dm->adev)) {
4324                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4325                                 goto fail;
4326                         }
4327                         break;
4328                 default:
4329                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4330                                         adev->ip_versions[DCE_HWIP][0]);
4331                         goto fail;
4332                 }
4333 #endif
4334                 break;
4335         }
4336
4337         return 0;
4338 fail:
4339         kfree(aencoder);
4340         kfree(aconnector);
4341
4342         return -EINVAL;
4343 }
4344
4345 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4346 {
4347         drm_atomic_private_obj_fini(&dm->atomic_obj);
4349 }
4350
4351 /******************************************************************************
4352  * amdgpu_display_funcs functions
4353  *****************************************************************************/
4354
4355 /*
4356  * dm_bandwidth_update - program display watermarks
4357  *
4358  * @adev: amdgpu_device pointer
4359  *
4360  * Calculate and program the display watermarks and line buffer allocation.
4361  */
4362 static void dm_bandwidth_update(struct amdgpu_device *adev)
4363 {
4364         /* TODO: implement later */
4365 }
4366
4367 static const struct amdgpu_display_funcs dm_display_funcs = {
4368         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4369         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4370         .backlight_set_level = NULL, /* never called for DC */
4371         .backlight_get_level = NULL, /* never called for DC */
4372         .hpd_sense = NULL,/* called unconditionally */
4373         .hpd_set_polarity = NULL, /* called unconditionally */
4374         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4375         .page_flip_get_scanoutpos =
4376                 dm_crtc_get_scanoutpos,/* called unconditionally */
4377         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4378         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4379 };
4380
4381 #if defined(CONFIG_DEBUG_KERNEL_DC)
4382
4383 static ssize_t s3_debug_store(struct device *device,
4384                               struct device_attribute *attr,
4385                               const char *buf,
4386                               size_t count)
4387 {
4388         int ret;
4389         int s3_state;
4390         struct drm_device *drm_dev = dev_get_drvdata(device);
4391         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4392
4393         ret = kstrtoint(buf, 0, &s3_state);
4394
4395         if (ret == 0) {
4396                 if (s3_state) {
4397                         dm_resume(adev);
4398                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4399                 } else
4400                         dm_suspend(adev);
4401         }
4402
4403         return ret == 0 ? count : ret;
4404 }
4405
4406 DEVICE_ATTR_WO(s3_debug);
4407
4408 #endif
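/*
 * Usage sketch (illustrative path; the attribute is created on the DRM
 * device's parent, typically the PCI device):
 *
 *   echo 0 > /sys/class/drm/card0/device/s3_debug   # dm_suspend()
 *   echo 1 > /sys/class/drm/card0/device/s3_debug   # dm_resume() + hotplug event
 *
 * Only available when built with CONFIG_DEBUG_KERNEL_DC.
 */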
4409
4410 static int dm_early_init(void *handle)
4411 {
4412         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4413
4414         switch (adev->asic_type) {
4415 #if defined(CONFIG_DRM_AMD_DC_SI)
4416         case CHIP_TAHITI:
4417         case CHIP_PITCAIRN:
4418         case CHIP_VERDE:
4419                 adev->mode_info.num_crtc = 6;
4420                 adev->mode_info.num_hpd = 6;
4421                 adev->mode_info.num_dig = 6;
4422                 break;
4423         case CHIP_OLAND:
4424                 adev->mode_info.num_crtc = 2;
4425                 adev->mode_info.num_hpd = 2;
4426                 adev->mode_info.num_dig = 2;
4427                 break;
4428 #endif
4429         case CHIP_BONAIRE:
4430         case CHIP_HAWAII:
4431                 adev->mode_info.num_crtc = 6;
4432                 adev->mode_info.num_hpd = 6;
4433                 adev->mode_info.num_dig = 6;
4434                 break;
4435         case CHIP_KAVERI:
4436                 adev->mode_info.num_crtc = 4;
4437                 adev->mode_info.num_hpd = 6;
4438                 adev->mode_info.num_dig = 7;
4439                 break;
4440         case CHIP_KABINI:
4441         case CHIP_MULLINS:
4442                 adev->mode_info.num_crtc = 2;
4443                 adev->mode_info.num_hpd = 6;
4444                 adev->mode_info.num_dig = 6;
4445                 break;
4446         case CHIP_FIJI:
4447         case CHIP_TONGA:
4448                 adev->mode_info.num_crtc = 6;
4449                 adev->mode_info.num_hpd = 6;
4450                 adev->mode_info.num_dig = 7;
4451                 break;
4452         case CHIP_CARRIZO:
4453                 adev->mode_info.num_crtc = 3;
4454                 adev->mode_info.num_hpd = 6;
4455                 adev->mode_info.num_dig = 9;
4456                 break;
4457         case CHIP_STONEY:
4458                 adev->mode_info.num_crtc = 2;
4459                 adev->mode_info.num_hpd = 6;
4460                 adev->mode_info.num_dig = 9;
4461                 break;
4462         case CHIP_POLARIS11:
4463         case CHIP_POLARIS12:
4464                 adev->mode_info.num_crtc = 5;
4465                 adev->mode_info.num_hpd = 5;
4466                 adev->mode_info.num_dig = 5;
4467                 break;
4468         case CHIP_POLARIS10:
4469         case CHIP_VEGAM:
4470                 adev->mode_info.num_crtc = 6;
4471                 adev->mode_info.num_hpd = 6;
4472                 adev->mode_info.num_dig = 6;
4473                 break;
4474         case CHIP_VEGA10:
4475         case CHIP_VEGA12:
4476         case CHIP_VEGA20:
4477                 adev->mode_info.num_crtc = 6;
4478                 adev->mode_info.num_hpd = 6;
4479                 adev->mode_info.num_dig = 6;
4480                 break;
4481         default:
4482 #if defined(CONFIG_DRM_AMD_DC_DCN)
4483                 switch (adev->ip_versions[DCE_HWIP][0]) {
4484                 case IP_VERSION(2, 0, 2):
4485                 case IP_VERSION(3, 0, 0):
4486                         adev->mode_info.num_crtc = 6;
4487                         adev->mode_info.num_hpd = 6;
4488                         adev->mode_info.num_dig = 6;
4489                         break;
4490                 case IP_VERSION(2, 0, 0):
4491                 case IP_VERSION(3, 0, 2):
4492                         adev->mode_info.num_crtc = 5;
4493                         adev->mode_info.num_hpd = 5;
4494                         adev->mode_info.num_dig = 5;
4495                         break;
4496                 case IP_VERSION(2, 0, 3):
4497                 case IP_VERSION(3, 0, 3):
4498                         adev->mode_info.num_crtc = 2;
4499                         adev->mode_info.num_hpd = 2;
4500                         adev->mode_info.num_dig = 2;
4501                         break;
4502                 case IP_VERSION(1, 0, 0):
4503                 case IP_VERSION(1, 0, 1):
4504                 case IP_VERSION(3, 0, 1):
4505                 case IP_VERSION(2, 1, 0):
4506                 case IP_VERSION(3, 1, 2):
4507                 case IP_VERSION(3, 1, 3):
4508                         adev->mode_info.num_crtc = 4;
4509                         adev->mode_info.num_hpd = 4;
4510                         adev->mode_info.num_dig = 4;
4511                         break;
4512                 default:
4513                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4514                                         adev->ip_versions[DCE_HWIP][0]);
4515                         return -EINVAL;
4516                 }
4517 #endif
4518                 break;
4519         }
4520
4521         amdgpu_dm_set_irq_funcs(adev);
4522
4523         if (adev->mode_info.funcs == NULL)
4524                 adev->mode_info.funcs = &dm_display_funcs;
4525
4526         /*
4527          * Note: Do NOT change adev->audio_endpt_rreg and
4528          * adev->audio_endpt_wreg because they are initialised in
4529          * amdgpu_device_init()
4530          */
4531 #if defined(CONFIG_DEBUG_KERNEL_DC)
4532         device_create_file(
4533                 adev_to_drm(adev)->dev,
4534                 &dev_attr_s3_debug);
4535 #endif
4536
4537         return 0;
4538 }
4539
4540 static bool modeset_required(struct drm_crtc_state *crtc_state,
4541                              struct dc_stream_state *new_stream,
4542                              struct dc_stream_state *old_stream)
4543 {
4544         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4545 }
4546
4547 static bool modereset_required(struct drm_crtc_state *crtc_state)
4548 {
4549         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4550 }
4551
4552 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4553 {
4554         drm_encoder_cleanup(encoder);
4555         kfree(encoder);
4556 }
4557
4558 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4559         .destroy = amdgpu_dm_encoder_destroy,
4560 };
4561
4562
4563 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4564                                          struct drm_framebuffer *fb,
4565                                          int *min_downscale, int *max_upscale)
4566 {
4567         struct amdgpu_device *adev = drm_to_adev(dev);
4568         struct dc *dc = adev->dm.dc;
4569         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4570         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4571
4572         switch (fb->format->format) {
4573         case DRM_FORMAT_P010:
4574         case DRM_FORMAT_NV12:
4575         case DRM_FORMAT_NV21:
4576                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4577                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4578                 break;
4579
4580         case DRM_FORMAT_XRGB16161616F:
4581         case DRM_FORMAT_ARGB16161616F:
4582         case DRM_FORMAT_XBGR16161616F:
4583         case DRM_FORMAT_ABGR16161616F:
4584                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4585                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4586                 break;
4587
4588         default:
4589                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4590                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4591                 break;
4592         }
4593
4594         /*
4595          * A factor of 1 in the plane_cap means scaling is not allowed,
4596          * i.e. use a scaling factor of exactly 1.0 == 1000 units.
4597          */
4598         if (*max_upscale == 1)
4599                 *max_upscale = 1000;
4600
4601         if (*min_downscale == 1)
4602                 *min_downscale = 1000;
4603 }
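/*
 * Example (assumed cap values): with max_upscale_factor.nv12 == 16000
 * and max_downscale_factor.nv12 == 250, an NV12 plane may be scaled
 * anywhere between 0.25x and 16.0x. A raw cap of 1 is normalized to
 * 1000 above, i.e. exactly 1.0x, meaning no scaling for that format.
 */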
4604
4605
4606 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4607                                 const struct drm_plane_state *state,
4608                                 struct dc_scaling_info *scaling_info)
4609 {
4610         int scale_w, scale_h, min_downscale, max_upscale;
4611
4612         memset(scaling_info, 0, sizeof(*scaling_info));
4613
4614         /* Source coordinates are 16.16 fixed point; ignore the fractional part for now. */
4615         scaling_info->src_rect.x = state->src_x >> 16;
4616         scaling_info->src_rect.y = state->src_y >> 16;
4617
4618         /*
4619          * For reasons we don't (yet) fully understand, a non-zero
4620          * src_y coordinate into an NV12 buffer can cause a
4621          * system hang on DCN1x.
4622          * To avoid hangs (and maybe be overly cautious),
4623          * let's reject both non-zero src_x and src_y.
4624          *
4625          * We currently know of only one use-case to reproduce a
4626          * scenario with non-zero src_x and src_y for NV12, which
4627          * is to gesture the YouTube Android app into full screen
4628          * on ChromeOS.
4629          */
4630         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4631             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4632             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4633             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4634                 return -EINVAL;
4635
4636         scaling_info->src_rect.width = state->src_w >> 16;
4637         if (scaling_info->src_rect.width == 0)
4638                 return -EINVAL;
4639
4640         scaling_info->src_rect.height = state->src_h >> 16;
4641         if (scaling_info->src_rect.height == 0)
4642                 return -EINVAL;
4643
4644         scaling_info->dst_rect.x = state->crtc_x;
4645         scaling_info->dst_rect.y = state->crtc_y;
4646
4647         if (state->crtc_w == 0)
4648                 return -EINVAL;
4649
4650         scaling_info->dst_rect.width = state->crtc_w;
4651
4652         if (state->crtc_h == 0)
4653                 return -EINVAL;
4654
4655         scaling_info->dst_rect.height = state->crtc_h;
4656
4657         /* DRM doesn't specify clipping on destination output. */
4658         scaling_info->clip_rect = scaling_info->dst_rect;
4659
4660         /* Validate scaling per-format with DC plane caps */
4661         if (state->plane && state->plane->dev && state->fb) {
4662                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4663                                              &min_downscale, &max_upscale);
4664         } else {
4665                 min_downscale = 250;
4666                 max_upscale = 16000;
4667         }
4668
4669         scale_w = scaling_info->dst_rect.width * 1000 /
4670                   scaling_info->src_rect.width;
4671
4672         if (scale_w < min_downscale || scale_w > max_upscale)
4673                 return -EINVAL;
4674
4675         scale_h = scaling_info->dst_rect.height * 1000 /
4676                   scaling_info->src_rect.height;
4677
4678         if (scale_h < min_downscale || scale_h > max_upscale)
4679                 return -EINVAL;
4680
4681         /*
4682          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4683          * assume reasonable defaults based on the format.
4684          */
4685
4686         return 0;
4687 }
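/*
 * Worked example (editor's illustration): scanning a 1920x1080 source
 * out to a 960x540 CRTC rectangle. DRM src_* values are 16.16 fixed
 * point, so:
 *
 *   src_rect.width = (1920 << 16) >> 16 = 1920
 *   scale_w = 960 * 1000 / 1920 = 500          (0.5x)
 *
 * 500 lies within the fallback limits above (min 250, max 16000), so
 * the state is accepted; anything below a 0.25x downscale is rejected
 * with -EINVAL.
 */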
4688
4689 static void
4690 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4691                                  uint64_t tiling_flags)
4692 {
4693         /* Fill GFX8 params */
4694         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4695                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4696
4697                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4698                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4699                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4700                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4701                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4702
4703                 /* XXX fix me for VI */
4704                 tiling_info->gfx8.num_banks = num_banks;
4705                 tiling_info->gfx8.array_mode =
4706                                 DC_ARRAY_2D_TILED_THIN1;
4707                 tiling_info->gfx8.tile_split = tile_split;
4708                 tiling_info->gfx8.bank_width = bankw;
4709                 tiling_info->gfx8.bank_height = bankh;
4710                 tiling_info->gfx8.tile_aspect = mtaspect;
4711                 tiling_info->gfx8.tile_mode =
4712                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4713         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4714                         == DC_ARRAY_1D_TILED_THIN1) {
4715                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4716         }
4717
4718         tiling_info->gfx8.pipe_config =
4719                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4720 }
4721
4722 static void
4723 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4724                                   union dc_tiling_info *tiling_info)
4725 {
4726         tiling_info->gfx9.num_pipes =
4727                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4728         tiling_info->gfx9.num_banks =
4729                 adev->gfx.config.gb_addr_config_fields.num_banks;
4730         tiling_info->gfx9.pipe_interleave =
4731                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4732         tiling_info->gfx9.num_shader_engines =
4733                 adev->gfx.config.gb_addr_config_fields.num_se;
4734         tiling_info->gfx9.max_compressed_frags =
4735                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4736         tiling_info->gfx9.num_rb_per_se =
4737                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4738         tiling_info->gfx9.shaderEnable = 1;
4739         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4740                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4741 }
4742
4743 static int
4744 validate_dcc(struct amdgpu_device *adev,
4745              const enum surface_pixel_format format,
4746              const enum dc_rotation_angle rotation,
4747              const union dc_tiling_info *tiling_info,
4748              const struct dc_plane_dcc_param *dcc,
4749              const struct dc_plane_address *address,
4750              const struct plane_size *plane_size)
4751 {
4752         struct dc *dc = adev->dm.dc;
4753         struct dc_dcc_surface_param input;
4754         struct dc_surface_dcc_cap output;
4755
4756         memset(&input, 0, sizeof(input));
4757         memset(&output, 0, sizeof(output));
4758
4759         if (!dcc->enable)
4760                 return 0;
4761
4762         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4763             !dc->cap_funcs.get_dcc_compression_cap)
4764                 return -EINVAL;
4765
4766         input.format = format;
4767         input.surface_size.width = plane_size->surface_size.width;
4768         input.surface_size.height = plane_size->surface_size.height;
4769         input.swizzle_mode = tiling_info->gfx9.swizzle;
4770
4771         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4772                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4773         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4774                 input.scan = SCAN_DIRECTION_VERTICAL;
4775
4776         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4777                 return -EINVAL;
4778
4779         if (!output.capable)
4780                 return -EINVAL;
4781
4782         if (dcc->independent_64b_blks == 0 &&
4783             output.grph.rgb.independent_64b_blks != 0)
4784                 return -EINVAL;
4785
4786         return 0;
4787 }
4788
4789 static bool
4790 modifier_has_dcc(uint64_t modifier)
4791 {
4792         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4793 }
4794
4795 static unsigned
4796 modifier_gfx9_swizzle_mode(uint64_t modifier)
4797 {
4798         if (modifier == DRM_FORMAT_MOD_LINEAR)
4799                 return 0;
4800
4801         return AMD_FMT_MOD_GET(TILE, modifier);
4802 }
4803
4804 static const struct drm_format_info *
4805 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4806 {
4807         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4808 }
4809
4810 static void
4811 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4812                                     union dc_tiling_info *tiling_info,
4813                                     uint64_t modifier)
4814 {
4815         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4816         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4817         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4818         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4819
4820         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4821
4822         if (!IS_AMD_FMT_MOD(modifier))
4823                 return;
4824
4825         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4826         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4827
4828         if (adev->family >= AMDGPU_FAMILY_NV) {
4829                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4830         } else {
4831                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4832
4833                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4834         }
4835 }
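/*
 * Illustration for an AMD modifier carrying an assumed
 * PIPE_XOR_BITS == 6:
 *
 *   pipes_log2 = min(4u, 6) = 4       -> num_pipes = 1 << 4 = 16
 *   num_shader_engines = 1 << (6 - 4) = 4
 *
 * i.e. XOR bits beyond the first four pipe bits are attributed to
 * shader engines.
 */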
4836
4837 enum dm_micro_swizzle {
4838         MICRO_SWIZZLE_Z = 0,
4839         MICRO_SWIZZLE_S = 1,
4840         MICRO_SWIZZLE_D = 2,
4841         MICRO_SWIZZLE_R = 3
4842 };
4843
4844 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4845                                           uint32_t format,
4846                                           uint64_t modifier)
4847 {
4848         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4849         const struct drm_format_info *info = drm_format_info(format);
4850         int i;
4851
4852         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4853
4854         if (!info)
4855                 return false;
4856
4857         /*
4858          * We always have to allow these modifiers:
4859          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4860          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4861          */
4862         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4863             modifier == DRM_FORMAT_MOD_INVALID) {
4864                 return true;
4865         }
4866
4867         /* Check that the modifier is on the list of the plane's supported modifiers. */
4868         for (i = 0; i < plane->modifier_count; i++) {
4869                 if (modifier == plane->modifiers[i])
4870                         break;
4871         }
4872         if (i == plane->modifier_count)
4873                 return false;
4874
4875         /*
4876          * For D swizzle the canonical modifier depends on the bpp, so check
4877          * it here.
4878          */
4879         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4880             adev->family >= AMDGPU_FAMILY_NV) {
4881                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4882                         return false;
4883         }
4884
4885         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4886             info->cpp[0] < 8)
4887                 return false;
4888
4889         if (modifier_has_dcc(modifier)) {
4890                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4891                 if (info->cpp[0] != 4)
4892                         return false;
4893                 /* We support multi-planar formats, but not when combined with
4894                  * additional DCC metadata planes. */
4895                 if (info->num_planes > 1)
4896                         return false;
4897         }
4898
4899         return true;
4900 }
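/*
 * Decode sketch (values from drm_fourcc.h, shown for illustration):
 * the low two bits of the GFX9 TILE field select the micro swizzle,
 * e.g. AMD_FMT_MOD_TILE_GFX9_64K_D_X & 3 == MICRO_SWIZZLE_D, which is
 * why a 32bpp (cpp[0] == 4) format with a _D swizzle is rejected on
 * GFX9-tiled NV-family parts by the check above.
 */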
4901
4902 static void
4903 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4904 {
4905         if (!*mods)
4906                 return;
4907
4908         if (*cap - *size < 1) {
4909                 uint64_t new_cap = *cap * 2;
4910                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4911
4912                 if (!new_mods) {
4913                         kfree(*mods);
4914                         *mods = NULL;
4915                         return;
4916                 }
4917
4918                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4919                 kfree(*mods);
4920                 *mods = new_mods;
4921                 *cap = new_cap;
4922         }
4923
4924         (*mods)[*size] = mod;
4925         *size += 1;
4926 }
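/*
 * Growth sketch: starting from the capacity of 128 chosen in
 * get_plane_modifiers(), the array doubles whenever it fills
 * (128 -> 256 -> 512 -> ...). An allocation failure sets *mods to
 * NULL, turning every later add_modifier() call into a no-op, so the
 * caller can report -ENOMEM once at the end.
 */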
4927
4928 static void
4929 add_gfx9_modifiers(const struct amdgpu_device *adev,
4930                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4931 {
4932         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4933         int pipe_xor_bits = min(8, pipes +
4934                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4935         int bank_xor_bits = min(8 - pipe_xor_bits,
4936                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4937         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4938                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4939
4940
4941         if (adev->family == AMDGPU_FAMILY_RV) {
4942                 /* Raven2 and later */
4943                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4944
4945                 /*
4946                  * No _D DCC swizzles yet because we only allow 32bpp, which
4947                  * doesn't support _D on DCN
4948                  */
4949
4950                 if (has_constant_encode) {
4951                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4952                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4953                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4954                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4955                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4956                                     AMD_FMT_MOD_SET(DCC, 1) |
4957                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4958                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4959                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4960                 }
4961
4962                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4963                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4964                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4965                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4966                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4967                             AMD_FMT_MOD_SET(DCC, 1) |
4968                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4969                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4970                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4971
4972                 if (has_constant_encode) {
4973                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4974                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4975                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4976                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4977                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4978                                     AMD_FMT_MOD_SET(DCC, 1) |
4979                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4980                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4981                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4982
4983                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4984                                     AMD_FMT_MOD_SET(RB, rb) |
4985                                     AMD_FMT_MOD_SET(PIPE, pipes));
4986                 }
4987
4988                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4989                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4990                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4991                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4992                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4993                             AMD_FMT_MOD_SET(DCC, 1) |
4994                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4995                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4996                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4997                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4998                             AMD_FMT_MOD_SET(RB, rb) |
4999                             AMD_FMT_MOD_SET(PIPE, pipes));
5000         }
5001
5002         /*
5003          * Only supported for 64bpp on Raven, will be filtered on format in
5004          * dm_plane_format_mod_supported.
5005          */
5006         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5007                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5008                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5009                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5010                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5011
5012         if (adev->family == AMDGPU_FAMILY_RV) {
5013                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5014                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5015                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5016                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5017                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5018         }
5019
5020         /*
5021          * Only supported for 64bpp on Raven, will be filtered on format in
5022          * dm_plane_format_mod_supported.
5023          */
5024         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5025                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5026                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5027
5028         if (adev->family == AMDGPU_FAMILY_RV) {
5029                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5030                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5031                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5032         }
5033 }
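/*
 * Example with an assumed Vega-like config (num_pipes == 4,
 * num_se == 4, num_banks == 16, num_rb_per_se == 4):
 *
 *   pipes         = ilog2(4) = 2
 *   pipe_xor_bits = min(8, 2 + ilog2(4)) = 4
 *   bank_xor_bits = min(8 - 4, ilog2(16)) = 4
 *   rb            = ilog2(4) + ilog2(4) = 4
 */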
5034
5035 static void
5036 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5037                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5038 {
5039         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5040
5041         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5042                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5043                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5044                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5045                     AMD_FMT_MOD_SET(DCC, 1) |
5046                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5047                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5048                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5049
5050         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5051                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5052                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5053                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5054                     AMD_FMT_MOD_SET(DCC, 1) |
5055                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5056                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5057                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5058                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5059
5060         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5061                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5062                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5063                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5064
5065         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5066                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5067                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5068                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5069
5070
5071         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5072         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5073                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5074                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5075
5076         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5077                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5078                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5079 }
5080
5081 static void
5082 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5083                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5084 {
5085         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5086         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5087
5088         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5089                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5090                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5091                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5092                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5093                     AMD_FMT_MOD_SET(DCC, 1) |
5094                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5095                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5096                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5097                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5098
5099         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5100                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5101                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5102                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5103                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5104                     AMD_FMT_MOD_SET(DCC, 1) |
5105                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5106                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5107                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5108
5109         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5111                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5112                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5113                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5114                     AMD_FMT_MOD_SET(DCC, 1) |
5115                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5116                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5117                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5118                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5119                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5120
5121         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5122                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5123                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5124                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5125                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5126                     AMD_FMT_MOD_SET(DCC, 1) |
5127                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5128                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5129                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5130                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5131
5132         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5133                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5134                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5135                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5136                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5137
5138         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5139                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5140                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5141                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5142                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5143
5144         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5145         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5147                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5148
5149         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5150                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5151                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5152 }
5153
5154 static int
5155 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5156 {
5157         uint64_t size = 0, capacity = 128;
5158         *mods = NULL;
5159
5160         /* We have not hooked up any pre-GFX9 modifiers. */
5161         if (adev->family < AMDGPU_FAMILY_AI)
5162                 return 0;
5163
5164         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5165
5166         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5167                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5168                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5169                 return *mods ? 0 : -ENOMEM;
5170         }
5171
5172         switch (adev->family) {
5173         case AMDGPU_FAMILY_AI:
5174         case AMDGPU_FAMILY_RV:
5175                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5176                 break;
5177         case AMDGPU_FAMILY_NV:
5178         case AMDGPU_FAMILY_VGH:
5179         case AMDGPU_FAMILY_YC:
5180                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5181                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5182                 else
5183                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5184                 break;
5185         }
5186
5187         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5188
5189         /* INVALID marks the end of the list. */
5190         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5191
5192         if (!*mods)
5193                 return -ENOMEM;
5194
5195         return 0;
5196 }
5197
5198 static int
5199 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5200                                           const struct amdgpu_framebuffer *afb,
5201                                           const enum surface_pixel_format format,
5202                                           const enum dc_rotation_angle rotation,
5203                                           const struct plane_size *plane_size,
5204                                           union dc_tiling_info *tiling_info,
5205                                           struct dc_plane_dcc_param *dcc,
5206                                           struct dc_plane_address *address,
5207                                           const bool force_disable_dcc)
5208 {
5209         const uint64_t modifier = afb->base.modifier;
5210         int ret = 0;
5211
5212         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5213         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5214
5215         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5216                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5217                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5218                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5219
5220                 dcc->enable = 1;
5221                 dcc->meta_pitch = afb->base.pitches[1];
5222                 dcc->independent_64b_blks = independent_64b_blks;
5223                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5224                         if (independent_64b_blks && independent_128b_blks)
5225                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5226                         else if (independent_128b_blks)
5227                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5228                         else if (independent_64b_blks && !independent_128b_blks)
5229                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5230                         else
5231                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5232                 } else {
5233                         if (independent_64b_blks)
5234                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5235                         else
5236                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5237                 }
5238
5239                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5240                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5241         }
5242
5243         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5244         if (ret)
5245                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5246
5247         return ret;
5248 }
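/*
 * Summary of the dcc_ind_blk selection above (GFX10_RBPLUS tiling):
 *
 *   64B && 128B independent blocks -> hubp_ind_block_64b_no_128bcl
 *   128B only                      -> hubp_ind_block_128b
 *   64B only                       -> hubp_ind_block_64b
 *   neither                        -> hubp_ind_block_unconstrained
 *
 * Older tile versions only distinguish 64B vs. unconstrained.
 */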
5249
5250 static int
5251 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5252                              const struct amdgpu_framebuffer *afb,
5253                              const enum surface_pixel_format format,
5254                              const enum dc_rotation_angle rotation,
5255                              const uint64_t tiling_flags,
5256                              union dc_tiling_info *tiling_info,
5257                              struct plane_size *plane_size,
5258                              struct dc_plane_dcc_param *dcc,
5259                              struct dc_plane_address *address,
5260                              bool tmz_surface,
5261                              bool force_disable_dcc)
5262 {
5263         const struct drm_framebuffer *fb = &afb->base;
5264         int ret;
5265
5266         memset(tiling_info, 0, sizeof(*tiling_info));
5267         memset(plane_size, 0, sizeof(*plane_size));
5268         memset(dcc, 0, sizeof(*dcc));
5269         memset(address, 0, sizeof(*address));
5270
5271         address->tmz_surface = tmz_surface;
5272
5273         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5274                 uint64_t addr = afb->address + fb->offsets[0];
5275
5276                 plane_size->surface_size.x = 0;
5277                 plane_size->surface_size.y = 0;
5278                 plane_size->surface_size.width = fb->width;
5279                 plane_size->surface_size.height = fb->height;
5280                 plane_size->surface_pitch =
5281                         fb->pitches[0] / fb->format->cpp[0];
5282
5283                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5284                 address->grph.addr.low_part = lower_32_bits(addr);
5285                 address->grph.addr.high_part = upper_32_bits(addr);
5286         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5287                 uint64_t luma_addr = afb->address + fb->offsets[0];
5288                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5289
5290                 plane_size->surface_size.x = 0;
5291                 plane_size->surface_size.y = 0;
5292                 plane_size->surface_size.width = fb->width;
5293                 plane_size->surface_size.height = fb->height;
5294                 plane_size->surface_pitch =
5295                         fb->pitches[0] / fb->format->cpp[0];
5296
5297                 plane_size->chroma_size.x = 0;
5298                 plane_size->chroma_size.y = 0;
5299                 /* TODO: set these based on surface format */
5300                 plane_size->chroma_size.width = fb->width / 2;
5301                 plane_size->chroma_size.height = fb->height / 2;
5302
5303                 plane_size->chroma_pitch =
5304                         fb->pitches[1] / fb->format->cpp[1];
5305
5306                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5307                 address->video_progressive.luma_addr.low_part =
5308                         lower_32_bits(luma_addr);
5309                 address->video_progressive.luma_addr.high_part =
5310                         upper_32_bits(luma_addr);
5311                 address->video_progressive.chroma_addr.low_part =
5312                         lower_32_bits(chroma_addr);
5313                 address->video_progressive.chroma_addr.high_part =
5314                         upper_32_bits(chroma_addr);
5315         }
5316
5317         if (adev->family >= AMDGPU_FAMILY_AI) {
5318                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5319                                                                 rotation, plane_size,
5320                                                                 tiling_info, dcc,
5321                                                                 address,
5322                                                                 force_disable_dcc);
5323                 if (ret)
5324                         return ret;
5325         } else {
5326                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5327         }
5328
5329         return 0;
5330 }
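/*
 * Example (illustrative NV12 layout): for a 1920x1080 DRM_FORMAT_NV12
 * framebuffer, cpp[0] == 1 (Y) and cpp[1] == 2 (interleaved CbCr), so
 * assuming pitches[0] == pitches[1] == 1920 bytes:
 *
 *   surface_pitch = 1920 / 1 = 1920 pixels
 *   chroma_size   = 960x540
 *   chroma_pitch  = 1920 / 2 = 960 pixels
 *
 * with luma at offsets[0] and chroma at offsets[1] of the same BO.
 */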
5331
5332 static void
5333 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5334                                bool *per_pixel_alpha, bool *global_alpha,
5335                                int *global_alpha_value)
5336 {
5337         *per_pixel_alpha = false;
5338         *global_alpha = false;
5339         *global_alpha_value = 0xff;
5340
5341         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5342                 return;
5343
5344         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5345                 static const uint32_t alpha_formats[] = {
5346                         DRM_FORMAT_ARGB8888,
5347                         DRM_FORMAT_RGBA8888,
5348                         DRM_FORMAT_ABGR8888,
5349                 };
5350                 uint32_t format = plane_state->fb->format->format;
5351                 unsigned int i;
5352
5353                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5354                         if (format == alpha_formats[i]) {
5355                                 *per_pixel_alpha = true;
5356                                 break;
5357                         }
5358                 }
5359         }
5360
5361         if (plane_state->alpha < 0xffff) {
5362                 *global_alpha = true;
5363                 *global_alpha_value = plane_state->alpha >> 8;
5364         }
5365 }
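
/*
 * A minimal sketch of the alpha conversion above (an illustrative helper,
 * not used by the driver): DRM stores plane alpha as a 16-bit value where
 * 0xffff is fully opaque, while DC consumes an 8-bit value, so only the
 * high byte is kept.
 */
static inline int dm_drm_alpha_to_dc_alpha(uint16_t drm_alpha)
{
	/* e.g. 0x8000 (~50% opaque) maps to 0x80 */
	return drm_alpha >> 8;
}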
5366
5367 static int
5368 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5369                             const enum surface_pixel_format format,
5370                             enum dc_color_space *color_space)
5371 {
5372         bool full_range;
5373
5374         *color_space = COLOR_SPACE_SRGB;
5375
5376         /* DRM color properties only affect non-RGB formats. */
5377         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5378                 return 0;
5379
5380         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5381
5382         switch (plane_state->color_encoding) {
5383         case DRM_COLOR_YCBCR_BT601:
5384                 if (full_range)
5385                         *color_space = COLOR_SPACE_YCBCR601;
5386                 else
5387                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5388                 break;
5389
5390         case DRM_COLOR_YCBCR_BT709:
5391                 if (full_range)
5392                         *color_space = COLOR_SPACE_YCBCR709;
5393                 else
5394                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5395                 break;
5396
5397         case DRM_COLOR_YCBCR_BT2020:
5398                 if (full_range)
5399                         *color_space = COLOR_SPACE_2020_YCBCR;
5400                 else
5401                         return -EINVAL;
5402                 break;
5403
5404         default:
5405                 return -EINVAL;
5406         }
5407
5408         return 0;
5409 }
5410
5411 static int
5412 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5413                             const struct drm_plane_state *plane_state,
5414                             const uint64_t tiling_flags,
5415                             struct dc_plane_info *plane_info,
5416                             struct dc_plane_address *address,
5417                             bool tmz_surface,
5418                             bool force_disable_dcc)
5419 {
5420         const struct drm_framebuffer *fb = plane_state->fb;
5421         const struct amdgpu_framebuffer *afb =
5422                 to_amdgpu_framebuffer(plane_state->fb);
5423         int ret;
5424
5425         memset(plane_info, 0, sizeof(*plane_info));
5426
5427         switch (fb->format->format) {
5428         case DRM_FORMAT_C8:
5429                 plane_info->format =
5430                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5431                 break;
5432         case DRM_FORMAT_RGB565:
5433                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5434                 break;
5435         case DRM_FORMAT_XRGB8888:
5436         case DRM_FORMAT_ARGB8888:
5437                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5438                 break;
5439         case DRM_FORMAT_XRGB2101010:
5440         case DRM_FORMAT_ARGB2101010:
5441                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5442                 break;
5443         case DRM_FORMAT_XBGR2101010:
5444         case DRM_FORMAT_ABGR2101010:
5445                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5446                 break;
5447         case DRM_FORMAT_XBGR8888:
5448         case DRM_FORMAT_ABGR8888:
5449                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5450                 break;
5451         case DRM_FORMAT_NV21:
5452                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5453                 break;
5454         case DRM_FORMAT_NV12:
5455                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5456                 break;
5457         case DRM_FORMAT_P010:
5458                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5459                 break;
5460         case DRM_FORMAT_XRGB16161616F:
5461         case DRM_FORMAT_ARGB16161616F:
5462                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5463                 break;
5464         case DRM_FORMAT_XBGR16161616F:
5465         case DRM_FORMAT_ABGR16161616F:
5466                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5467                 break;
5468         case DRM_FORMAT_XRGB16161616:
5469         case DRM_FORMAT_ARGB16161616:
5470                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5471                 break;
5472         case DRM_FORMAT_XBGR16161616:
5473         case DRM_FORMAT_ABGR16161616:
5474                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5475                 break;
5476         default:
5477                 DRM_ERROR(
5478                         "Unsupported screen format %p4cc\n",
5479                         &fb->format->format);
5480                 return -EINVAL;
5481         }
5482
5483         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5484         case DRM_MODE_ROTATE_0:
5485                 plane_info->rotation = ROTATION_ANGLE_0;
5486                 break;
5487         case DRM_MODE_ROTATE_90:
5488                 plane_info->rotation = ROTATION_ANGLE_90;
5489                 break;
5490         case DRM_MODE_ROTATE_180:
5491                 plane_info->rotation = ROTATION_ANGLE_180;
5492                 break;
5493         case DRM_MODE_ROTATE_270:
5494                 plane_info->rotation = ROTATION_ANGLE_270;
5495                 break;
5496         default:
5497                 plane_info->rotation = ROTATION_ANGLE_0;
5498                 break;
5499         }
5500
5501         plane_info->visible = true;
5502         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5503
5504         plane_info->layer_index = 0;
5505
5506         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5507                                           &plane_info->color_space);
5508         if (ret)
5509                 return ret;
5510
5511         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5512                                            plane_info->rotation, tiling_flags,
5513                                            &plane_info->tiling_info,
5514                                            &plane_info->plane_size,
5515                                            &plane_info->dcc, address, tmz_surface,
5516                                            force_disable_dcc);
5517         if (ret)
5518                 return ret;
5519
5520         fill_blending_from_plane_state(
5521                 plane_state, &plane_info->per_pixel_alpha,
5522                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5523
5524         return 0;
5525 }
5526
5527 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5528                                     struct dc_plane_state *dc_plane_state,
5529                                     struct drm_plane_state *plane_state,
5530                                     struct drm_crtc_state *crtc_state)
5531 {
5532         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5533         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5534         struct dc_scaling_info scaling_info;
5535         struct dc_plane_info plane_info;
5536         int ret;
5537         bool force_disable_dcc = false;
5538
5539         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5540         if (ret)
5541                 return ret;
5542
5543         dc_plane_state->src_rect = scaling_info.src_rect;
5544         dc_plane_state->dst_rect = scaling_info.dst_rect;
5545         dc_plane_state->clip_rect = scaling_info.clip_rect;
5546         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5547
5548         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5549         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5550                                           afb->tiling_flags,
5551                                           &plane_info,
5552                                           &dc_plane_state->address,
5553                                           afb->tmz_surface,
5554                                           force_disable_dcc);
5555         if (ret)
5556                 return ret;
5557
5558         dc_plane_state->format = plane_info.format;
5559         dc_plane_state->color_space = plane_info.color_space;
5561         dc_plane_state->plane_size = plane_info.plane_size;
5562         dc_plane_state->rotation = plane_info.rotation;
5563         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5564         dc_plane_state->stereo_format = plane_info.stereo_format;
5565         dc_plane_state->tiling_info = plane_info.tiling_info;
5566         dc_plane_state->visible = plane_info.visible;
5567         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5568         dc_plane_state->global_alpha = plane_info.global_alpha;
5569         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5570         dc_plane_state->dcc = plane_info.dcc;
5571         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5572         dc_plane_state->flip_int_enabled = true;
5573
5574         /*
5575          * Always set input transfer function, since plane state is refreshed
5576          * every time.
5577          */
5578         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5579         if (ret)
5580                 return ret;
5581
5582         return 0;
5583 }
5584
5585 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5586                                            const struct dm_connector_state *dm_state,
5587                                            struct dc_stream_state *stream)
5588 {
5589         enum amdgpu_rmx_type rmx_type;
5590
5591         struct rect src = { 0 }; /* viewport in composition space */
5592         struct rect dst = { 0 }; /* stream addressable area */
5593
5594         /* no mode. nothing to be done */
5595         if (!mode)
5596                 return;
5597
5598         /* Full screen scaling by default */
5599         src.width = mode->hdisplay;
5600         src.height = mode->vdisplay;
5601         dst.width = stream->timing.h_addressable;
5602         dst.height = stream->timing.v_addressable;
5603
5604         if (dm_state) {
5605                 rmx_type = dm_state->scaling;
5606                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5607                         if (src.width * dst.height <
5608                                         src.height * dst.width) {
5609                                 /* height needs less upscaling/more downscaling */
5610                                 dst.width = src.width *
5611                                                 dst.height / src.height;
5612                         } else {
5613                                 /* width needs less upscaling/more downscaling */
5614                                 dst.height = src.height *
5615                                                 dst.width / src.width;
5616                         }
5617                 } else if (rmx_type == RMX_CENTER) {
5618                         dst = src;
5619                 }
5620
5621                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5622                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5623
5624                 if (dm_state->underscan_enable) {
5625                         dst.x += dm_state->underscan_hborder / 2;
5626                         dst.y += dm_state->underscan_vborder / 2;
5627                         dst.width -= dm_state->underscan_hborder;
5628                         dst.height -= dm_state->underscan_vborder;
5629                 }
5630         }
5631
5632         stream->src = src;
5633         stream->dst = dst;
5634
5635         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5636                       dst.x, dst.y, dst.width, dst.height);
5638 }
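
/*
 * A minimal sketch of the RMX_ASPECT branch above, with a worked example
 * (an illustrative helper, not used by the driver): fitting a 1280x1024
 * source into a 1920x1080 stream gives 1280 * 1080 < 1024 * 1920, so the
 * width is reduced to 1280 * 1080 / 1024 = 1350 and the image is
 * pillarboxed at dst.x = (1920 - 1350) / 2 = 285.
 */
static inline void dm_fit_preserving_aspect(const struct rect *src,
					    struct rect *dst)
{
	if (src->width * dst->height < src->height * dst->width)
		dst->width = src->width * dst->height / src->height;
	else
		dst->height = src->height * dst->width / src->width;
}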
5639
5640 static enum dc_color_depth
5641 convert_color_depth_from_display_info(const struct drm_connector *connector,
5642                                       bool is_y420, int requested_bpc)
5643 {
5644         uint8_t bpc;
5645
5646         if (is_y420) {
5647                 bpc = 8;
5648
5649                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5650                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5651                         bpc = 16;
5652                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5653                         bpc = 12;
5654                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5655                         bpc = 10;
5656         } else {
5657                 bpc = (uint8_t)connector->display_info.bpc;
5658                 /* Assume 8 bpc by default if no bpc is specified. */
5659                 bpc = bpc ? bpc : 8;
5660         }
5661
5662         if (requested_bpc > 0) {
5663                 /*
5664                  * Cap display bpc based on the user requested value.
5665                  *
5666                  * The value for state->max_bpc may not be correctly updated
5667                  * depending on when the connector gets added to the state
5668                  * or if this was called outside of atomic check, so it
5669                  * can't be used directly.
5670                  */
5671                 bpc = min_t(u8, bpc, requested_bpc);
5672
5673                 /* Round down to the nearest even number. */
5674                 bpc = bpc - (bpc & 1);
5675         }
5676
5677         switch (bpc) {
5678         case 0:
5679                 /*
5680                  * Temporary workaround: DRM doesn't parse color depth for
5681                  * EDID revisions before 1.4.
5682                  * TODO: Fix EDID parsing.
5683                  */
5684                 return COLOR_DEPTH_888;
5685         case 6:
5686                 return COLOR_DEPTH_666;
5687         case 8:
5688                 return COLOR_DEPTH_888;
5689         case 10:
5690                 return COLOR_DEPTH_101010;
5691         case 12:
5692                 return COLOR_DEPTH_121212;
5693         case 14:
5694                 return COLOR_DEPTH_141414;
5695         case 16:
5696                 return COLOR_DEPTH_161616;
5697         default:
5698                 return COLOR_DEPTH_UNDEFINED;
5699         }
5700 }
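
/*
 * A worked example of the bpc capping above (an illustrative helper, not
 * used by the driver): a 12 bpc sink with a userspace max bpc of 11 yields
 * min(12, 11) = 11, which rounds down to the even value 10, i.e.
 * COLOR_DEPTH_101010.
 */
static inline u8 dm_cap_bpc(u8 sink_bpc, int requested_bpc)
{
	u8 bpc = sink_bpc ? sink_bpc : 8;	/* assume 8 bpc when unknown */

	if (requested_bpc > 0) {
		bpc = min_t(u8, bpc, requested_bpc);
		bpc -= bpc & 1;			/* round down to even */
	}

	return bpc;
}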
5701
5702 static enum dc_aspect_ratio
5703 get_aspect_ratio(const struct drm_display_mode *mode_in)
5704 {
5705         /* 1-1 mapping, since both enums follow the HDMI spec. */
5706         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5707 }
5708
5709 static enum dc_color_space
5710 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5711 {
5712         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5713
5714         switch (dc_crtc_timing->pixel_encoding) {
5715         case PIXEL_ENCODING_YCBCR422:
5716         case PIXEL_ENCODING_YCBCR444:
5717         case PIXEL_ENCODING_YCBCR420:
5718         {
5719                 /*
5720                  * 27030 kHz is the separation point between HDTV and SDTV
5721                  * according to the HDMI spec; use YCbCr709 above it and
5722                  * YCbCr601 below it.
5723                  */
5724                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5725                         if (dc_crtc_timing->flags.Y_ONLY)
5726                                 color_space =
5727                                         COLOR_SPACE_YCBCR709_LIMITED;
5728                         else
5729                                 color_space = COLOR_SPACE_YCBCR709;
5730                 } else {
5731                         if (dc_crtc_timing->flags.Y_ONLY)
5732                                 color_space =
5733                                         COLOR_SPACE_YCBCR601_LIMITED;
5734                         else
5735                                 color_space = COLOR_SPACE_YCBCR601;
5736                 }
5737
5738         }
5739         break;
5740         case PIXEL_ENCODING_RGB:
5741                 color_space = COLOR_SPACE_SRGB;
5742                 break;
5743
5744         default:
5745                 WARN_ON(1);
5746                 break;
5747         }
5748
5749         return color_space;
5750 }
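
/*
 * A small sketch of the 27.03 MHz HDTV/SDTV split used above (an
 * illustrative helper, not used by the driver): 1080p60 YCbCr runs at
 * pix_clk_100hz = 1485000 (148.5 MHz) and is classified as HDTV
 * (YCbCr709), while 480p60 at 270000 (27.0 MHz) falls below the threshold
 * and gets YCbCr601.
 */
static inline bool dm_timing_is_hdtv(const struct dc_crtc_timing *timing)
{
	/* the threshold is 27.03 MHz, expressed in units of 100 Hz */
	return timing->pix_clk_100hz > 270300;
}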
5751
5752 static bool adjust_colour_depth_from_display_info(
5753         struct dc_crtc_timing *timing_out,
5754         const struct drm_display_info *info)
5755 {
5756         enum dc_color_depth depth = timing_out->display_color_depth;
5757         int normalized_clk;
5758         do {
5759                 normalized_clk = timing_out->pix_clk_100hz / 10;
5760                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5761                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5762                         normalized_clk /= 2;
5763                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5764                 switch (depth) {
5765                 case COLOR_DEPTH_888:
5766                         break;
5767                 case COLOR_DEPTH_101010:
5768                         normalized_clk = (normalized_clk * 30) / 24;
5769                         break;
5770                 case COLOR_DEPTH_121212:
5771                         normalized_clk = (normalized_clk * 36) / 24;
5772                         break;
5773                 case COLOR_DEPTH_161616:
5774                         normalized_clk = (normalized_clk * 48) / 24;
5775                         break;
5776                 default:
5777                         /* The above depths are the only ones valid for HDMI. */
5778                         return false;
5779                 }
5780                 if (normalized_clk <= info->max_tmds_clock) {
5781                         timing_out->display_color_depth = depth;
5782                         return true;
5783                 }
5784         } while (--depth > COLOR_DEPTH_666);
5785         return false;
5786 }
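
/*
 * A minimal sketch of the clock normalization above, with worked numbers
 * (an illustrative helper, not used by the driver): a 297 MHz mode on a
 * 340 MHz TMDS link needs 297000 * 30 / 24 = 371250 kHz at 10 bpc, which
 * exceeds 340000, so the loop steps the depth down to 8 bpc (297000 kHz),
 * which fits.
 */
static inline int dm_hdmi_normalized_clk(int clk_khz,
					 enum dc_color_depth depth,
					 bool is_y420)
{
	if (is_y420)
		clk_khz /= 2;	/* YCbCr 4:2:0 halves the TMDS rate */

	switch (depth) {
	case COLOR_DEPTH_101010:
		return clk_khz * 30 / 24;
	case COLOR_DEPTH_121212:
		return clk_khz * 36 / 24;
	case COLOR_DEPTH_161616:
		return clk_khz * 48 / 24;
	default:
		return clk_khz;	/* 8 bpc needs no adjustment */
	}
}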
5787
5788 static void fill_stream_properties_from_drm_display_mode(
5789         struct dc_stream_state *stream,
5790         const struct drm_display_mode *mode_in,
5791         const struct drm_connector *connector,
5792         const struct drm_connector_state *connector_state,
5793         const struct dc_stream_state *old_stream,
5794         int requested_bpc)
5795 {
5796         struct dc_crtc_timing *timing_out = &stream->timing;
5797         const struct drm_display_info *info = &connector->display_info;
5798         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5799         struct hdmi_vendor_infoframe hv_frame;
5800         struct hdmi_avi_infoframe avi_frame;
5801
5802         memset(&hv_frame, 0, sizeof(hv_frame));
5803         memset(&avi_frame, 0, sizeof(avi_frame));
5804
5805         timing_out->h_border_left = 0;
5806         timing_out->h_border_right = 0;
5807         timing_out->v_border_top = 0;
5808         timing_out->v_border_bottom = 0;
5809         /* TODO: un-hardcode */
5810         if (drm_mode_is_420_only(info, mode_in)
5811                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5812                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5813         else if (drm_mode_is_420_also(info, mode_in)
5814                         && aconnector->force_yuv420_output)
5815                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5816         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5817                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5818                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5819         else
5820                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5821
5822         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5823         timing_out->display_color_depth = convert_color_depth_from_display_info(
5824                 connector,
5825                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5826                 requested_bpc);
5827         timing_out->scan_type = SCANNING_TYPE_NODATA;
5828         timing_out->hdmi_vic = 0;
5829
5830         if (old_stream) {
5831                 timing_out->vic = old_stream->timing.vic;
5832                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5833                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5834         } else {
5835                 timing_out->vic = drm_match_cea_mode(mode_in);
5836                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5837                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5838                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5839                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5840         }
5841
5842         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5843                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5844                 timing_out->vic = avi_frame.video_code;
5845                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5846                 timing_out->hdmi_vic = hv_frame.vic;
5847         }
5848
5849         if (is_freesync_video_mode(mode_in, aconnector)) {
5850                 timing_out->h_addressable = mode_in->hdisplay;
5851                 timing_out->h_total = mode_in->htotal;
5852                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5853                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5854                 timing_out->v_total = mode_in->vtotal;
5855                 timing_out->v_addressable = mode_in->vdisplay;
5856                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5857                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5858                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5859         } else {
5860                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5861                 timing_out->h_total = mode_in->crtc_htotal;
5862                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5863                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5864                 timing_out->v_total = mode_in->crtc_vtotal;
5865                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5866                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5867                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5868                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5869         }
5870
5871         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5872
5873         stream->output_color_space = get_output_color_space(timing_out);
5874
5875         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5876         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5877         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5878                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5879                     drm_mode_is_420_also(info, mode_in) &&
5880                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5881                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5882                         adjust_colour_depth_from_display_info(timing_out, info);
5883                 }
5884         }
5885 }
5886
5887 static void fill_audio_info(struct audio_info *audio_info,
5888                             const struct drm_connector *drm_connector,
5889                             const struct dc_sink *dc_sink)
5890 {
5891         int i = 0;
5892         int cea_revision = 0;
5893         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5894
5895         audio_info->manufacture_id = edid_caps->manufacturer_id;
5896         audio_info->product_id = edid_caps->product_id;
5897
5898         cea_revision = drm_connector->display_info.cea_rev;
5899
5900         strscpy(audio_info->display_name,
5901                 edid_caps->display_name,
5902                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5903
5904         if (cea_revision >= 3) {
5905                 audio_info->mode_count = edid_caps->audio_mode_count;
5906
5907                 for (i = 0; i < audio_info->mode_count; ++i) {
5908                         audio_info->modes[i].format_code =
5909                                         (enum audio_format_code)
5910                                         (edid_caps->audio_modes[i].format_code);
5911                         audio_info->modes[i].channel_count =
5912                                         edid_caps->audio_modes[i].channel_count;
5913                         audio_info->modes[i].sample_rates.all =
5914                                         edid_caps->audio_modes[i].sample_rate;
5915                         audio_info->modes[i].sample_size =
5916                                         edid_caps->audio_modes[i].sample_size;
5917                 }
5918         }
5919
5920         audio_info->flags.all = edid_caps->speaker_flags;
5921
5922         /* TODO: We only check for the progressive mode, check for interlace mode too */
5923         if (drm_connector->latency_present[0]) {
5924                 audio_info->video_latency = drm_connector->video_latency[0];
5925                 audio_info->audio_latency = drm_connector->audio_latency[0];
5926         }
5927
5928         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5930 }
5931
5932 static void
5933 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5934                                       struct drm_display_mode *dst_mode)
5935 {
5936         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5937         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5938         dst_mode->crtc_clock = src_mode->crtc_clock;
5939         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5940         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5941         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5942         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5943         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5944         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5945         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5946         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5947         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5948         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5949         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5950 }
5951
5952 static void
5953 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5954                                         const struct drm_display_mode *native_mode,
5955                                         bool scale_enabled)
5956 {
5957         if (scale_enabled) {
5958                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5959         } else if (native_mode->clock == drm_mode->clock &&
5960                         native_mode->htotal == drm_mode->htotal &&
5961                         native_mode->vtotal == drm_mode->vtotal) {
5962                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5963         } else {
5964                 /* no scaling nor amdgpu inserted, no need to patch */
5965         }
5966 }
5967
5968 static struct dc_sink *
5969 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5970 {
5971         struct dc_sink_init_data sink_init_data = { 0 };
5972         struct dc_sink *sink = NULL;

5973         sink_init_data.link = aconnector->dc_link;
5974         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5975
5976         sink = dc_sink_create(&sink_init_data);
5977         if (!sink) {
5978                 DRM_ERROR("Failed to create sink!\n");
5979                 return NULL;
5980         }
5981         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5982
5983         return sink;
5984 }
5985
5986 static void set_multisync_trigger_params(
5987                 struct dc_stream_state *stream)
5988 {
5989         struct dc_stream_state *master = NULL;
5990
5991         if (stream->triggered_crtc_reset.enabled) {
5992                 master = stream->triggered_crtc_reset.event_source;
5993                 stream->triggered_crtc_reset.event =
5994                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5995                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5996                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5997         }
5998 }
5999
6000 static void set_master_stream(struct dc_stream_state *stream_set[],
6001                               int stream_count)
6002 {
6003         int j, highest_rfr = 0, master_stream = 0;
6004
6005         for (j = 0;  j < stream_count; j++) {
6006                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6007                         int refresh_rate = 0;
6008
6009                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6010                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6011                         if (refresh_rate > highest_rfr) {
6012                                 highest_rfr = refresh_rate;
6013                                 master_stream = j;
6014                         }
6015                 }
6016         }
6017         for (j = 0;  j < stream_count; j++) {
6018                 if (stream_set[j])
6019                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6020         }
6021 }
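
/*
 * The refresh-rate formula used above, with worked numbers (an
 * illustrative helper, not used by the driver): 1080p60 has
 * pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125, so
 * (1485000 * 100) / (2200 * 1125) = 148500000 / 2475000 = 60 Hz.
 */
static inline int dm_stream_refresh_rate(const struct dc_stream_state *stream)
{
	return (stream->timing.pix_clk_100hz * 100) /
	       (stream->timing.h_total * stream->timing.v_total);
}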
6022
6023 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6024 {
6025         int i = 0;
6026         struct dc_stream_state *stream;
6027
6028         if (context->stream_count < 2)
6029                 return;
6030         for (i = 0; i < context->stream_count ; i++) {
6031                 if (!context->streams[i])
6032                         continue;
6033                 /*
6034                  * TODO: add a function to read AMD VSDB bits and set
6035                  * crtc_sync_master.multi_sync_enabled flag
6036                  * For now it's set to false
6037                  */
6038         }
6039
6040         set_master_stream(context->streams, context->stream_count);
6041
6042         for (i = 0; i < context->stream_count ; i++) {
6043                 stream = context->streams[i];
6044
6045                 if (!stream)
6046                         continue;
6047
6048                 set_multisync_trigger_params(stream);
6049         }
6050 }
6051
6052 #if defined(CONFIG_DRM_AMD_DC_DCN)
6053 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6054                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6055                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6056 {
6057         stream->timing.flags.DSC = 0;
6058
6059         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6060                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6061                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6062                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6063                                       dsc_caps);
6064         }
6065 }
6066
6067 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6068                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6069                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6070 {
6071         struct drm_connector *drm_connector = &aconnector->base;
6072         uint32_t link_bandwidth_kbps;
6073         uint32_t max_dsc_target_bpp_limit_override = 0;
6074
6075         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6076                                                         dc_link_get_link_cap(aconnector->dc_link));
6077
6078         if (stream->link && stream->link->local_sink)
6079                 max_dsc_target_bpp_limit_override =
6080                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6081
6082         /* Set DSC policy according to dsc_clock_en */
6083         dc_dsc_policy_set_enable_dsc_when_not_needed(
6084                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6085
6086         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6087
6088                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6089                                                 dsc_caps,
6090                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6091                                                 max_dsc_target_bpp_limit_override,
6092                                                 link_bandwidth_kbps,
6093                                                 &stream->timing,
6094                                                 &stream->timing.dsc_cfg)) {
6095                         stream->timing.flags.DSC = 1;
6096                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6097                 }
6098         }
6099
6100         /* Overwrite the stream flag if DSC is enabled through debugfs */
6101         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6102                 stream->timing.flags.DSC = 1;
6103
6104         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6105                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6106
6107         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6108                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6109
6110         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6111                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6112 }
6113 #endif /* CONFIG_DRM_AMD_DC_DCN */
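
/*
 * A rough sketch of why the policy above may turn DSC on (an illustrative
 * helper, not the dc_dsc_compute_config() algorithm): compare the
 * uncompressed stream bandwidth against the link. For example, 4K60 at
 * 24 bpp needs roughly 594000 * 24 = 14256000 kbps, which cannot fit a
 * 2-lane HBR2 link (~8640000 kbps after 8b/10b overhead), so the mode is
 * only achievable with DSC.
 */
static inline bool dm_stream_needs_dsc(uint32_t pix_clk_khz, uint32_t bpp,
				       uint32_t link_bw_kbps)
{
	return (uint64_t)pix_clk_khz * bpp > link_bw_kbps;
}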
6114
6115 /**
6116  * DOC: FreeSync Video
6117  *
6118  * When a userspace application wants to play a video, the content follows a
6119  * standard format definition that usually specifies the FPS for that format.
6120  * The list below illustrates some video formats and their expected FPS:
6122  *
6123  * - TV/NTSC (23.976 FPS)
6124  * - Cinema (24 FPS)
6125  * - TV/PAL (25 FPS)
6126  * - TV/NTSC (29.97 FPS)
6127  * - TV/NTSC (30 FPS)
6128  * - Cinema HFR (48 FPS)
6129  * - TV/PAL (50 FPS)
6130  * - Commonly used (60 FPS)
6131  * - Multiples of 24 (48,72,96,120 FPS)
6132  *
6133  * The list of standard video formats is not huge and can be added to the
6134  * connector modeset list beforehand. With that, userspace can leverage
6135  * FreeSync to extend the front porch in order to attain the target refresh
6136  * rate. Such a switch will happen seamlessly, without screen blanking or
6137  * reprogramming of the output in any other way. If the userspace requests a
6138  * modesetting change compatible with FreeSync modes that only differ in the
6139  * refresh rate, DC will skip the full update and avoid blink during the
6140  * transition. For example, the video player can change the modesetting from
6141  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6142  * causing any display blink. This same concept can be applied to a mode
6143  * setting change.
6144  */
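
/*
 * A minimal sketch of the front porch math behind the seamless switch
 * described above (an illustrative helper, not used by the driver): the
 * refresh rate is pix_clk / (htotal * vtotal), so with the pixel clock and
 * htotal held fixed, a 1080p60 base timing (148.5 MHz, 2200 x 1125)
 * reaches 50 Hz by stretching vtotal to 148500000 / (2200 * 50) = 1350,
 * i.e. 225 extra lines of vertical front porch.
 */
static inline u32 dm_vtotal_for_refresh(u32 pix_clk_hz, u32 htotal,
					u32 target_refresh_hz)
{
	return pix_clk_hz / (htotal * target_refresh_hz);
}
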
6145 static struct drm_display_mode *
6146 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6147                           bool use_probed_modes)
6148 {
6149         struct drm_display_mode *m, *m_pref = NULL;
6150         u16 current_refresh, highest_refresh;
6151         struct list_head *list_head = use_probed_modes ?
6152                                                     &aconnector->base.probed_modes :
6153                                                     &aconnector->base.modes;
6154
6155         if (aconnector->freesync_vid_base.clock != 0)
6156                 return &aconnector->freesync_vid_base;
6157
6158         /* Find the preferred mode */
6159         list_for_each_entry (m, list_head, head) {
6160                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6161                         m_pref = m;
6162                         break;
6163                 }
6164         }
6165
6166         if (!m_pref) {
6167                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6168                 m_pref = list_first_entry_or_null(
6169                         &aconnector->base.modes, struct drm_display_mode, head);
6170                 if (!m_pref) {
6171                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6172                         return NULL;
6173                 }
6174         }
6175
6176         highest_refresh = drm_mode_vrefresh(m_pref);
6177
6178         /*
6179          * Find the mode with highest refresh rate with same resolution.
6180          * For some monitors, preferred mode is not the mode with highest
6181          * supported refresh rate.
6182          */
6183         list_for_each_entry (m, list_head, head) {
6184                 current_refresh  = drm_mode_vrefresh(m);
6185
6186                 if (m->hdisplay == m_pref->hdisplay &&
6187                     m->vdisplay == m_pref->vdisplay &&
6188                     highest_refresh < current_refresh) {
6189                         highest_refresh = current_refresh;
6190                         m_pref = m;
6191                 }
6192         }
6193
6194         aconnector->freesync_vid_base = *m_pref;
6195         return m_pref;
6196 }
6197
6198 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6199                                    struct amdgpu_dm_connector *aconnector)
6200 {
6201         struct drm_display_mode *high_mode;
6202         int timing_diff;
6203
6204         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6205         if (!high_mode || !mode)
6206                 return false;
6207
6208         timing_diff = high_mode->vtotal - mode->vtotal;
6209
6210         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6211             high_mode->hdisplay != mode->hdisplay ||
6212             high_mode->vdisplay != mode->vdisplay ||
6213             high_mode->hsync_start != mode->hsync_start ||
6214             high_mode->hsync_end != mode->hsync_end ||
6215             high_mode->htotal != mode->htotal ||
6216             high_mode->hskew != mode->hskew ||
6217             high_mode->vscan != mode->vscan ||
6218             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6219             high_mode->vsync_end - mode->vsync_end != timing_diff)
6220                 return false;
6221         else
6222                 return true;
6223 }
6224
6225 static struct dc_stream_state *
6226 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6227                        const struct drm_display_mode *drm_mode,
6228                        const struct dm_connector_state *dm_state,
6229                        const struct dc_stream_state *old_stream,
6230                        int requested_bpc)
6231 {
6232         struct drm_display_mode *preferred_mode = NULL;
6233         struct drm_connector *drm_connector;
6234         const struct drm_connector_state *con_state =
6235                 dm_state ? &dm_state->base : NULL;
6236         struct dc_stream_state *stream = NULL;
6237         struct drm_display_mode mode = *drm_mode;
6238         struct drm_display_mode saved_mode;
6239         struct drm_display_mode *freesync_mode = NULL;
6240         bool native_mode_found = false;
6241         bool recalculate_timing = false;
6242         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6243         int mode_refresh;
6244         int preferred_refresh = 0;
6245 #if defined(CONFIG_DRM_AMD_DC_DCN)
6246         struct dsc_dec_dpcd_caps dsc_caps;
6247 #endif
6248         struct dc_sink *sink = NULL;
6249
6250         memset(&saved_mode, 0, sizeof(saved_mode));
6251
6252         if (aconnector == NULL) {
6253                 DRM_ERROR("aconnector is NULL!\n");
6254                 return stream;
6255         }
6256
6257         drm_connector = &aconnector->base;
6258
6259         if (!aconnector->dc_sink) {
6260                 sink = create_fake_sink(aconnector);
6261                 if (!sink)
6262                         return stream;
6263         } else {
6264                 sink = aconnector->dc_sink;
6265                 dc_sink_retain(sink);
6266         }
6267
6268         stream = dc_create_stream_for_sink(sink);
6269
6270         if (stream == NULL) {
6271                 DRM_ERROR("Failed to create stream for sink!\n");
6272                 goto finish;
6273         }
6274
6275         stream->dm_stream_context = aconnector;
6276
6277         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6278                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6279
6280         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6281                 /* Search for preferred mode */
6282                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6283                         native_mode_found = true;
6284                         break;
6285                 }
6286         }
6287         if (!native_mode_found)
6288                 preferred_mode = list_first_entry_or_null(
6289                                 &aconnector->base.modes,
6290                                 struct drm_display_mode,
6291                                 head);
6292
6293         mode_refresh = drm_mode_vrefresh(&mode);
6294
6295         if (preferred_mode == NULL) {
6296                 /*
6297                  * This may not be an error; the use case is when we have no
6298                  * usermode calls to reset and set mode upon hotplug. In this
6299                  * case, we call set mode ourselves to restore the previous mode
6300                  * and the mode list may not be populated in time.
6301                  */
6302                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6303         } else {
6304                 recalculate_timing = amdgpu_freesync_vid_mode &&
6305                                  is_freesync_video_mode(&mode, aconnector);
6306                 if (recalculate_timing) {
6307                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6308                         saved_mode = mode;
6309                         mode = *freesync_mode;
6310                 } else {
6311                         decide_crtc_timing_for_drm_display_mode(
6312                                 &mode, preferred_mode, scale);
6313
6314                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6315                 }
6316         }
6317
6318         if (recalculate_timing)
6319                 drm_mode_set_crtcinfo(&saved_mode, 0);
6320         else if (!dm_state)
6321                 drm_mode_set_crtcinfo(&mode, 0);
6322
6323         /*
6324          * If scaling is enabled and the refresh rate didn't change,
6325          * copy the vic and polarities from the old timings.
6326          */
6327         if (!scale || mode_refresh != preferred_refresh)
6328                 fill_stream_properties_from_drm_display_mode(
6329                         stream, &mode, &aconnector->base, con_state, NULL,
6330                         requested_bpc);
6331         else
6332                 fill_stream_properties_from_drm_display_mode(
6333                         stream, &mode, &aconnector->base, con_state, old_stream,
6334                         requested_bpc);
6335
6336 #if defined(CONFIG_DRM_AMD_DC_DCN)
6337         /* SST DSC determination policy */
6338         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6339         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6340                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6341 #endif
6342
6343         update_stream_scaling_settings(&mode, dm_state, stream);
6344
6345         fill_audio_info(
6346                 &stream->audio_info,
6347                 drm_connector,
6348                 sink);
6349
6350         update_stream_signal(stream, sink);
6351
6352         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6353                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6354
6355         if (stream->link->psr_settings.psr_feature_enabled) {
6356                 /*
6357                  * Decide whether the stream supports VSC SDP colorimetry
6358                  * before building the VSC info packet.
6359                  */
6360                 stream->use_vsc_sdp_for_colorimetry = false;
6361                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6362                         stream->use_vsc_sdp_for_colorimetry =
6363                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6364                 } else {
6365                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6366                                 stream->use_vsc_sdp_for_colorimetry = true;
6367                 }
6368                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6369                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6370
6371         }
6372 finish:
6373         dc_sink_release(sink);
6374
6375         return stream;
6376 }
6377
6378 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6379 {
6380         drm_crtc_cleanup(crtc);
6381         kfree(crtc);
6382 }
6383
6384 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6385                                   struct drm_crtc_state *state)
6386 {
6387         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6388
6389         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6390         if (cur->stream)
6391                 dc_stream_release(cur->stream);
6392
6393         __drm_atomic_helper_crtc_destroy_state(state);
6394
6395         kfree(state);
6398 }
6399
6400 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6401 {
6402         struct dm_crtc_state *state;
6403
6404         if (crtc->state)
6405                 dm_crtc_destroy_state(crtc, crtc->state);
6406
6407         state = kzalloc(sizeof(*state), GFP_KERNEL);
6408         if (WARN_ON(!state))
6409                 return;
6410
6411         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6412 }
6413
6414 static struct drm_crtc_state *
6415 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6416 {
6417         struct dm_crtc_state *state, *cur;
6418
6419         if (WARN_ON(!crtc->state))
6420                 return NULL;
6421
6422         cur = to_dm_crtc_state(crtc->state);
6423
6424         state = kzalloc(sizeof(*state), GFP_KERNEL);
6425         if (!state)
6426                 return NULL;
6427
6428         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6429
6430         if (cur->stream) {
6431                 state->stream = cur->stream;
6432                 dc_stream_retain(state->stream);
6433         }
6434
6435         state->active_planes = cur->active_planes;
6436         state->vrr_infopacket = cur->vrr_infopacket;
6437         state->abm_level = cur->abm_level;
6438         state->vrr_supported = cur->vrr_supported;
6439         state->freesync_config = cur->freesync_config;
6440         state->cm_has_degamma = cur->cm_has_degamma;
6441         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6442         state->force_dpms_off = cur->force_dpms_off;
6443         /* TODO: Duplicate the dc_stream once the stream object is flattened */
6444
6445         return &state->base;
6446 }
6447
6448 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6449 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6450 {
6451         crtc_debugfs_init(crtc);
6452
6453         return 0;
6454 }
6455 #endif
6456
6457 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6458 {
6459         enum dc_irq_source irq_source;
6460         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6461         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6462         int rc;
6463
6464         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6465
6466         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6467
6468         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6469                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6470         return rc;
6471 }
6472
6473 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6474 {
6475         enum dc_irq_source irq_source;
6476         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6477         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6478         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6479 #if defined(CONFIG_DRM_AMD_DC_DCN)
6480         struct amdgpu_display_manager *dm = &adev->dm;
6481         struct vblank_control_work *work;
6482 #endif
6483         int rc = 0;
6484
6485         if (enable) {
6486                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6487                 if (amdgpu_dm_vrr_active(acrtc_state))
6488                         rc = dm_set_vupdate_irq(crtc, true);
6489         } else {
6490                 /* vblank irq off -> vupdate irq off */
6491                 rc = dm_set_vupdate_irq(crtc, false);
6492         }
6493
6494         if (rc)
6495                 return rc;
6496
6497         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6498
6499         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6500                 return -EBUSY;
6501
6502         if (amdgpu_in_reset(adev))
6503                 return 0;
6504
6505 #if defined(CONFIG_DRM_AMD_DC_DCN)
6506         if (dm->vblank_control_workqueue) {
6507                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6508                 if (!work)
6509                         return -ENOMEM;
6510
6511                 INIT_WORK(&work->work, vblank_control_worker);
6512                 work->dm = dm;
6513                 work->acrtc = acrtc;
6514                 work->enable = enable;
6515
6516                 if (acrtc_state->stream) {
6517                         dc_stream_retain(acrtc_state->stream);
6518                         work->stream = acrtc_state->stream;
6519                 }
6520
6521                 queue_work(dm->vblank_control_workqueue, &work->work);
6522         }
6523 #endif
6524
6525         return 0;
6526 }
6527
6528 static int dm_enable_vblank(struct drm_crtc *crtc)
6529 {
6530         return dm_set_vblank(crtc, true);
6531 }
6532
6533 static void dm_disable_vblank(struct drm_crtc *crtc)
6534 {
6535         dm_set_vblank(crtc, false);
6536 }
6537
6538 /* Only the options currently available for the driver are implemented */
6539 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6540         .reset = dm_crtc_reset_state,
6541         .destroy = amdgpu_dm_crtc_destroy,
6542         .set_config = drm_atomic_helper_set_config,
6543         .page_flip = drm_atomic_helper_page_flip,
6544         .atomic_duplicate_state = dm_crtc_duplicate_state,
6545         .atomic_destroy_state = dm_crtc_destroy_state,
6546         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6547         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6548         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6549         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6550         .enable_vblank = dm_enable_vblank,
6551         .disable_vblank = dm_disable_vblank,
6552         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6553 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6554         .late_register = amdgpu_dm_crtc_late_register,
6555 #endif
6556 };
6557
6558 static enum drm_connector_status
6559 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6560 {
6561         bool connected;
6562         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6563
6564         /*
6565          * Notes:
6566          * 1. This interface is NOT called in context of HPD irq.
6567          * 2. This interface *is called* in the context of a user-mode ioctl,
6568          * which makes it a bad place for *any* MST-related activity.
6569          */
6570
6571         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6572             !aconnector->fake_enable)
6573                 connected = (aconnector->dc_sink != NULL);
6574         else
6575                 connected = (aconnector->base.force == DRM_FORCE_ON);
6576
6577         update_subconnector_property(aconnector);
6578
6579         return (connected ? connector_status_connected :
6580                         connector_status_disconnected);
6581 }
6582
6583 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6584                                             struct drm_connector_state *connector_state,
6585                                             struct drm_property *property,
6586                                             uint64_t val)
6587 {
6588         struct drm_device *dev = connector->dev;
6589         struct amdgpu_device *adev = drm_to_adev(dev);
6590         struct dm_connector_state *dm_old_state =
6591                 to_dm_connector_state(connector->state);
6592         struct dm_connector_state *dm_new_state =
6593                 to_dm_connector_state(connector_state);
6594
6595         int ret = -EINVAL;
6596
6597         if (property == dev->mode_config.scaling_mode_property) {
6598                 enum amdgpu_rmx_type rmx_type;
6599
6600                 switch (val) {
6601                 case DRM_MODE_SCALE_CENTER:
6602                         rmx_type = RMX_CENTER;
6603                         break;
6604                 case DRM_MODE_SCALE_ASPECT:
6605                         rmx_type = RMX_ASPECT;
6606                         break;
6607                 case DRM_MODE_SCALE_FULLSCREEN:
6608                         rmx_type = RMX_FULL;
6609                         break;
6610                 case DRM_MODE_SCALE_NONE:
6611                 default:
6612                         rmx_type = RMX_OFF;
6613                         break;
6614                 }
6615
6616                 if (dm_old_state->scaling == rmx_type)
6617                         return 0;
6618
6619                 dm_new_state->scaling = rmx_type;
6620                 ret = 0;
6621         } else if (property == adev->mode_info.underscan_hborder_property) {
6622                 dm_new_state->underscan_hborder = val;
6623                 ret = 0;
6624         } else if (property == adev->mode_info.underscan_vborder_property) {
6625                 dm_new_state->underscan_vborder = val;
6626                 ret = 0;
6627         } else if (property == adev->mode_info.underscan_property) {
6628                 dm_new_state->underscan_enable = val;
6629                 ret = 0;
6630         } else if (property == adev->mode_info.abm_level_property) {
6631                 dm_new_state->abm_level = val;
6632                 ret = 0;
6633         }
6634
6635         return ret;
6636 }
6637
6638 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6639                                             const struct drm_connector_state *state,
6640                                             struct drm_property *property,
6641                                             uint64_t *val)
6642 {
6643         struct drm_device *dev = connector->dev;
6644         struct amdgpu_device *adev = drm_to_adev(dev);
6645         struct dm_connector_state *dm_state =
6646                 to_dm_connector_state(state);
6647         int ret = -EINVAL;
6648
6649         if (property == dev->mode_config.scaling_mode_property) {
6650                 switch (dm_state->scaling) {
6651                 case RMX_CENTER:
6652                         *val = DRM_MODE_SCALE_CENTER;
6653                         break;
6654                 case RMX_ASPECT:
6655                         *val = DRM_MODE_SCALE_ASPECT;
6656                         break;
6657                 case RMX_FULL:
6658                         *val = DRM_MODE_SCALE_FULLSCREEN;
6659                         break;
6660                 case RMX_OFF:
6661                 default:
6662                         *val = DRM_MODE_SCALE_NONE;
6663                         break;
6664                 }
6665                 ret = 0;
6666         } else if (property == adev->mode_info.underscan_hborder_property) {
6667                 *val = dm_state->underscan_hborder;
6668                 ret = 0;
6669         } else if (property == adev->mode_info.underscan_vborder_property) {
6670                 *val = dm_state->underscan_vborder;
6671                 ret = 0;
6672         } else if (property == adev->mode_info.underscan_property) {
6673                 *val = dm_state->underscan_enable;
6674                 ret = 0;
6675         } else if (property == adev->mode_info.abm_level_property) {
6676                 *val = dm_state->abm_level;
6677                 ret = 0;
6678         }
6679
6680         return ret;
6681 }
6682
6683 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6684 {
6685         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6686
6687         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6688 }
6689
6690 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6691 {
6692         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6693         const struct dc_link *link = aconnector->dc_link;
6694         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6695         struct amdgpu_display_manager *dm = &adev->dm;
6696         int i;
6697
6698         /*
6699          * Call only if mst_mgr was initialized before, since it's not done
6700          * for all connector types.
6701          */
6702         if (aconnector->mst_mgr.dev)
6703                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6704
6705 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6706         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6707         for (i = 0; i < dm->num_of_edps; i++) {
6708                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6709                         backlight_device_unregister(dm->backlight_dev[i]);
6710                         dm->backlight_dev[i] = NULL;
6711                 }
6712         }
6713 #endif
6714
6715         if (aconnector->dc_em_sink)
6716                 dc_sink_release(aconnector->dc_em_sink);
6717         aconnector->dc_em_sink = NULL;
6718         if (aconnector->dc_sink)
6719                 dc_sink_release(aconnector->dc_sink);
6720         aconnector->dc_sink = NULL;
6721
6722         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6723         drm_connector_unregister(connector);
6724         drm_connector_cleanup(connector);
6725         if (aconnector->i2c) {
6726                 i2c_del_adapter(&aconnector->i2c->base);
6727                 kfree(aconnector->i2c);
6728         }
6729         kfree(aconnector->dm_dp_aux.aux.name);
6730
6731         kfree(connector);
6732 }
6733
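/*
 * Drop the current connector state and install freshly allocated
 * defaults: scaling off, underscan disabled, max 8 bpc requested,
 * and the module-level ABM setting on eDP panels.
 */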
6734 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6735 {
6736         struct dm_connector_state *state =
6737                 to_dm_connector_state(connector->state);
6738
6739         if (connector->state)
6740                 __drm_atomic_helper_connector_destroy_state(connector->state);
6741
6742         kfree(state);
6743
6744         state = kzalloc(sizeof(*state), GFP_KERNEL);
6745
6746         if (state) {
6747                 state->scaling = RMX_OFF;
6748                 state->underscan_enable = false;
6749                 state->underscan_hborder = 0;
6750                 state->underscan_vborder = 0;
6751                 state->base.max_requested_bpc = 8;
6752                 state->vcpi_slots = 0;
6753                 state->pbn = 0;
6754                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6755                         state->abm_level = amdgpu_dm_abm_level;
6756
6757                 __drm_atomic_helper_connector_reset(connector, &state->base);
6758         }
6759 }
6760
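/*
 * Duplicate the connector state for a new atomic transaction,
 * carrying over all driver-private fields.
 */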
6761 struct drm_connector_state *
6762 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6763 {
6764         struct dm_connector_state *state =
6765                 to_dm_connector_state(connector->state);
6766
6767         struct dm_connector_state *new_state =
6768                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6769
6770         if (!new_state)
6771                 return NULL;
6772
6773         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6774
6775         new_state->freesync_capable = state->freesync_capable;
6776         new_state->abm_level = state->abm_level;
6777         new_state->scaling = state->scaling;
6778         new_state->underscan_enable = state->underscan_enable;
6779         new_state->underscan_hborder = state->underscan_hborder;
6780         new_state->underscan_vborder = state->underscan_vborder;
6781         new_state->vcpi_slots = state->vcpi_slots;
6782         new_state->pbn = state->pbn;
6783         return &new_state->base;
6784 }
6785
6786 static int
6787 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6788 {
6789         struct amdgpu_dm_connector *amdgpu_dm_connector =
6790                 to_amdgpu_dm_connector(connector);
6791         int r;
6792
6793         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6794             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6795                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6796                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6797                 if (r)
6798                         return r;
6799         }
6800
6801 #if defined(CONFIG_DEBUG_FS)
6802         connector_debugfs_init(amdgpu_dm_connector);
6803 #endif
6804
6805         return 0;
6806 }
6807
6808 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6809         .reset = amdgpu_dm_connector_funcs_reset,
6810         .detect = amdgpu_dm_connector_detect,
6811         .fill_modes = drm_helper_probe_single_connector_modes,
6812         .destroy = amdgpu_dm_connector_destroy,
6813         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6814         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6815         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6816         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6817         .late_register = amdgpu_dm_connector_late_register,
6818         .early_unregister = amdgpu_dm_connector_unregister
6819 };
6820
6821 static int get_modes(struct drm_connector *connector)
6822 {
6823         return amdgpu_dm_connector_get_modes(connector);
6824 }
6825
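/*
 * Create an emulated (remote) sink from the EDID blob attached to the
 * connector (e.g. one loaded via the drm.edid_firmware option). With
 * no EDID blob present, the connector is forced off instead.
 */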
6826 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6827 {
6828         struct dc_sink_init_data init_params = {
6829                         .link = aconnector->dc_link,
6830                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6831         };
6832         struct edid *edid;
6833
6834         if (!aconnector->base.edid_blob_ptr) {
6835                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6836                                 aconnector->base.name);
6837
6838                 aconnector->base.force = DRM_FORCE_OFF;
6839                 aconnector->base.override_edid = false;
6840                 return;
6841         }
6842
6843         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6844
6845         aconnector->edid = edid;
6846
6847         aconnector->dc_em_sink = dc_link_add_remote_sink(
6848                 aconnector->dc_link,
6849                 (uint8_t *)edid,
6850                 (edid->extensions + 1) * EDID_LENGTH,
6851                 &init_params);
6852
6853         if (aconnector->base.force == DRM_FORCE_ON) {
6854                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6855                 aconnector->dc_link->local_sink :
6856                 aconnector->dc_em_sink;
6857                 dc_sink_retain(aconnector->dc_sink);
6858         }
6859 }
6860
6861 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6862 {
6863         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6864
6865         /*
6866          * In case of a headless boot with force-on for a DP managed connector,
6867          * those settings have to be != 0 to get an initial modeset.
6868          */
6869         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6870                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6871                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6872         }
6873
6875         aconnector->base.override_edid = true;
6876         create_eml_sink(aconnector);
6877 }
6878
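/*
 * Build a dc_stream_state for this sink/mode and validate it with DC,
 * lowering the requested bpc in steps of 2 (down to a floor of 6)
 * until validation passes. If the encoder itself fails validation,
 * retry once with YCbCr420 output forced.
 */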
6879 static struct dc_stream_state *
6880 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6881                                 const struct drm_display_mode *drm_mode,
6882                                 const struct dm_connector_state *dm_state,
6883                                 const struct dc_stream_state *old_stream)
6884 {
6885         struct drm_connector *connector = &aconnector->base;
6886         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6887         struct dc_stream_state *stream;
6888         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6889         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6890         enum dc_status dc_result = DC_OK;
6891
6892         do {
6893                 stream = create_stream_for_sink(aconnector, drm_mode,
6894                                                 dm_state, old_stream,
6895                                                 requested_bpc);
6896                 if (stream == NULL) {
6897                         DRM_ERROR("Failed to create stream for sink!\n");
6898                         break;
6899                 }
6900
6901                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6902
6903                 if (dc_result != DC_OK) {
6904                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6905                                       drm_mode->hdisplay,
6906                                       drm_mode->vdisplay,
6907                                       drm_mode->clock,
6908                                       dc_result,
6909                                       dc_status_to_str(dc_result));
6910
6911                         dc_stream_release(stream);
6912                         stream = NULL;
6913                         requested_bpc -= 2; /* lower bpc to retry validation */
6914                 }
6915
6916         } while (stream == NULL && requested_bpc >= 6);
6917
6918         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6919                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6920
6921                 aconnector->force_yuv420_output = true;
6922                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6923                                                 dm_state, old_stream);
6924                 aconnector->force_yuv420_output = false;
6925         }
6926
6927         return stream;
6928 }
6929
6930 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6931                                    struct drm_display_mode *mode)
6932 {
6933         int result = MODE_ERROR;
6934         struct dc_sink *dc_sink;
6935         /* TODO: Unhardcode stream count */
6936         struct dc_stream_state *stream;
6937         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6938
6939         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6940                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6941                 return result;
6942
6943         /*
6944          * Only run this the first time mode_valid is called, to initialize
6945          * EDID management.
6946          */
6947         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6948                 !aconnector->dc_em_sink)
6949                 handle_edid_mgmt(aconnector);
6950
6951         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6952
6953         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6954                                 aconnector->base.force != DRM_FORCE_ON) {
6955                 DRM_ERROR("dc_sink is NULL!\n");
6956                 goto fail;
6957         }
6958
6959         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6960         if (stream) {
6961                 dc_stream_release(stream);
6962                 result = MODE_OK;
6963         }
6964
6965 fail:
6966         /* TODO: error handling */
6967         return result;
6968 }
6969
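/*
 * Pack the connector's HDR output metadata into a DC info packet:
 * a Dynamic Range and Mastering infoframe for HDMI, or the
 * equivalent SDP for DP/eDP sinks.
 */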
6970 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6971                                 struct dc_info_packet *out)
6972 {
6973         struct hdmi_drm_infoframe frame;
6974         unsigned char buf[30]; /* 26 + 4 */
6975         ssize_t len;
6976         int ret, i;
6977
6978         memset(out, 0, sizeof(*out));
6979
6980         if (!state->hdr_output_metadata)
6981                 return 0;
6982
6983         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6984         if (ret)
6985                 return ret;
6986
6987         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6988         if (len < 0)
6989                 return (int)len;
6990
6991         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6992         if (len != 30)
6993                 return -EINVAL;
6994
6995         /* Prepare the infopacket for DC. */
6996         switch (state->connector->connector_type) {
6997         case DRM_MODE_CONNECTOR_HDMIA:
6998                 out->hb0 = 0x87; /* type */
6999                 out->hb1 = 0x01; /* version */
7000                 out->hb2 = 0x1A; /* length */
7001                 out->sb[0] = buf[3]; /* checksum */
7002                 i = 1;
7003                 break;
7004
7005         case DRM_MODE_CONNECTOR_DisplayPort:
7006         case DRM_MODE_CONNECTOR_eDP:
7007                 out->hb0 = 0x00; /* sdp id, zero */
7008                 out->hb1 = 0x87; /* type */
7009                 out->hb2 = 0x1D; /* payload len - 1 */
7010                 out->hb3 = (0x13 << 2); /* sdp version */
7011                 out->sb[0] = 0x01; /* version */
7012                 out->sb[1] = 0x1A; /* length */
7013                 i = 2;
7014                 break;
7015
7016         default:
7017                 return -EINVAL;
7018         }
7019
7020         memcpy(&out->sb[i], &buf[4], 26);
7021         out->valid = true;
7022
7023         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7024                        sizeof(out->sb), false);
7025
7026         return 0;
7027 }
7028
7029 static int
7030 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7031                                  struct drm_atomic_state *state)
7032 {
7033         struct drm_connector_state *new_con_state =
7034                 drm_atomic_get_new_connector_state(state, conn);
7035         struct drm_connector_state *old_con_state =
7036                 drm_atomic_get_old_connector_state(state, conn);
7037         struct drm_crtc *crtc = new_con_state->crtc;
7038         struct drm_crtc_state *new_crtc_state;
7039         int ret;
7040
7041         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7042
7043         if (!crtc)
7044                 return 0;
7045
7046         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7047                 struct dc_info_packet hdr_infopacket;
7048
7049                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7050                 if (ret)
7051                         return ret;
7052
7053                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7054                 if (IS_ERR(new_crtc_state))
7055                         return PTR_ERR(new_crtc_state);
7056
7057                 /*
7058                  * DC considers the stream backends changed if the
7059                  * static metadata changes. Forcing the modeset also
7060                  * gives a simple way for userspace to switch from
7061                  * 8bpc to 10bpc when setting the metadata to enter
7062                  * or exit HDR.
7063                  *
7064                  * Changing the static metadata after it's been
7065                  * set is permissible, however. So only force a
7066                  * modeset if we're entering or exiting HDR.
7067                  */
7068                 new_crtc_state->mode_changed =
7069                         !old_con_state->hdr_output_metadata ||
7070                         !new_con_state->hdr_output_metadata;
7071         }
7072
7073         return 0;
7074 }
7075
7076 static const struct drm_connector_helper_funcs
7077 amdgpu_dm_connector_helper_funcs = {
7078         /*
7079          * When hotplugging a second, bigger display in fbcon mode, the bigger
7080          * resolution modes will be filtered by drm_mode_validate_size() and will
7081          * be missing after the user starts lightdm. So we need to renew the
7082          * mode list in the get_modes callback, not just return the mode count.
7083          */
7084         .get_modes = get_modes,
7085         .mode_valid = amdgpu_dm_connector_mode_valid,
7086         .atomic_check = amdgpu_dm_connector_atomic_check,
7087 };
7088
7089 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7090 {
7091 }
7092
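/*
 * Count the non-cursor planes that will be active on this CRTC after
 * the commit. Planes not touched by the atomic state kept their
 * previously validated configuration and therefore count as active.
 */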
7093 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7094 {
7095         struct drm_atomic_state *state = new_crtc_state->state;
7096         struct drm_plane *plane;
7097         int num_active = 0;
7098
7099         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7100                 struct drm_plane_state *new_plane_state;
7101
7102                 /* Cursor planes are "fake". */
7103                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7104                         continue;
7105
7106                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7107
7108                 if (!new_plane_state) {
7109                         /*
7110                          * The plane is enabled on the CRTC and hasn't changed
7111                          * state. This means that it previously passed
7112                          * validation and is therefore enabled.
7113                          */
7114                         num_active += 1;
7115                         continue;
7116                 }
7117
7118                 /* We need a framebuffer to be considered enabled. */
7119                 num_active += (new_plane_state->fb != NULL);
7120         }
7121
7122         return num_active;
7123 }
7124
7125 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7126                                          struct drm_crtc_state *new_crtc_state)
7127 {
7128         struct dm_crtc_state *dm_new_crtc_state =
7129                 to_dm_crtc_state(new_crtc_state);
7130
7131         dm_new_crtc_state->active_planes = 0;
7132
7133         if (!dm_new_crtc_state->stream)
7134                 return;
7135
7136         dm_new_crtc_state->active_planes =
7137                 count_crtc_active_planes(new_crtc_state);
7138 }
7139
7140 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7141                                        struct drm_atomic_state *state)
7142 {
7143         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7144                                                                           crtc);
7145         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7146         struct dc *dc = adev->dm.dc;
7147         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7148         int ret = -EINVAL;
7149
7150         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7151
7152         dm_update_crtc_active_planes(crtc, crtc_state);
7153
7154         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7155                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7156                 return ret;
7157         }
7158
7159         /*
7160          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7161          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7162          * planes are disabled, which is not supported by the hardware. And there is legacy
7163          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7164          */
7165         if (crtc_state->enable &&
7166             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7167                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7168                 return -EINVAL;
7169         }
7170
7171         /* In some use cases, like reset, no stream is attached */
7172         if (!dm_crtc_state->stream)
7173                 return 0;
7174
7175         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7176                 return 0;
7177
7178         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7179         return ret;
7180 }
7181
7182 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7183                                       const struct drm_display_mode *mode,
7184                                       struct drm_display_mode *adjusted_mode)
7185 {
7186         return true;
7187 }
7188
7189 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7190         .disable = dm_crtc_helper_disable,
7191         .atomic_check = dm_crtc_helper_atomic_check,
7192         .mode_fixup = dm_crtc_helper_mode_fixup,
7193         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7194 };
7195
7196 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7197 {
7198
7199 }
7200
7201 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7202 {
7203         switch (display_color_depth) {
7204         case COLOR_DEPTH_666:
7205                 return 6;
7206         case COLOR_DEPTH_888:
7207                 return 8;
7208         case COLOR_DEPTH_101010:
7209                 return 10;
7210         case COLOR_DEPTH_121212:
7211                 return 12;
7212         case COLOR_DEPTH_141414:
7213                 return 14;
7214         case COLOR_DEPTH_161616:
7215                 return 16;
7216         default:
7217                 break;
7218         }
7219         return 0;
7220 }
7221
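/*
 * For MST connectors, compute the PBN required by the adjusted mode
 * and reserve the corresponding number of VCPI time slots in the
 * atomic MST state.
 */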
7222 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7223                                           struct drm_crtc_state *crtc_state,
7224                                           struct drm_connector_state *conn_state)
7225 {
7226         struct drm_atomic_state *state = crtc_state->state;
7227         struct drm_connector *connector = conn_state->connector;
7228         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7229         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7230         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7231         struct drm_dp_mst_topology_mgr *mst_mgr;
7232         struct drm_dp_mst_port *mst_port;
7233         enum dc_color_depth color_depth;
7234         int clock, bpp = 0;
7235         bool is_y420 = false;
7236
7237         if (!aconnector->port || !aconnector->dc_sink)
7238                 return 0;
7239
7240         mst_port = aconnector->port;
7241         mst_mgr = &aconnector->mst_port->mst_mgr;
7242
7243         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7244                 return 0;
7245
7246         if (!state->duplicated) {
7247                 int max_bpc = conn_state->max_requested_bpc;
7248                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7249                                 aconnector->force_yuv420_output;
7250                 color_depth = convert_color_depth_from_display_info(connector,
7251                                                                     is_y420,
7252                                                                     max_bpc);
7253                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7254                 clock = adjusted_mode->clock;
7255                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7256         }
7257         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7258                                                                            mst_mgr,
7259                                                                            mst_port,
7260                                                                            dm_new_connector_state->pbn,
7261                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7262         if (dm_new_connector_state->vcpi_slots < 0) {
7263                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7264                 return dm_new_connector_state->vcpi_slots;
7265         }
7266         return 0;
7267 }
7268
7269 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7270         .disable = dm_encoder_helper_disable,
7271         .atomic_check = dm_encoder_helper_atomic_check
7272 };
7273
7274 #if defined(CONFIG_DRM_AMD_DC_DCN)
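/*
 * Redistribute VCPI slots across the MST streams in the DC state using
 * the PBN values computed by compute_mst_dsc_configs_for_state(),
 * enabling or disabling DSC on each port accordingly.
 */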
7275 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7276                                             struct dc_state *dc_state,
7277                                             struct dsc_mst_fairness_vars *vars)
7278 {
7279         struct dc_stream_state *stream = NULL;
7280         struct drm_connector *connector;
7281         struct drm_connector_state *new_con_state;
7282         struct amdgpu_dm_connector *aconnector;
7283         struct dm_connector_state *dm_conn_state;
7284         int i, j;
7285         int vcpi, pbn_div, pbn, slot_num = 0;
7286
7287         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7288
7289                 aconnector = to_amdgpu_dm_connector(connector);
7290
7291                 if (!aconnector->port)
7292                         continue;
7293
7294                 if (!new_con_state || !new_con_state->crtc)
7295                         continue;
7296
7297                 dm_conn_state = to_dm_connector_state(new_con_state);
7298
7299                 for (j = 0; j < dc_state->stream_count; j++) {
7300                         stream = dc_state->streams[j];
7301                         if (!stream)
7302                                 continue;
7303
7304                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7305                                 break;
7306
7307                         stream = NULL;
7308                 }
7309
7310                 if (!stream)
7311                         continue;
7312
7313                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7314                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7315                 for (j = 0; j < dc_state->stream_count; j++) {
7316                         if (vars[j].aconnector == aconnector) {
7317                                 pbn = vars[j].pbn;
7318                                 break;
7319                         }
7320                 }
7321
7322                 if (j == dc_state->stream_count)
7323                         continue;
7324
7325                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7326
7327                 if (stream->timing.flags.DSC != 1) {
7328                         dm_conn_state->pbn = pbn;
7329                         dm_conn_state->vcpi_slots = slot_num;
7330
7331                         drm_dp_mst_atomic_enable_dsc(state,
7332                                                      aconnector->port,
7333                                                      dm_conn_state->pbn,
7334                                                      0,
7335                                                      false);
7336                         continue;
7337                 }
7338
7339                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7340                                                     aconnector->port,
7341                                                     pbn, pbn_div,
7342                                                     true);
7343                 if (vcpi < 0)
7344                         return vcpi;
7345
7346                 dm_conn_state->pbn = pbn;
7347                 dm_conn_state->vcpi_slots = vcpi;
7348         }
7349         return 0;
7350 }
7351 #endif
7352
7353 static void dm_drm_plane_reset(struct drm_plane *plane)
7354 {
7355         struct dm_plane_state *amdgpu_state = NULL;
7356
7357         if (plane->state)
7358                 plane->funcs->atomic_destroy_state(plane, plane->state);
7359
7360         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7361         WARN_ON(amdgpu_state == NULL);
7362
7363         if (amdgpu_state)
7364                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7365 }
7366
7367 static struct drm_plane_state *
7368 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7369 {
7370         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7371
7372         old_dm_plane_state = to_dm_plane_state(plane->state);
7373         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7374         if (!dm_plane_state)
7375                 return NULL;
7376
7377         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7378
7379         if (old_dm_plane_state->dc_state) {
7380                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7381                 dc_plane_state_retain(dm_plane_state->dc_state);
7382         }
7383
7384         return &dm_plane_state->base;
7385 }
7386
7387 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7388                                 struct drm_plane_state *state)
7389 {
7390         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7391
7392         if (dm_plane_state->dc_state)
7393                 dc_plane_state_release(dm_plane_state->dc_state);
7394
7395         drm_atomic_helper_plane_destroy_state(plane, state);
7396 }
7397
7398 static const struct drm_plane_funcs dm_plane_funcs = {
7399         .update_plane   = drm_atomic_helper_update_plane,
7400         .disable_plane  = drm_atomic_helper_disable_plane,
7401         .destroy        = drm_primary_helper_destroy,
7402         .reset = dm_drm_plane_reset,
7403         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7404         .atomic_destroy_state = dm_drm_plane_destroy_state,
7405         .format_mod_supported = dm_plane_format_mod_supported,
7406 };
7407
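/*
 * Pin the framebuffer BO into a displayable domain, record its GPU
 * address in the amdgpu_framebuffer, and fill DC buffer attributes
 * for newly created planes that DC isn't using yet.
 */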
7408 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7409                                       struct drm_plane_state *new_state)
7410 {
7411         struct amdgpu_framebuffer *afb;
7412         struct drm_gem_object *obj;
7413         struct amdgpu_device *adev;
7414         struct amdgpu_bo *rbo;
7415         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7416         struct list_head list;
7417         struct ttm_validate_buffer tv;
7418         struct ww_acquire_ctx ticket;
7419         uint32_t domain;
7420         int r;
7421
7422         if (!new_state->fb) {
7423                 DRM_DEBUG_KMS("No FB bound\n");
7424                 return 0;
7425         }
7426
7427         afb = to_amdgpu_framebuffer(new_state->fb);
7428         obj = new_state->fb->obj[0];
7429         rbo = gem_to_amdgpu_bo(obj);
7430         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7431         INIT_LIST_HEAD(&list);
7432
7433         tv.bo = &rbo->tbo;
7434         tv.num_shared = 1;
7435         list_add(&tv.head, &list);
7436
7437         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7438         if (r) {
7439                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7440                 return r;
7441         }
7442
7443         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7444                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7445         else
7446                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7447
7448         r = amdgpu_bo_pin(rbo, domain);
7449         if (unlikely(r != 0)) {
7450                 if (r != -ERESTARTSYS)
7451                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7452                 ttm_eu_backoff_reservation(&ticket, &list);
7453                 return r;
7454         }
7455
7456         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7457         if (unlikely(r != 0)) {
7458                 amdgpu_bo_unpin(rbo);
7459                 ttm_eu_backoff_reservation(&ticket, &list);
7460                 DRM_ERROR("%p bind failed\n", rbo);
7461                 return r;
7462         }
7463
7464         ttm_eu_backoff_reservation(&ticket, &list);
7465
7466         afb->address = amdgpu_bo_gpu_offset(rbo);
7467
7468         amdgpu_bo_ref(rbo);
7469
7470         /*
7471          * We don't do surface updates on planes that have been newly created,
7472          * but we also don't have the afb->address during atomic check.
7473          *
7474          * Fill in buffer attributes depending on the address here, but only on
7475          * newly created planes since they're not being used by DC yet and this
7476          * won't modify global state.
7477          */
7478         dm_plane_state_old = to_dm_plane_state(plane->state);
7479         dm_plane_state_new = to_dm_plane_state(new_state);
7480
7481         if (dm_plane_state_new->dc_state &&
7482             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7483                 struct dc_plane_state *plane_state =
7484                         dm_plane_state_new->dc_state;
7485                 bool force_disable_dcc = !plane_state->dcc.enable;
7486
7487                 fill_plane_buffer_attributes(
7488                         adev, afb, plane_state->format, plane_state->rotation,
7489                         afb->tiling_flags,
7490                         &plane_state->tiling_info, &plane_state->plane_size,
7491                         &plane_state->dcc, &plane_state->address,
7492                         afb->tmz_surface, force_disable_dcc);
7493         }
7494
7495         return 0;
7496 }
7497
7498 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7499                                        struct drm_plane_state *old_state)
7500 {
7501         struct amdgpu_bo *rbo;
7502         int r;
7503
7504         if (!old_state->fb)
7505                 return;
7506
7507         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7508         r = amdgpu_bo_reserve(rbo, false);
7509         if (unlikely(r)) {
7510                 DRM_ERROR("failed to reserve rbo before unpin\n");
7511                 return;
7512         }
7513
7514         amdgpu_bo_unpin(rbo);
7515         amdgpu_bo_unreserve(rbo);
7516         amdgpu_bo_unref(&rbo);
7517 }
7518
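/*
 * Clip the plane's viewport against the CRTC, then convert DC's
 * scaling caps (1.0 == 1000) into DRM's 16.16 fixed-point min/max
 * scale for the common plane-state helper check.
 */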
7519 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7520                                        struct drm_crtc_state *new_crtc_state)
7521 {
7522         struct drm_framebuffer *fb = state->fb;
7523         int min_downscale, max_upscale;
7524         int min_scale = 0;
7525         int max_scale = INT_MAX;
7526
7527         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7528         if (fb && state->crtc) {
7529                 /* Validate viewport to cover the case when only the position changes */
7530                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7531                         int viewport_width = state->crtc_w;
7532                         int viewport_height = state->crtc_h;
7533
7534                         if (state->crtc_x < 0)
7535                                 viewport_width += state->crtc_x;
7536                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7537                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7538
7539                         if (state->crtc_y < 0)
7540                                 viewport_height += state->crtc_y;
7541                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7542                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7543
7544                         if (viewport_width < 0 || viewport_height < 0) {
7545                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7546                                 return -EINVAL;
7547                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7548                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7549                                 return -EINVAL;
7550                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7551                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7552                                 return -EINVAL;
7553                         }
7554
7555                 }
7556
7557                 /* Get min/max allowed scaling factors from plane caps. */
7558                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7559                                              &min_downscale, &max_upscale);
7560                 /*
7561                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7562                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7563                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7564                  */
7565                 min_scale = (1000 << 16) / max_upscale;
7566                 max_scale = (1000 << 16) / min_downscale;
7567         }
7568
7569         return drm_atomic_helper_check_plane_state(
7570                 state, new_crtc_state, min_scale, max_scale, true, true);
7571 }
7572
7573 static int dm_plane_atomic_check(struct drm_plane *plane,
7574                                  struct drm_atomic_state *state)
7575 {
7576         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7577                                                                                  plane);
7578         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7579         struct dc *dc = adev->dm.dc;
7580         struct dm_plane_state *dm_plane_state;
7581         struct dc_scaling_info scaling_info;
7582         struct drm_crtc_state *new_crtc_state;
7583         int ret;
7584
7585         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7586
7587         dm_plane_state = to_dm_plane_state(new_plane_state);
7588
7589         if (!dm_plane_state->dc_state)
7590                 return 0;
7591
7592         new_crtc_state =
7593                 drm_atomic_get_new_crtc_state(state,
7594                                               new_plane_state->crtc);
7595         if (!new_crtc_state)
7596                 return -EINVAL;
7597
7598         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7599         if (ret)
7600                 return ret;
7601
7602         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7603         if (ret)
7604                 return ret;
7605
7606         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7607                 return 0;
7608
7609         return -EINVAL;
7610 }
7611
7612 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7613                                        struct drm_atomic_state *state)
7614 {
7615         /* Only support async updates on cursor planes. */
7616         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7617                 return -EINVAL;
7618
7619         return 0;
7620 }
7621
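/*
 * Apply an async cursor move: swap the new framebuffer and copy the
 * new coordinates straight into the current plane state, then program
 * the cursor immediately, without a full atomic commit.
 */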
7622 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7623                                          struct drm_atomic_state *state)
7624 {
7625         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7626                                                                            plane);
7627         struct drm_plane_state *old_state =
7628                 drm_atomic_get_old_plane_state(state, plane);
7629
7630         trace_amdgpu_dm_atomic_update_cursor(new_state);
7631
7632         swap(plane->state->fb, new_state->fb);
7633
7634         plane->state->src_x = new_state->src_x;
7635         plane->state->src_y = new_state->src_y;
7636         plane->state->src_w = new_state->src_w;
7637         plane->state->src_h = new_state->src_h;
7638         plane->state->crtc_x = new_state->crtc_x;
7639         plane->state->crtc_y = new_state->crtc_y;
7640         plane->state->crtc_w = new_state->crtc_w;
7641         plane->state->crtc_h = new_state->crtc_h;
7642
7643         handle_cursor_update(plane, old_state);
7644 }
7645
7646 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7647         .prepare_fb = dm_plane_helper_prepare_fb,
7648         .cleanup_fb = dm_plane_helper_cleanup_fb,
7649         .atomic_check = dm_plane_atomic_check,
7650         .atomic_async_check = dm_plane_atomic_async_check,
7651         .atomic_async_update = dm_plane_atomic_async_update
7652 };
7653
7654 /*
7655  * TODO: these are currently initialized to rgb formats only.
7656  * For future use cases we should either initialize them dynamically based on
7657  * plane capabilities, or initialize this array to all formats, so the internal
7658  * drm check will succeed, and let DC implement the proper check.
7659  */
7660 static const uint32_t rgb_formats[] = {
7661         DRM_FORMAT_XRGB8888,
7662         DRM_FORMAT_ARGB8888,
7663         DRM_FORMAT_RGBA8888,
7664         DRM_FORMAT_XRGB2101010,
7665         DRM_FORMAT_XBGR2101010,
7666         DRM_FORMAT_ARGB2101010,
7667         DRM_FORMAT_ABGR2101010,
7668         DRM_FORMAT_XRGB16161616,
7669         DRM_FORMAT_XBGR16161616,
7670         DRM_FORMAT_ARGB16161616,
7671         DRM_FORMAT_ABGR16161616,
7672         DRM_FORMAT_XBGR8888,
7673         DRM_FORMAT_ABGR8888,
7674         DRM_FORMAT_RGB565,
7675 };
7676
7677 static const uint32_t overlay_formats[] = {
7678         DRM_FORMAT_XRGB8888,
7679         DRM_FORMAT_ARGB8888,
7680         DRM_FORMAT_RGBA8888,
7681         DRM_FORMAT_XBGR8888,
7682         DRM_FORMAT_ABGR8888,
7683         DRM_FORMAT_RGB565
7684 };
7685
7686 static const u32 cursor_formats[] = {
7687         DRM_FORMAT_ARGB8888
7688 };
7689
7690 static int get_plane_formats(const struct drm_plane *plane,
7691                              const struct dc_plane_cap *plane_cap,
7692                              uint32_t *formats, int max_formats)
7693 {
7694         int i, num_formats = 0;
7695
7696         /*
7697          * TODO: Query support for each group of formats directly from
7698          * DC plane caps. This will require adding more formats to the
7699          * caps list.
7700          */
7701
7702         switch (plane->type) {
7703         case DRM_PLANE_TYPE_PRIMARY:
7704                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7705                         if (num_formats >= max_formats)
7706                                 break;
7707
7708                         formats[num_formats++] = rgb_formats[i];
7709                 }
7710
7711                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7712                         formats[num_formats++] = DRM_FORMAT_NV12;
7713                 if (plane_cap && plane_cap->pixel_format_support.p010)
7714                         formats[num_formats++] = DRM_FORMAT_P010;
7715                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7716                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7717                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7718                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7719                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7720                 }
7721                 break;
7722
7723         case DRM_PLANE_TYPE_OVERLAY:
7724                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7725                         if (num_formats >= max_formats)
7726                                 break;
7727
7728                         formats[num_formats++] = overlay_formats[i];
7729                 }
7730                 break;
7731
7732         case DRM_PLANE_TYPE_CURSOR:
7733                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7734                         if (num_formats >= max_formats)
7735                                 break;
7736
7737                         formats[num_formats++] = cursor_formats[i];
7738                 }
7739                 break;
7740         }
7741
7742         return num_formats;
7743 }
7744
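/*
 * Register one DRM plane, deriving its format and modifier lists and
 * its blending/color-encoding properties from the DC plane
 * capabilities, plus a rotation property on supported ASICs.
 */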
7745 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7746                                 struct drm_plane *plane,
7747                                 unsigned long possible_crtcs,
7748                                 const struct dc_plane_cap *plane_cap)
7749 {
7750         uint32_t formats[32];
7751         int num_formats;
7752         int res = -EPERM;
7753         unsigned int supported_rotations;
7754         uint64_t *modifiers = NULL;
7755
7756         num_formats = get_plane_formats(plane, plane_cap, formats,
7757                                         ARRAY_SIZE(formats));
7758
7759         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7760         if (res)
7761                 return res;
7762
7763         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7764                                        &dm_plane_funcs, formats, num_formats,
7765                                        modifiers, plane->type, NULL);
7766         kfree(modifiers);
7767         if (res)
7768                 return res;
7769
7770         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7771             plane_cap && plane_cap->per_pixel_alpha) {
7772                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7773                                           BIT(DRM_MODE_BLEND_PREMULTI);
7774
7775                 drm_plane_create_alpha_property(plane);
7776                 drm_plane_create_blend_mode_property(plane, blend_caps);
7777         }
7778
7779         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7780             plane_cap &&
7781             (plane_cap->pixel_format_support.nv12 ||
7782              plane_cap->pixel_format_support.p010)) {
7783                 /* This only affects YUV formats. */
7784                 drm_plane_create_color_properties(
7785                         plane,
7786                         BIT(DRM_COLOR_YCBCR_BT601) |
7787                         BIT(DRM_COLOR_YCBCR_BT709) |
7788                         BIT(DRM_COLOR_YCBCR_BT2020),
7789                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7790                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7791                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7792         }
7793
7794         supported_rotations =
7795                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7796                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7797
7798         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7799             plane->type != DRM_PLANE_TYPE_CURSOR)
7800                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7801                                                    supported_rotations);
7802
7803         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7804
7805         /* Create (reset) the plane state */
7806         if (plane->funcs->reset)
7807                 plane->funcs->reset(plane);
7808
7809         return 0;
7810 }
7811
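/*
 * Allocate a dedicated cursor plane and an amdgpu_crtc, wire them up
 * as a DRM CRTC, and enable color management on it.
 */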
7812 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7813                                struct drm_plane *plane,
7814                                uint32_t crtc_index)
7815 {
7816         struct amdgpu_crtc *acrtc = NULL;
7817         struct drm_plane *cursor_plane;
7818
7819         int res = -ENOMEM;
7820
7821         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7822         if (!cursor_plane)
7823                 goto fail;
7824
7825         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7826         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7827
7828         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7829         if (!acrtc)
7830                 goto fail;
7831
7832         res = drm_crtc_init_with_planes(
7833                         dm->ddev,
7834                         &acrtc->base,
7835                         plane,
7836                         cursor_plane,
7837                         &amdgpu_dm_crtc_funcs, NULL);
7838
7839         if (res)
7840                 goto fail;
7841
7842         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7843
7844         /* Create (reset) the plane state */
7845         if (acrtc->base.funcs->reset)
7846                 acrtc->base.funcs->reset(&acrtc->base);
7847
7848         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7849         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7850
7851         acrtc->crtc_id = crtc_index;
7852         acrtc->base.enabled = false;
7853         acrtc->otg_inst = -1;
7854
7855         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7856         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7857                                    true, MAX_COLOR_LUT_ENTRIES);
7858         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7859
7860         return 0;
7861
7862 fail:
7863         kfree(acrtc);
7864         kfree(cursor_plane);
7865         return res;
7866 }
7867
7868
7869 static int to_drm_connector_type(enum signal_type st)
7870 {
7871         switch (st) {
7872         case SIGNAL_TYPE_HDMI_TYPE_A:
7873                 return DRM_MODE_CONNECTOR_HDMIA;
7874         case SIGNAL_TYPE_EDP:
7875                 return DRM_MODE_CONNECTOR_eDP;
7876         case SIGNAL_TYPE_LVDS:
7877                 return DRM_MODE_CONNECTOR_LVDS;
7878         case SIGNAL_TYPE_RGB:
7879                 return DRM_MODE_CONNECTOR_VGA;
7880         case SIGNAL_TYPE_DISPLAY_PORT:
7881         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7882                 return DRM_MODE_CONNECTOR_DisplayPort;
7883         case SIGNAL_TYPE_DVI_DUAL_LINK:
7884         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7885                 return DRM_MODE_CONNECTOR_DVID;
7886         case SIGNAL_TYPE_VIRTUAL:
7887                 return DRM_MODE_CONNECTOR_VIRTUAL;
7888
7889         default:
7890                 return DRM_MODE_CONNECTOR_Unknown;
7891         }
7892 }
7893
7894 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7895 {
7896         struct drm_encoder *encoder;
7897
7898         /* There is only one encoder per connector */
7899         drm_connector_for_each_possible_encoder(connector, encoder)
7900                 return encoder;
7901
7902         return NULL;
7903 }
7904
7905 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7906 {
7907         struct drm_encoder *encoder;
7908         struct amdgpu_encoder *amdgpu_encoder;
7909
7910         encoder = amdgpu_dm_connector_to_encoder(connector);
7911
7912         if (encoder == NULL)
7913                 return;
7914
7915         amdgpu_encoder = to_amdgpu_encoder(encoder);
7916
7917         amdgpu_encoder->native_mode.clock = 0;
7918
7919         if (!list_empty(&connector->probed_modes)) {
7920                 struct drm_display_mode *preferred_mode = NULL;
7921
7922                 list_for_each_entry(preferred_mode,
7923                                     &connector->probed_modes,
7924                                     head) {
7925                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7926                                 amdgpu_encoder->native_mode = *preferred_mode;
7927
7928                         break;
7929                 }
7930
7931         }
7932 }
7933
7934 static struct drm_display_mode *
7935 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7936                              char *name,
7937                              int hdisplay, int vdisplay)
7938 {
7939         struct drm_device *dev = encoder->dev;
7940         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7941         struct drm_display_mode *mode = NULL;
7942         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7943
7944         mode = drm_mode_duplicate(dev, native_mode);
7945
7946         if (mode == NULL)
7947                 return NULL;
7948
7949         mode->hdisplay = hdisplay;
7950         mode->vdisplay = vdisplay;
7951         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7952         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7953
7954         return mode;
7956 }
7957
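/*
 * Add common modes smaller than the panel's native mode (and not
 * already in the probed list), built by duplicating the native timing
 * with an adjusted active area.
 */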
7958 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7959                                                  struct drm_connector *connector)
7960 {
7961         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7962         struct drm_display_mode *mode = NULL;
7963         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7964         struct amdgpu_dm_connector *amdgpu_dm_connector =
7965                                 to_amdgpu_dm_connector(connector);
7966         int i;
7967         int n;
7968         struct mode_size {
7969                 char name[DRM_DISPLAY_MODE_LEN];
7970                 int w;
7971                 int h;
7972         } common_modes[] = {
7973                 {  "640x480",  640,  480},
7974                 {  "800x600",  800,  600},
7975                 { "1024x768", 1024,  768},
7976                 { "1280x720", 1280,  720},
7977                 { "1280x800", 1280,  800},
7978                 {"1280x1024", 1280, 1024},
7979                 { "1440x900", 1440,  900},
7980                 {"1680x1050", 1680, 1050},
7981                 {"1600x1200", 1600, 1200},
7982                 {"1920x1080", 1920, 1080},
7983                 {"1920x1200", 1920, 1200}
7984         };
7985
7986         n = ARRAY_SIZE(common_modes);
7987
7988         for (i = 0; i < n; i++) {
7989                 struct drm_display_mode *curmode = NULL;
7990                 bool mode_existed = false;
7991
7992                 if (common_modes[i].w > native_mode->hdisplay ||
7993                     common_modes[i].h > native_mode->vdisplay ||
7994                    (common_modes[i].w == native_mode->hdisplay &&
7995                     common_modes[i].h == native_mode->vdisplay))
7996                         continue;
7997
7998                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7999                         if (common_modes[i].w == curmode->hdisplay &&
8000                             common_modes[i].h == curmode->vdisplay) {
8001                                 mode_existed = true;
8002                                 break;
8003                         }
8004                 }
8005
8006                 if (mode_existed)
8007                         continue;
8008
8009                 mode = amdgpu_dm_create_common_mode(encoder,
8010                                 common_modes[i].name, common_modes[i].w,
8011                                 common_modes[i].h);
                /* amdgpu_dm_create_common_mode() returns NULL on alloc failure */
                if (!mode)
                        continue;
8012                 drm_mode_probed_add(connector, mode);
8013                 amdgpu_dm_connector->num_modes++;
8014         }
8015 }
8016
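/*
 * For internal panels (eDP/LVDS) with a valid native mode, look up any
 * panel-orientation quirk matching the native resolution and apply it to
 * the connector.
 */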
8017 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8018 {
8019         struct drm_encoder *encoder;
8020         struct amdgpu_encoder *amdgpu_encoder;
8021         const struct drm_display_mode *native_mode;
8022
8023         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8024             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8025                 return;
8026
8027         encoder = amdgpu_dm_connector_to_encoder(connector);
8028         if (!encoder)
8029                 return;
8030
8031         amdgpu_encoder = to_amdgpu_encoder(encoder);
8032
8033         native_mode = &amdgpu_encoder->native_mode;
8034         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8035                 return;
8036
8037         drm_connector_set_panel_orientation_with_quirk(connector,
8038                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8039                                                        native_mode->hdisplay,
8040                                                        native_mode->vdisplay);
8041 }
8042
8043 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8044                                               struct edid *edid)
8045 {
8046         struct amdgpu_dm_connector *amdgpu_dm_connector =
8047                         to_amdgpu_dm_connector(connector);
8048
8049         if (edid) {
8050                 /* empty probed_modes */
8051                 INIT_LIST_HEAD(&connector->probed_modes);
8052                 amdgpu_dm_connector->num_modes =
8053                                 drm_add_edid_modes(connector, edid);
8054
8055                 /* Sort the probed modes before calling
8056                  * amdgpu_dm_get_native_mode(), since an EDID can have
8057                  * more than one preferred mode. Modes later in the
8058                  * probed mode list may be preferred and of higher
8059                  * resolution: for example, 3840x2160 in the base EDID
8060                  * preferred timing and 4096x2160 as the preferred
8061                  * resolution in a later DID extension block.
8062                  */
8063                 drm_mode_sort(&connector->probed_modes);
8064                 amdgpu_dm_get_native_mode(connector);
8065
8066                 /* Freesync capabilities are reset by calling
8067                  * drm_add_edid_modes() and need to be
8068                  * restored here.
8069                  */
8070                 amdgpu_dm_update_freesync_caps(connector, edid);
8071
8072                 amdgpu_set_panel_orientation(connector);
8073         } else {
8074                 amdgpu_dm_connector->num_modes = 0;
8075         }
8076 }
8077
8078 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8079                               struct drm_display_mode *mode)
8080 {
8081         struct drm_display_mode *m;
8082
8083         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8084                 if (drm_mode_equal(m, mode))
8085                         return true;
8086         }
8087
8088         return false;
8089 }
8090
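/*
 * Synthesize fixed-refresh variants of the highest-refresh-rate probed
 * mode for the standard rates listed below by stretching vtotal at an
 * unchanged pixel clock. Candidates outside the connector's FreeSync
 * range, or equal to an already probed mode, are skipped. Returns the
 * number of modes added.
 */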
8091 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8092 {
8093         const struct drm_display_mode *m;
8094         struct drm_display_mode *new_mode;
8095         uint i;
8096         uint32_t new_modes_count = 0;
8097
8098         /* Standard FPS values
8099          *
8100          * 23.976       - TV/NTSC
8101          * 24           - Cinema
8102          * 25           - TV/PAL
8103          * 29.97        - TV/NTSC
8104          * 30           - TV/NTSC
8105          * 48           - Cinema HFR
8106          * 50           - TV/PAL
8107          * 60           - Commonly used
8108          * 48,72,96,120 - Multiples of 24
8109          */
8110         static const uint32_t common_rates[] = {
8111                 23976, 24000, 25000, 29970, 30000,
8112                 48000, 50000, 60000, 72000, 96000, 120000
8113         };
8114
8115         /*
8116          * Find the mode with the highest refresh rate at the same
8117          * resolution as the preferred mode; some monitors report a
8118          * preferred mode with a lower refresh rate than they support.
8119          */
8120
8121         m = get_highest_refresh_rate_mode(aconnector, true);
8122         if (!m)
8123                 return 0;
8124
8125         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8126                 uint64_t target_vtotal, target_vtotal_diff;
8127                 uint64_t num, den;
8128
8129                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8130                         continue;
8131
8132                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8133                     common_rates[i] > aconnector->max_vfreq * 1000)
8134                         continue;
8135
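                /*
                 * m->clock is in kHz and common_rates[] is in mHz, so
                 * target_vtotal = clock * 10^6 / (rate_mHz * htotal).
                 * Illustrative example (made-up but self-consistent numbers):
                 * clock = 148500, htotal = 2200, vtotal = 1125 retargeted to
                 * 24000 mHz gives 148500000000 / 52800000 = 2812, i.e. the
                 * vtotal is stretched by 1687 lines to hit ~24 Hz at the
                 * same pixel clock.
                 */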
8136                 num = (unsigned long long)m->clock * 1000 * 1000;
8137                 den = common_rates[i] * (unsigned long long)m->htotal;
8138                 target_vtotal = div_u64(num, den);
8139                 target_vtotal_diff = target_vtotal - m->vtotal;
8140
8141                 /* Check for illegal modes */
8142                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8143                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8144                     m->vtotal + target_vtotal_diff < m->vsync_end)
8145                         continue;
8146
8147                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8148                 if (!new_mode)
8149                         goto out;
8150
8151                 new_mode->vtotal += (u16)target_vtotal_diff;
8152                 new_mode->vsync_start += (u16)target_vtotal_diff;
8153                 new_mode->vsync_end += (u16)target_vtotal_diff;
8154                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8155                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8156
8157                 if (!is_duplicate_mode(aconnector, new_mode)) {
8158                         drm_mode_probed_add(&aconnector->base, new_mode);
8159                         new_modes_count += 1;
8160                 } else {
8161                         drm_mode_destroy(aconnector->base.dev, new_mode);
                }
8162         }
8163  out:
8164         return new_modes_count;
8165 }
8166
8167 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8168                                                    struct edid *edid)
8169 {
8170         struct amdgpu_dm_connector *amdgpu_dm_connector =
8171                 to_amdgpu_dm_connector(connector);
8172
8173         if (!(amdgpu_freesync_vid_mode && edid))
8174                 return;
8175
8176         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8177                 amdgpu_dm_connector->num_modes +=
8178                         add_fs_modes(amdgpu_dm_connector);
8179 }
8180
8181 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8182 {
8183         struct amdgpu_dm_connector *amdgpu_dm_connector =
8184                         to_amdgpu_dm_connector(connector);
8185         struct drm_encoder *encoder;
8186         struct edid *edid = amdgpu_dm_connector->edid;
8187
8188         encoder = amdgpu_dm_connector_to_encoder(connector);
8189
8190         if (!drm_edid_is_valid(edid)) {
8191                 amdgpu_dm_connector->num_modes =
8192                                 drm_add_modes_noedid(connector, 640, 480);
8193         } else {
8194                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8195                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8196                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8197         }
8198         amdgpu_dm_fbc_init(connector);
8199
8200         return amdgpu_dm_connector->num_modes;
8201 }
8202
8203 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8204                                      struct amdgpu_dm_connector *aconnector,
8205                                      int connector_type,
8206                                      struct dc_link *link,
8207                                      int link_index)
8208 {
8209         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8210
8211         /*
8212          * Some of the properties below require access to state, like bpc.
8213          * Allocate some default initial connector state with our reset helper.
8214          */
8215         if (aconnector->base.funcs->reset)
8216                 aconnector->base.funcs->reset(&aconnector->base);
8217
8218         aconnector->connector_id = link_index;
8219         aconnector->dc_link = link;
8220         aconnector->base.interlace_allowed = false;
8221         aconnector->base.doublescan_allowed = false;
8222         aconnector->base.stereo_allowed = false;
8223         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8224         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8225         aconnector->audio_inst = -1;
8226         mutex_init(&aconnector->hpd_lock);
8227
8228         /*
8229          * Configure HPD hot-plug support: connector->polled defaults to 0,
8230          * which means HPD hot plug is not supported.
8231          */
8232         switch (connector_type) {
8233         case DRM_MODE_CONNECTOR_HDMIA:
8234                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8235                 aconnector->base.ycbcr_420_allowed =
8236                         link->link_enc->features.hdmi_ycbcr420_supported;
8237                 break;
8238         case DRM_MODE_CONNECTOR_DisplayPort:
8239                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8240                 if (link->is_dig_mapping_flexible &&
8241                     link->dc->res_pool->funcs->link_encs_assign) {
8242                         link->link_enc =
8243                                 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8244                         if (!link->link_enc)
8245                                 link->link_enc =
8246                                         link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8247                 }
8248
8249                 if (link->link_enc)
8250                         aconnector->base.ycbcr_420_allowed =
8251                                 link->link_enc->features.dp_ycbcr420_supported;
8252                 break;
8253         case DRM_MODE_CONNECTOR_DVID:
8254                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8255                 break;
8256         default:
8257                 break;
8258         }
8259
8260         drm_object_attach_property(&aconnector->base.base,
8261                                 dm->ddev->mode_config.scaling_mode_property,
8262                                 DRM_MODE_SCALE_NONE);
8263
8264         drm_object_attach_property(&aconnector->base.base,
8265                                 adev->mode_info.underscan_property,
8266                                 UNDERSCAN_OFF);
8267         drm_object_attach_property(&aconnector->base.base,
8268                                 adev->mode_info.underscan_hborder_property,
8269                                 0);
8270         drm_object_attach_property(&aconnector->base.base,
8271                                 adev->mode_info.underscan_vborder_property,
8272                                 0);
8273
8274         if (!aconnector->mst_port)
8275                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8276
8277         /* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
8278         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8279         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8280
8281         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8282             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8283                 drm_object_attach_property(&aconnector->base.base,
8284                                 adev->mode_info.abm_level_property, 0);
8285         }
8286
8287         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8288             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8289             connector_type == DRM_MODE_CONNECTOR_eDP) {
8290                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8291
8292                 if (!aconnector->mst_port)
8293                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8294
8295 #ifdef CONFIG_DRM_AMD_DC_HDCP
8296                 if (adev->dm.hdcp_workqueue)
8297                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8298 #endif
8299         }
8300 }
8301
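/*
 * i2c_algorithm.master_xfer hook: translate the array of struct i2c_msg
 * into a DC i2c_command with one payload per message (direction taken
 * from I2C_M_RD) and submit it on the link's DDC channel with the default
 * engine at a speed setting of 100 (in kHz, as DC expects). Returns the
 * number of messages on success, -EIO otherwise.
 */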
8302 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8303                               struct i2c_msg *msgs, int num)
8304 {
8305         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8306         struct ddc_service *ddc_service = i2c->ddc_service;
8307         struct i2c_command cmd;
8308         int i;
8309         int result = -EIO;
8310
8311         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8312
8313         if (!cmd.payloads)
8314                 return result;
8315
8316         cmd.number_of_payloads = num;
8317         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8318         cmd.speed = 100;
8319
8320         for (i = 0; i < num; i++) {
8321                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8322                 cmd.payloads[i].address = msgs[i].addr;
8323                 cmd.payloads[i].length = msgs[i].len;
8324                 cmd.payloads[i].data = msgs[i].buf;
8325         }
8326
8327         if (dc_submit_i2c(
8328                         ddc_service->ctx->dc,
8329                         ddc_service->ddc_pin->hw_info.ddc_channel,
8330                         &cmd))
8331                 result = num;
8332
8333         kfree(cmd.payloads);
8334         return result;
8335 }
8336
8337 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8338 {
8339         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8340 }
8341
8342 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8343         .master_xfer = amdgpu_dm_i2c_xfer,
8344         .functionality = amdgpu_dm_i2c_func,
8345 };
8346
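/*
 * Allocate an amdgpu_i2c_adapter wrapping the given DC DDC service so the
 * DDC line can be exposed as a standard Linux i2c bus. The caller is still
 * responsible for registering it with i2c_add_adapter(); note that *res is
 * currently never written here.
 */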
8347 static struct amdgpu_i2c_adapter *
8348 create_i2c(struct ddc_service *ddc_service,
8349            int link_index,
8350            int *res)
8351 {
8352         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8353         struct amdgpu_i2c_adapter *i2c;
8354
8355         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8356         if (!i2c)
8357                 return NULL;
8358         i2c->base.owner = THIS_MODULE;
8359         i2c->base.class = I2C_CLASS_DDC;
8360         i2c->base.dev.parent = &adev->pdev->dev;
8361         i2c->base.algo = &amdgpu_dm_i2c_algo;
8362         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8363         i2c_set_adapdata(&i2c->base, i2c);
8364         i2c->ddc_service = ddc_service;
8365         if (i2c->ddc_service->ddc_pin)
8366                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8367
8368         return i2c;
8369 }
8370
8372 /*
8373  * Note: this function assumes that dc_link_detect() was called for the
8374  * dc_link which will be represented by this aconnector.
8375  */
8376 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8377                                     struct amdgpu_dm_connector *aconnector,
8378                                     uint32_t link_index,
8379                                     struct amdgpu_encoder *aencoder)
8380 {
8381         int res = 0;
8382         int connector_type;
8383         struct dc *dc = dm->dc;
8384         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8385         struct amdgpu_i2c_adapter *i2c;
8386
8387         link->priv = aconnector;
8388
8389         DRM_DEBUG_DRIVER("%s()\n", __func__);
8390
8391         i2c = create_i2c(link->ddc, link->link_index, &res);
8392         if (!i2c) {
8393                 DRM_ERROR("Failed to create i2c adapter data\n");
8394                 return -ENOMEM;
8395         }
8396
8397         aconnector->i2c = i2c;
8398         res = i2c_add_adapter(&i2c->base);
8399
8400         if (res) {
8401                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8402                 goto out_free;
8403         }
8404
8405         connector_type = to_drm_connector_type(link->connector_signal);
8406
8407         res = drm_connector_init_with_ddc(
8408                         dm->ddev,
8409                         &aconnector->base,
8410                         &amdgpu_dm_connector_funcs,
8411                         connector_type,
8412                         &i2c->base);
8413
8414         if (res) {
8415                 DRM_ERROR("connector_init failed\n");
8416                 aconnector->connector_id = -1;
8417                 goto out_free;
8418         }
8419
8420         drm_connector_helper_add(
8421                         &aconnector->base,
8422                         &amdgpu_dm_connector_helper_funcs);
8423
8424         amdgpu_dm_connector_init_helper(
8425                 dm,
8426                 aconnector,
8427                 connector_type,
8428                 link,
8429                 link_index);
8430
8431         drm_connector_attach_encoder(
8432                 &aconnector->base, &aencoder->base);
8433
8434         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8435                 || connector_type == DRM_MODE_CONNECTOR_eDP)
8436                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8437
8438 out_free:
8439         if (res) {
8440                 kfree(i2c);
8441                 aconnector->i2c = NULL;
8442         }
8443         return res;
8444 }
8445
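/*
 * Return a bitmask with one bit set per available CRTC; for the 1-6 CRTCs
 * handled here this is equivalent to (1 << num_crtc) - 1, with any other
 * count falling back to the six-CRTC mask 0x3f.
 */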
8446 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8447 {
8448         switch (adev->mode_info.num_crtc) {
8449         case 1:
8450                 return 0x1;
8451         case 2:
8452                 return 0x3;
8453         case 3:
8454                 return 0x7;
8455         case 4:
8456                 return 0xf;
8457         case 5:
8458                 return 0x1f;
8459         case 6:
8460         default:
8461                 return 0x3f;
8462         }
8463 }
8464
8465 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8466                                   struct amdgpu_encoder *aencoder,
8467                                   uint32_t link_index)
8468 {
8469         struct amdgpu_device *adev = drm_to_adev(dev);
8470
8471         int res = drm_encoder_init(dev,
8472                                    &aencoder->base,
8473                                    &amdgpu_dm_encoder_funcs,
8474                                    DRM_MODE_ENCODER_TMDS,
8475                                    NULL);
8476
8477         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8478
8479         if (!res)
8480                 aencoder->encoder_id = link_index;
8481         else
8482                 aencoder->encoder_id = -1;
8483
8484         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8485
8486         return res;
8487 }
8488
8489 static void manage_dm_interrupts(struct amdgpu_device *adev,
8490                                  struct amdgpu_crtc *acrtc,
8491                                  bool enable)
8492 {
8493         /*
8494          * We have no guarantee that the frontend index maps to the same
8495          * backend index - some even map to more than one.
8496          *
8497          * TODO: Use a different interrupt or check DC itself for the mapping.
8498          */
8499         int irq_type =
8500                 amdgpu_display_crtc_idx_to_irq_type(
8501                         adev,
8502                         acrtc->crtc_id);
8503
8504         if (enable) {
8505                 drm_crtc_vblank_on(&acrtc->base);
8506                 amdgpu_irq_get(
8507                         adev,
8508                         &adev->pageflip_irq,
8509                         irq_type);
8510 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8511                 amdgpu_irq_get(
8512                         adev,
8513                         &adev->vline0_irq,
8514                         irq_type);
8515 #endif
8516         } else {
8517 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8518                 amdgpu_irq_put(
8519                         adev,
8520                         &adev->vline0_irq,
8521                         irq_type);
8522 #endif
8523                 amdgpu_irq_put(
8524                         adev,
8525                         &adev->pageflip_irq,
8526                         irq_type);
8527                 drm_crtc_vblank_off(&acrtc->base);
8528         }
8529 }
8530
8531 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8532                                       struct amdgpu_crtc *acrtc)
8533 {
8534         int irq_type =
8535                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8536
8537         /*
8538          * This reads the current state for the IRQ and forcibly reapplies
8539          * the setting to hardware.
8540          */
8541         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8542 }
8543
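/*
 * Return true when the scaling mode changed, when underscan was toggled
 * on or off with non-zero borders, or when the border sizes themselves
 * changed.
 */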
8544 static bool
8545 is_scaling_state_different(const struct dm_connector_state *dm_state,
8546                            const struct dm_connector_state *old_dm_state)
8547 {
8548         if (dm_state->scaling != old_dm_state->scaling)
8549                 return true;
8550         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8551                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8552                         return true;
8553         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8554                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8555                         return true;
8556         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8557                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8558                 return true;
8559         return false;
8560 }
8561
8562 #ifdef CONFIG_DRM_AMD_DC_HDCP
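/*
 * Decide whether HDCP has to be (re)enabled for this connector based on
 * the old and new content protection state. Note the intentional side
 * effects: state->content_protection may be rewritten here, e.g. back to
 * DESIRED after an S3 resume or a Type0/1 change.
 */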
8563 static bool is_content_protection_different(struct drm_connector_state *state,
8564                                             const struct drm_connector_state *old_state,
8565                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8566 {
8567         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8568         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8569
8570         /* Handle: Type0/1 change */
8571         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8572             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8573                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8574                 return true;
8575         }
8576
8577         /* CP is being re-enabled, ignore this
8578          *
8579          * Handles:     ENABLED -> DESIRED
8580          */
8581         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8582             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8583                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8584                 return false;
8585         }
8586
8587         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8588          *
8589          * Handles:     UNDESIRED -> ENABLED
8590          */
8591         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8592             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8593                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8594
8595         /* Stream removed and re-enabled
8596          *
8597          * Can sometimes overlap with the HPD case,
8598          * thus set update_hdcp to false to avoid
8599          * setting HDCP multiple times.
8600          *
8601          * Handles:     DESIRED -> DESIRED (Special case)
8602          */
8603         if (!(old_state->crtc && old_state->crtc->enabled) &&
8604                 state->crtc && state->crtc->enabled &&
8605                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8606                 dm_con_state->update_hdcp = false;
8607                 return true;
8608         }
8609
8610         /* Hot-plug, headless S3, DPMS
8611          *
8612          * Only start HDCP if the display is connected/enabled.
8613          * update_hdcp flag will be set to false until the next
8614          * HPD comes in.
8615          *
8616          * Handles:     DESIRED -> DESIRED (Special case)
8617          */
8618         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8619             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8620                 dm_con_state->update_hdcp = false;
8621                 return true;
8622         }
8623
8624         /*
8625          * Handles:     UNDESIRED -> UNDESIRED
8626          *              DESIRED -> DESIRED
8627          *              ENABLED -> ENABLED
8628          */
8629         if (old_state->content_protection == state->content_protection)
8630                 return false;
8631
8632         /*
8633          * Handles:     UNDESIRED -> DESIRED
8634          *              DESIRED -> UNDESIRED
8635          *              ENABLED -> UNDESIRED
8636          */
8637         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8638                 return true;
8639
8640         /*
8641          * Handles:     DESIRED -> ENABLED
8642          */
8643         return false;
8644 }
8645
8646 #endif
8647 static void remove_stream(struct amdgpu_device *adev,
8648                           struct amdgpu_crtc *acrtc,
8649                           struct dc_stream_state *stream)
8650 {
8651         /* this is the update mode case */
8652
8653         acrtc->otg_inst = -1;
8654         acrtc->enabled = false;
8655 }
8656
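/*
 * Translate the cursor plane state into a DC cursor position. Negative
 * on-screen coordinates are clamped to zero and folded into the hotspot
 * offsets instead. Returns -EINVAL if the cursor exceeds the hardware
 * cursor size limits.
 */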
8657 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8658                                struct dc_cursor_position *position)
8659 {
8660         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8661         int x, y;
8662         int xorigin = 0, yorigin = 0;
8663
8664         if (!crtc || !plane->state->fb)
8665                 return 0;
8666
8667         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8668             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8669                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8670                           __func__,
8671                           plane->state->crtc_w,
8672                           plane->state->crtc_h);
8673                 return -EINVAL;
8674         }
8675
8676         x = plane->state->crtc_x;
8677         y = plane->state->crtc_y;
8678
8679         if (x <= -amdgpu_crtc->max_cursor_width ||
8680             y <= -amdgpu_crtc->max_cursor_height)
8681                 return 0;
8682
8683         if (x < 0) {
8684                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8685                 x = 0;
8686         }
8687         if (y < 0) {
8688                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8689                 y = 0;
8690         }
8691         position->enable = true;
8692         position->translate_by_source = true;
8693         position->x = x;
8694         position->y = y;
8695         position->x_hotspot = xorigin;
8696         position->y_hotspot = yorigin;
8697
8698         return 0;
8699 }
8700
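/*
 * Program (or disable) the hardware cursor for the CRTC this cursor
 * plane is attached to, updating both the cursor attributes (address,
 * size, pitch, format) and its position under the DC lock.
 */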
8701 static void handle_cursor_update(struct drm_plane *plane,
8702                                  struct drm_plane_state *old_plane_state)
8703 {
8704         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8705         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8706         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8707         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8708         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8709         uint64_t address = afb ? afb->address : 0;
8710         struct dc_cursor_position position = {0};
8711         struct dc_cursor_attributes attributes;
8712         int ret;
8713
8714         if (!plane->state->fb && !old_plane_state->fb)
8715                 return;
8716
8717         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8718                       __func__,
8719                       amdgpu_crtc->crtc_id,
8720                       plane->state->crtc_w,
8721                       plane->state->crtc_h);
8722
8723         ret = get_cursor_position(plane, crtc, &position);
8724         if (ret)
8725                 return;
8726
8727         if (!position.enable) {
8728                 /* turn off cursor */
8729                 if (crtc_state && crtc_state->stream) {
8730                         mutex_lock(&adev->dm.dc_lock);
8731                         dc_stream_set_cursor_position(crtc_state->stream,
8732                                                       &position);
8733                         mutex_unlock(&adev->dm.dc_lock);
8734                 }
8735                 return;
8736         }
8737
8738         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8739         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8740
8741         memset(&attributes, 0, sizeof(attributes));
8742         attributes.address.high_part = upper_32_bits(address);
8743         attributes.address.low_part  = lower_32_bits(address);
8744         attributes.width             = plane->state->crtc_w;
8745         attributes.height            = plane->state->crtc_h;
8746         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8747         attributes.rotation_angle    = 0;
8748         attributes.attribute_flags.value = 0;
8749
8750         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8751
8752         if (crtc_state->stream) {
8753                 mutex_lock(&adev->dm.dc_lock);
8754                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8755                                                          &attributes))
8756                         DRM_ERROR("DC failed to set cursor attributes\n");
8757
8758                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8759                                                    &position))
8760                         DRM_ERROR("DC failed to set cursor position\n");
8761                 mutex_unlock(&adev->dm.dc_lock);
8762         }
8763 }
8764
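/*
 * Hand the pending pageflip event over to the pageflip interrupt handler:
 * mark the flip as submitted and consume base.state->event. Must be
 * called with the DRM device's event_lock held, as asserted below.
 */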
8765 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8766 {
8768         assert_spin_locked(&acrtc->base.dev->event_lock);
8769         WARN_ON(acrtc->event);
8770
8771         acrtc->event = acrtc->base.state->event;
8772
8773         /* Set the flip status */
8774         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8775
8776         /* Mark this event as consumed */
8777         acrtc->base.state->event = NULL;
8778
8779         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8780                      acrtc->crtc_id);
8781 }
8782
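/*
 * Recompute the VRR parameters and VRR infopacket for a flip on the
 * given stream, record whether the timing or the infopacket changed, and
 * mirror the results into the CRTC's dm_irq_params so the vblank/pflip
 * handlers see a consistent copy. All of it runs under the event_lock.
 */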
8783 static void update_freesync_state_on_stream(
8784         struct amdgpu_display_manager *dm,
8785         struct dm_crtc_state *new_crtc_state,
8786         struct dc_stream_state *new_stream,
8787         struct dc_plane_state *surface,
8788         u32 flip_timestamp_in_us)
8789 {
8790         struct mod_vrr_params vrr_params;
8791         struct dc_info_packet vrr_infopacket = {0};
8792         struct amdgpu_device *adev = dm->adev;
8793         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8794         unsigned long flags;
8795         bool pack_sdp_v1_3 = false;
8796
8797         if (!new_stream)
8798                 return;
8799
8800         /*
8801          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8802          * For now it's sufficient to just guard against these conditions.
8803          */
8804
8805         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8806                 return;
8807
8808         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8809         vrr_params = acrtc->dm_irq_params.vrr_params;
8810
8811         if (surface) {
8812                 mod_freesync_handle_preflip(
8813                         dm->freesync_module,
8814                         surface,
8815                         new_stream,
8816                         flip_timestamp_in_us,
8817                         &vrr_params);
8818
8819                 if (adev->family < AMDGPU_FAMILY_AI &&
8820                     amdgpu_dm_vrr_active(new_crtc_state)) {
8821                         mod_freesync_handle_v_update(dm->freesync_module,
8822                                                      new_stream, &vrr_params);
8823
8824                         /* Need to call this before the frame ends. */
8825                         dc_stream_adjust_vmin_vmax(dm->dc,
8826                                                    new_crtc_state->stream,
8827                                                    &vrr_params.adjust);
8828                 }
8829         }
8830
8831         mod_freesync_build_vrr_infopacket(
8832                 dm->freesync_module,
8833                 new_stream,
8834                 &vrr_params,
8835                 PACKET_TYPE_VRR,
8836                 TRANSFER_FUNC_UNKNOWN,
8837                 &vrr_infopacket,
8838                 pack_sdp_v1_3);
8839
8840         new_crtc_state->freesync_timing_changed |=
8841                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8842                         &vrr_params.adjust,
8843                         sizeof(vrr_params.adjust)) != 0);
8844
8845         new_crtc_state->freesync_vrr_info_changed |=
8846                 (memcmp(&new_crtc_state->vrr_infopacket,
8847                         &vrr_infopacket,
8848                         sizeof(vrr_infopacket)) != 0);
8849
8850         acrtc->dm_irq_params.vrr_params = vrr_params;
8851         new_crtc_state->vrr_infopacket = vrr_infopacket;
8852
8853         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8854         new_stream->vrr_infopacket = vrr_infopacket;
8855
8856         if (new_crtc_state->freesync_vrr_info_changed)
8857                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8858                               new_crtc_state->base.crtc->base.id,
8859                               (int)new_crtc_state->base.vrr_enabled,
8860                               (int)vrr_params.state);
8861
8862         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8863 }
8864
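/*
 * Derive the effective VRR state (fixed, variable, inactive or
 * unsupported) from the CRTC's freesync_config, build the matching VRR
 * parameters and copy them into the CRTC's dm_irq_params for the IRQ
 * handlers to use.
 */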
8865 static void update_stream_irq_parameters(
8866         struct amdgpu_display_manager *dm,
8867         struct dm_crtc_state *new_crtc_state)
8868 {
8869         struct dc_stream_state *new_stream = new_crtc_state->stream;
8870         struct mod_vrr_params vrr_params;
8871         struct mod_freesync_config config = new_crtc_state->freesync_config;
8872         struct amdgpu_device *adev = dm->adev;
8873         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8874         unsigned long flags;
8875
8876         if (!new_stream)
8877                 return;
8878
8879         /*
8880          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8881          * For now it's sufficient to just guard against these conditions.
8882          */
8883         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8884                 return;
8885
8886         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8887         vrr_params = acrtc->dm_irq_params.vrr_params;
8888
8889         if (new_crtc_state->vrr_supported &&
8890             config.min_refresh_in_uhz &&
8891             config.max_refresh_in_uhz) {
8892                 /*
8893                  * If a FreeSync compatible mode was set, config.state will already
8894                  * have been set in atomic check.
8895                  */
8896                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8897                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8898                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8899                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8900                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8901                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8902                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8903                 } else {
8904                         config.state = new_crtc_state->base.vrr_enabled ?
8905                                                      VRR_STATE_ACTIVE_VARIABLE :
8906                                                      VRR_STATE_INACTIVE;
8907                 }
8908         } else {
8909                 config.state = VRR_STATE_UNSUPPORTED;
8910         }
8911
8912         mod_freesync_build_vrr_params(dm->freesync_module,
8913                                       new_stream,
8914                                       &config, &vrr_params);
8915
8916         new_crtc_state->freesync_timing_changed |=
8917                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8918                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8919
8920         new_crtc_state->freesync_config = config;
8921         /* Copy state for access from DM IRQ handler */
8922         acrtc->dm_irq_params.freesync_config = config;
8923         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8924         acrtc->dm_irq_params.vrr_params = vrr_params;
8925         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8926 }
8927
8928 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8929                                             struct dm_crtc_state *new_state)
8930 {
8931         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8932         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8933
8934         if (!old_vrr_active && new_vrr_active) {
8935                 /* Transition VRR inactive -> active:
8936                  * While VRR is active, we must not disable the vblank irq, as a
8937                  * re-enable after a disable would compute bogus vblank/pflip
8938                  * timestamps if it happened inside the display front porch.
8939                  *
8940                  * We also need vupdate irq for the actual core vblank handling
8941                  * at end of vblank.
8942                  */
8943                 dm_set_vupdate_irq(new_state->base.crtc, true);
8944                 drm_crtc_vblank_get(new_state->base.crtc);
8945                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8946                                  __func__, new_state->base.crtc->base.id);
8947         } else if (old_vrr_active && !new_vrr_active) {
8948                 /* Transition VRR active -> inactive:
8949                  * Allow vblank irq disable again for fixed refresh rate.
8950                  */
8951                 dm_set_vupdate_irq(new_state->base.crtc, false);
8952                 drm_crtc_vblank_put(new_state->base.crtc);
8953                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8954                                  __func__, new_state->base.crtc->base.id);
8955         }
8956 }
8957
8958 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8959 {
8960         struct drm_plane *plane;
8961         struct drm_plane_state *old_plane_state;
8962         int i;
8963
8964         /*
8965          * TODO: Make this per-stream so we don't issue redundant updates for
8966          * commits with multiple streams.
8967          */
8968         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8969                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8970                         handle_cursor_update(plane, old_plane_state);
8971 }
8972
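/*
 * Program all plane updates for one CRTC as a single DC update bundle:
 * fill scaling/plane/flip info per changed plane, throttle the flip
 * against the target vblank, arm the pageflip event, then commit the
 * bundle together with any stream-level updates (color management, VRR
 * infopacket, ABM, PSR) under the DC lock.
 */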
8973 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8974                                     struct dc_state *dc_state,
8975                                     struct drm_device *dev,
8976                                     struct amdgpu_display_manager *dm,
8977                                     struct drm_crtc *pcrtc,
8978                                     bool wait_for_vblank)
8979 {
8980         uint32_t i;
8981         uint64_t timestamp_ns;
8982         struct drm_plane *plane;
8983         struct drm_plane_state *old_plane_state, *new_plane_state;
8984         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8985         struct drm_crtc_state *new_pcrtc_state =
8986                         drm_atomic_get_new_crtc_state(state, pcrtc);
8987         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8988         struct dm_crtc_state *dm_old_crtc_state =
8989                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8990         int planes_count = 0, vpos, hpos;
8991         long r;
8992         unsigned long flags;
8993         struct amdgpu_bo *abo;
8994         uint32_t target_vblank, last_flip_vblank;
8995         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8996         bool pflip_present = false;
8997         struct {
8998                 struct dc_surface_update surface_updates[MAX_SURFACES];
8999                 struct dc_plane_info plane_infos[MAX_SURFACES];
9000                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9001                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9002                 struct dc_stream_update stream_update;
9003         } *bundle;
9004
9005         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9006
9007         if (!bundle) {
9008                 dm_error("Failed to allocate update bundle\n");
9009                 goto cleanup;
9010         }
9011
9012         /*
9013          * Disable the cursor first if we're disabling all the planes.
9014          * It'll remain on the screen after the planes are re-enabled
9015          * if we don't.
9016          */
9017         if (acrtc_state->active_planes == 0)
9018                 amdgpu_dm_commit_cursors(state);
9019
9020         /* update planes when needed */
9021         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9022                 struct drm_crtc *crtc = new_plane_state->crtc;
9023                 struct drm_crtc_state *new_crtc_state;
9024                 struct drm_framebuffer *fb = new_plane_state->fb;
9025                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9026                 bool plane_needs_flip;
9027                 struct dc_plane_state *dc_plane;
9028                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9029
9030                 /* Cursor plane is handled after stream updates */
9031                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9032                         continue;
9033
9034                 if (!fb || !crtc || pcrtc != crtc)
9035                         continue;
9036
9037                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9038                 if (!new_crtc_state->active)
9039                         continue;
9040
9041                 dc_plane = dm_new_plane_state->dc_state;
9042
9043                 bundle->surface_updates[planes_count].surface = dc_plane;
9044                 if (new_pcrtc_state->color_mgmt_changed) {
9045                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9046                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9047                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9048                 }
9049
9050                 fill_dc_scaling_info(dm->adev, new_plane_state,
9051                                      &bundle->scaling_infos[planes_count]);
9052
9053                 bundle->surface_updates[planes_count].scaling_info =
9054                         &bundle->scaling_infos[planes_count];
9055
9056                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9057
9058                 pflip_present = pflip_present || plane_needs_flip;
9059
9060                 if (!plane_needs_flip) {
9061                         planes_count += 1;
9062                         continue;
9063                 }
9064
9065                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9066
9067                 /*
9068                  * Wait for all fences on this FB. Do limited wait to avoid
9069                  * deadlock during GPU reset when this fence will not signal
9070                  * but we hold reservation lock for the BO.
9071                  */
9072                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9073                                           msecs_to_jiffies(5000));
9074                 if (unlikely(r <= 0))
9075                         DRM_ERROR("Waiting for fences timed out!\n");
9076
9077                 fill_dc_plane_info_and_addr(
9078                         dm->adev, new_plane_state,
9079                         afb->tiling_flags,
9080                         &bundle->plane_infos[planes_count],
9081                         &bundle->flip_addrs[planes_count].address,
9082                         afb->tmz_surface, false);
9083
9084                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9085                                  new_plane_state->plane->index,
9086                                  bundle->plane_infos[planes_count].dcc.enable);
9087
9088                 bundle->surface_updates[planes_count].plane_info =
9089                         &bundle->plane_infos[planes_count];
9090
9091                 /*
9092                  * Only allow immediate flips for fast updates that don't
9093                  * change FB pitch, DCC state, rotation or mirroring.
9094                  */
9095                 bundle->flip_addrs[planes_count].flip_immediate =
9096                         crtc->state->async_flip &&
9097                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9098
9099                 timestamp_ns = ktime_get_ns();
9100                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9101                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9102                 bundle->surface_updates[planes_count].surface = dc_plane;
9103
9104                 if (!bundle->surface_updates[planes_count].surface) {
9105                         DRM_ERROR("No surface for CRTC: id=%d\n",
9106                                         acrtc_attach->crtc_id);
9107                         continue;
9108                 }
9109
9110                 if (plane == pcrtc->primary)
9111                         update_freesync_state_on_stream(
9112                                 dm,
9113                                 acrtc_state,
9114                                 acrtc_state->stream,
9115                                 dc_plane,
9116                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9117
9118                 DRM_DEBUG_ATOMIC("%s Flipping to high: 0x%x, low: 0x%x\n",
9119                                  __func__,
9120                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9121                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9122
9123                 planes_count += 1;
9125         }
9126
9127         if (pflip_present) {
9128                 if (!vrr_active) {
9129                         /* Use old throttling in non-vrr fixed refresh rate mode
9130                          * to keep flip scheduling based on target vblank counts
9131                          * working in a backwards compatible way, e.g., for
9132                          * clients using the GLX_OML_sync_control extension or
9133                          * DRI3/Present extension with defined target_msc.
9134                          */
9135                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9136                 } else {
9138                         /* For variable refresh rate mode only:
9139                          * Get vblank of last completed flip to avoid > 1 vrr
9140                          * flips per video frame by use of throttling, but allow
9141                          * flip programming anywhere in the possibly large
9142                          * variable vrr vblank interval for fine-grained flip
9143                          * timing control and more opportunity to avoid stutter
9144                          * on late submission of flips.
9145                          */
9146                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9147                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9148                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9149                 }
9150
9151                 target_vblank = last_flip_vblank + wait_for_vblank;
9152
9153                 /*
9154                  * Wait until we're out of the vertical blank period before the one
9155                  * targeted by the flip
9156                  */
9157                 while ((acrtc_attach->enabled &&
9158                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9159                                                             0, &vpos, &hpos, NULL,
9160                                                             NULL, &pcrtc->hwmode)
9161                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9162                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9163                         (int)(target_vblank -
9164                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9165                         usleep_range(1000, 1100);
9166                 }
9167
9168                 /*
9169                  * Prepare the flip event for the pageflip interrupt to handle.
9170                  *
9171                  * This only works in the case where we've already turned on the
9172                  * appropriate hardware blocks (eg. HUBP) so in the transition case
9173                  * from 0 -> n planes we have to skip a hardware generated event
9174                  * and rely on sending it from software.
9175                  */
9176                 if (acrtc_attach->base.state->event &&
9177                     acrtc_state->active_planes > 0 &&
9178                     !acrtc_state->force_dpms_off) {
9179                         drm_crtc_vblank_get(pcrtc);
9180
9181                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9182
9183                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9184                         prepare_flip_isr(acrtc_attach);
9185
9186                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9187                 }
9188
9189                 if (acrtc_state->stream) {
9190                         if (acrtc_state->freesync_vrr_info_changed)
9191                                 bundle->stream_update.vrr_infopacket =
9192                                         &acrtc_state->stream->vrr_infopacket;
9193                 }
9194         }
9195
9196         /* Update the planes if changed or disable if we don't have any. */
9197         if ((planes_count || acrtc_state->active_planes == 0) &&
9198                 acrtc_state->stream) {
9199 #if defined(CONFIG_DRM_AMD_DC_DCN)
9200                 /*
9201                  * If PSR or idle optimizations are enabled then flush out
9202                  * any pending work before hardware programming.
9203                  */
9204                 if (dm->vblank_control_workqueue)
9205                         flush_workqueue(dm->vblank_control_workqueue);
9206 #endif
9207
9208                 bundle->stream_update.stream = acrtc_state->stream;
9209                 if (new_pcrtc_state->mode_changed) {
9210                         bundle->stream_update.src = acrtc_state->stream->src;
9211                         bundle->stream_update.dst = acrtc_state->stream->dst;
9212                 }
9213
9214                 if (new_pcrtc_state->color_mgmt_changed) {
9215                         /*
9216                          * TODO: This isn't fully correct since we've actually
9217                          * already modified the stream in place.
9218                          */
9219                         bundle->stream_update.gamut_remap =
9220                                 &acrtc_state->stream->gamut_remap_matrix;
9221                         bundle->stream_update.output_csc_transform =
9222                                 &acrtc_state->stream->csc_color_matrix;
9223                         bundle->stream_update.out_transfer_func =
9224                                 acrtc_state->stream->out_transfer_func;
9225                 }
9226
9227                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9228                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9229                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9230
9231                 /*
9232                  * If FreeSync state on the stream has changed then we need to
9233                  * re-adjust the min/max bounds now that DC doesn't handle this
9234                  * as part of commit.
9235                  */
9236                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9237                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9238                         dc_stream_adjust_vmin_vmax(
9239                                 dm->dc, acrtc_state->stream,
9240                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9241                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9242                 }
9243                 mutex_lock(&dm->dc_lock);
9244                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9245                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9246                         amdgpu_dm_psr_disable(acrtc_state->stream);
9247
9248                 dc_commit_updates_for_stream(dm->dc,
9249                                                      bundle->surface_updates,
9250                                                      planes_count,
9251                                                      acrtc_state->stream,
9252                                                      &bundle->stream_update,
9253                                                      dc_state);
9254
9255                 /*
9256                  * Enable or disable the interrupts on the backend.
9257                  *
9258                  * Most pipes are put into power gating when unused.
9259                  *
9260                  * When power gating is enabled on a pipe we lose the
9261                  * interrupt enablement state when power gating is disabled.
9262                  *
9263                  * So we need to update the IRQ control state in hardware
9264                  * whenever the pipe turns on (since it could be previously
9265                  * power gated) or off (since some pipes can't be power gated
9266                  * on some ASICs).
9267                  */
9268                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9269                         dm_update_pflip_irq_state(drm_to_adev(dev),
9270                                                   acrtc_attach);
9271
9272                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9273                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9274                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9275                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9276
9277                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9278                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9279                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9280                         struct amdgpu_dm_connector *aconn =
9281                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9282
9283                         if (aconn->psr_skip_count > 0)
9284                                 aconn->psr_skip_count--;
9285
9286                         /* Allow PSR when skip count is 0. */
9287                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9288                 } else {
9289                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9290                 }
9291
9292                 mutex_unlock(&dm->dc_lock);
9293         }
9294
9295         /*
9296          * Update cursor state *after* programming all the planes.
9297          * This avoids redundant programming in the case where we're going
9298          * to be disabling a single plane, since those pipes are being disabled.
9299          */
9300         if (acrtc_state->active_planes)
9301                 amdgpu_dm_commit_cursors(state);
9302
9303 cleanup:
9304         kfree(bundle);
9305 }
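
/*
 * Worked example of the PSR skip-count gating in amdgpu_dm_commit_planes()
 * above (editorial sketch, numbers are illustrative): with psr_skip_count
 * at 2, PSR entry is only allowed after two consecutive fast updates:
 *
 *   fast update #1: psr_skip_count 2 -> 1, allow_psr_entry = false
 *   fast update #2: psr_skip_count 1 -> 0, allow_psr_entry = true
 *
 * Any non-fast update (or PSR not being enabled on the link) forces
 * allow_psr_entry back to false, so PSR is never entered across full
 * or medium updates.
 */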
9306
9307 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9308                                    struct drm_atomic_state *state)
9309 {
9310         struct amdgpu_device *adev = drm_to_adev(dev);
9311         struct amdgpu_dm_connector *aconnector;
9312         struct drm_connector *connector;
9313         struct drm_connector_state *old_con_state, *new_con_state;
9314         struct drm_crtc_state *new_crtc_state;
9315         struct dm_crtc_state *new_dm_crtc_state;
9316         const struct dc_stream_status *status;
9317         int i, inst;
9318
9319         /* Notify audio device removals. */
9320         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9321                 if (old_con_state->crtc != new_con_state->crtc) {
9322                         /* CRTC changes require notification. */
9323                         goto notify;
9324                 }
9325
9326                 if (!new_con_state->crtc)
9327                         continue;
9328
9329                 new_crtc_state = drm_atomic_get_new_crtc_state(
9330                         state, new_con_state->crtc);
9331
9332                 if (!new_crtc_state)
9333                         continue;
9334
9335                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9336                         continue;
9337
9338         notify:
9339                 aconnector = to_amdgpu_dm_connector(connector);
9340
9341                 mutex_lock(&adev->dm.audio_lock);
9342                 inst = aconnector->audio_inst;
9343                 aconnector->audio_inst = -1;
9344                 mutex_unlock(&adev->dm.audio_lock);
9345
9346                 amdgpu_dm_audio_eld_notify(adev, inst);
9347         }
9348
9349         /* Notify audio device additions. */
9350         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9351                 if (!new_con_state->crtc)
9352                         continue;
9353
9354                 new_crtc_state = drm_atomic_get_new_crtc_state(
9355                         state, new_con_state->crtc);
9356
9357                 if (!new_crtc_state)
9358                         continue;
9359
9360                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9361                         continue;
9362
9363                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9364                 if (!new_dm_crtc_state->stream)
9365                         continue;
9366
9367                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9368                 if (!status)
9369                         continue;
9370
9371                 aconnector = to_amdgpu_dm_connector(connector);
9372
9373                 mutex_lock(&adev->dm.audio_lock);
9374                 inst = status->audio_inst;
9375                 aconnector->audio_inst = inst;
9376                 mutex_unlock(&adev->dm.audio_lock);
9377
9378                 amdgpu_dm_audio_eld_notify(adev, inst);
9379         }
9380 }
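
/*
 * Note on the notification flow in amdgpu_dm_commit_audio() above
 * (editorial): on removal the connector's audio_inst is set to -1
 * (presumably "no endpoint") while the old instance is passed to
 * amdgpu_dm_audio_eld_notify(); on addition the instance comes from the
 * committed stream's dc_stream_status. Both paths update audio_inst under
 * dm.audio_lock so the audio component never observes a torn value.
 */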
9381
9382 /**
9383  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9384  * @crtc_state: the DRM CRTC state
9385  * @stream_state: the DC stream state
9386  *
9387  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9388  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9389  */
9390 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9391                                                 struct dc_stream_state *stream_state)
9392 {
9393         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9394 }
9395
9396 /**
9397  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9398  * @state: The atomic state to commit
9399  *
9400  * This will tell DC to commit the constructed DC state from atomic_check,
9401  * programming the hardware. Any failure here implies a hardware failure, since
9402  * atomic check should have filtered anything non-kosher.
9403  */
9404 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9405 {
9406         struct drm_device *dev = state->dev;
9407         struct amdgpu_device *adev = drm_to_adev(dev);
9408         struct amdgpu_display_manager *dm = &adev->dm;
9409         struct dm_atomic_state *dm_state;
9410         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9411         uint32_t i, j;
9412         struct drm_crtc *crtc;
9413         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9414         unsigned long flags;
9415         bool wait_for_vblank = true;
9416         struct drm_connector *connector;
9417         struct drm_connector_state *old_con_state, *new_con_state;
9418         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9419         int crtc_disable_count = 0;
9420         bool mode_set_reset_required = false;
9421
9422         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9423
9424         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9425
9426         dm_state = dm_atomic_get_new_state(state);
9427         if (dm_state && dm_state->context) {
9428                 dc_state = dm_state->context;
9429         } else {
9430                 /* No state changes, retain current state. */
9431                 dc_state_temp = dc_create_state(dm->dc);
9432                 ASSERT(dc_state_temp);
9433                 dc_state = dc_state_temp;
9434                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9435         }
9436
9437         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9438                                        new_crtc_state, i) {
9439                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9440
9441                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9442
9443                 if (old_crtc_state->active &&
9444                     (!new_crtc_state->active ||
9445                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9446                         manage_dm_interrupts(adev, acrtc, false);
9447                         dc_stream_release(dm_old_crtc_state->stream);
9448                 }
9449         }
9450
9451         drm_atomic_helper_calc_timestamping_constants(state);
9452
9453         /* update changed items */
9454         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9455                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9456
9457                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9458                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9459
9460                 DRM_DEBUG_ATOMIC(
9461                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9462                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9463                         "connectors_changed:%d\n",
9464                         acrtc->crtc_id,
9465                         new_crtc_state->enable,
9466                         new_crtc_state->active,
9467                         new_crtc_state->planes_changed,
9468                         new_crtc_state->mode_changed,
9469                         new_crtc_state->active_changed,
9470                         new_crtc_state->connectors_changed);
9471
9472                 /* Disable cursor if disabling crtc */
9473                 if (old_crtc_state->active && !new_crtc_state->active) {
9474                         struct dc_cursor_position position;
9475
9476                         memset(&position, 0, sizeof(position));
9477                         mutex_lock(&dm->dc_lock);
9478                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9479                         mutex_unlock(&dm->dc_lock);
9480                 }
9481
9482                 /* Copy all transient state flags into dc state */
9483                 if (dm_new_crtc_state->stream) {
9484                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9485                                                             dm_new_crtc_state->stream);
9486                 }
9487
9488                 /* handles headless hotplug case, updating new_state and
9489                  * aconnector as needed
9490                  */
9491
9492                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9493
9494                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9495
9496                         if (!dm_new_crtc_state->stream) {
9497                                 /*
9498                                  * This could happen because of issues with
9499                                  * userspace notification delivery.
9500                                  * In this case userspace tries to set a mode
9501                                  * on a display which is in fact disconnected;
9502                                  * dc_sink is then NULL on the aconnector.
9503                                  * We expect a mode reset to come soon.
9504                                  *
9505                                  * This can also happen when an unplug occurs
9506                                  * during the resume sequence.
9507                                  *
9508                                  * In either case, we want to pretend we still
9509                                  * have a sink to keep the pipe running so that
9510                                  * hw state is consistent with the sw state.
9511                                  */
9512                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9513                                                 __func__, acrtc->base.base.id);
9514                                 continue;
9515                         }
9516
9517                         if (dm_old_crtc_state->stream)
9518                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9519
9520                         pm_runtime_get_noresume(dev->dev);
9521
9522                         acrtc->enabled = true;
9523                         acrtc->hw_mode = new_crtc_state->mode;
9524                         crtc->hwmode = new_crtc_state->mode;
9525                         mode_set_reset_required = true;
9526                 } else if (modereset_required(new_crtc_state)) {
9527                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9528                         /* i.e. reset mode */
9529                         if (dm_old_crtc_state->stream)
9530                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9531
9532                         mode_set_reset_required = true;
9533                 }
9534         } /* for_each_crtc_in_state() */
9535
9536         if (dc_state) {
9537                 /* If there is a mode set or reset, disable eDP PSR. */
9538                 if (mode_set_reset_required) {
9539 #if defined(CONFIG_DRM_AMD_DC_DCN)
9540                         if (dm->vblank_control_workqueue)
9541                                 flush_workqueue(dm->vblank_control_workqueue);
9542 #endif
9543                         amdgpu_dm_psr_disable_all(dm);
9544                 }
9545
9546                 dm_enable_per_frame_crtc_master_sync(dc_state);
9547                 mutex_lock(&dm->dc_lock);
9548                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9549 #if defined(CONFIG_DRM_AMD_DC_DCN)
9550                 /* Allow idle optimization when vblank count is 0 for display off */
9551                 if (dm->active_vblank_irq_count == 0)
9552                         dc_allow_idle_optimizations(dm->dc, true);
9553 #endif
9554                 mutex_unlock(&dm->dc_lock);
9555         }
9556
9557         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9558                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9559
9560                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9561
9562                 if (dm_new_crtc_state->stream != NULL) {
9563                         const struct dc_stream_status *status =
9564                                         dc_stream_get_status(dm_new_crtc_state->stream);
9565
9566                         if (!status)
9567                                 status = dc_stream_get_status_from_state(dc_state,
9568                                                                          dm_new_crtc_state->stream);
9569                         if (!status)
9570                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
9571                         else
9572                                 acrtc->otg_inst = status->primary_otg_inst;
9573                 }
9574         }
9575 #ifdef CONFIG_DRM_AMD_DC_HDCP
9576         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9577                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9578                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9579                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9580
9581                 new_crtc_state = NULL;
9582
9583                 if (acrtc)
9584                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9585
9586                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9587
9588                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9589                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9590                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9591                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9592                         dm_new_con_state->update_hdcp = true;
9593                         continue;
9594                 }
9595
9596                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9597                         hdcp_update_display(
9598                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9599                                 new_con_state->hdcp_content_type,
9600                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9601         }
9602 #endif
9603
9604         /* Handle connector state changes */
9605         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9606                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9607                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9608                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9609                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9610                 struct dc_stream_update stream_update;
9611                 struct dc_info_packet hdr_packet;
9612                 struct dc_stream_status *status = NULL;
9613                 bool abm_changed, hdr_changed, scaling_changed;
9614
9615                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9616                 memset(&stream_update, 0, sizeof(stream_update));
9617
9618                 if (acrtc) {
9619                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9620                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9621                 }
9622
9623                 /* Skip any modesets/resets */
9624                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9625                         continue;
9626
9627                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9628                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9629
9630                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9631                                                              dm_old_con_state);
9632
9633                 abm_changed = dm_new_crtc_state->abm_level !=
9634                               dm_old_crtc_state->abm_level;
9635
9636                 hdr_changed =
9637                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9638
9639                 if (!scaling_changed && !abm_changed && !hdr_changed)
9640                         continue;
9641
9642                 stream_update.stream = dm_new_crtc_state->stream;
9643                 if (scaling_changed) {
9644                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9645                                         dm_new_con_state, dm_new_crtc_state->stream);
9646
9647                         stream_update.src = dm_new_crtc_state->stream->src;
9648                         stream_update.dst = dm_new_crtc_state->stream->dst;
9649                 }
9650
9651                 if (abm_changed) {
9652                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9653
9654                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9655                 }
9656
9657                 if (hdr_changed) {
9658                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9659                         stream_update.hdr_static_metadata = &hdr_packet;
9660                 }
9661
9662                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9663
9664                 if (WARN_ON(!status))
9665                         continue;
9666
9667                 WARN_ON(!status->plane_count);
9668
9669                 /*
9670                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9671                  * Here we create an empty update on each plane.
9672                  * To fix this, DC should permit updating only stream properties.
9673                  */
9674                 for (j = 0; j < status->plane_count; j++)
9675                         dummy_updates[j].surface = status->plane_states[0];
9676
9677
9678                 mutex_lock(&dm->dc_lock);
9679                 dc_commit_updates_for_stream(dm->dc,
9680                                                      dummy_updates,
9681                                                      status->plane_count,
9682                                                      dm_new_crtc_state->stream,
9683                                                      &stream_update,
9684                                                      dc_state);
9685                 mutex_unlock(&dm->dc_lock);
9686         }
9687
9688         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9689         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9690                                       new_crtc_state, i) {
9691                 if (old_crtc_state->active && !new_crtc_state->active)
9692                         crtc_disable_count++;
9693
9694                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9695                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9696
9697                 /* For freesync config update on crtc state and params for irq */
9698                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9699
9700                 /* Handle vrr on->off / off->on transitions */
9701                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9702                                                 dm_new_crtc_state);
9703         }
9704
9705         /*
9706          * Enable interrupts for CRTCs that are newly enabled or went through
9707          * a modeset. This is intentionally deferred until after the front end
9708          * state has been modified so that the OTG is on and the IRQ
9709          * handlers don't access stale or invalid state.
9710          */
9711         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9712                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9713 #ifdef CONFIG_DEBUG_FS
9714                 bool configure_crc = false;
9715                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9716 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9717                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9718 #endif
9719                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9720                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9721                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9722 #endif
9723                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9724
9725                 if (new_crtc_state->active &&
9726                     (!old_crtc_state->active ||
9727                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9728                         dc_stream_retain(dm_new_crtc_state->stream);
9729                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9730                         manage_dm_interrupts(adev, acrtc, true);
9731
9732 #ifdef CONFIG_DEBUG_FS
9733                         /*
9734                          * Frontend may have changed, so reapply the CRC capture
9735                          * settings for the stream.
9736                          */
9737                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9738
9739                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9740                                 configure_crc = true;
9741 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9742                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9743                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9744                                         acrtc->dm_irq_params.crc_window.update_win = true;
9745                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9746                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9747                                         crc_rd_wrk->crtc = crtc;
9748                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9749                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9750                                 }
9751 #endif
9752                         }
9753
9754                         if (configure_crc)
9755                                 if (amdgpu_dm_crtc_configure_crc_source(
9756                                         crtc, dm_new_crtc_state, cur_crc_src))
9757                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9758 #endif
9759                 }
9760         }
9761
9762         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9763                 if (new_crtc_state->async_flip)
9764                         wait_for_vblank = false;
9765
9766         /* Update planes when needed, per CRTC. */
9767         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9768                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9769
9770                 if (dm_new_crtc_state->stream)
9771                         amdgpu_dm_commit_planes(state, dc_state, dev,
9772                                                 dm, crtc, wait_for_vblank);
9773         }
9774
9775         /* Update audio instances for each connector. */
9776         amdgpu_dm_commit_audio(dev, state);
9777
9778 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9779         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9780         /* restore the backlight level */
9781         for (i = 0; i < dm->num_of_edps; i++) {
9782                 if (dm->backlight_dev[i] &&
9783                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9784                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9785         }
9786 #endif
9787         /*
9788          * Send a vblank event for all events not handled in the flip, and
9789          * mark the event consumed for drm_atomic_helper_commit_hw_done().
9790          */
9791         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9792         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9793
9794                 if (new_crtc_state->event)
9795                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9796
9797                 new_crtc_state->event = NULL;
9798         }
9799         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9800
9801         /* Signal HW programming completion */
9802         drm_atomic_helper_commit_hw_done(state);
9803
9804         if (wait_for_vblank)
9805                 drm_atomic_helper_wait_for_flip_done(dev, state);
9806
9807         drm_atomic_helper_cleanup_planes(dev, state);
9808
9809         /* Return the stolen VGA memory back to VRAM. */
9810         if (!adev->mman.keep_stolen_vga_memory)
9811                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9812         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9813
9814         /*
9815          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9816          * so we can put the GPU into runtime suspend if we're not driving any
9817          * displays anymore.
9818          */
9819         for (i = 0; i < crtc_disable_count; i++)
9820                 pm_runtime_put_autosuspend(dev->dev);
9821         pm_runtime_mark_last_busy(dev->dev);
9822
9823         if (dc_state_temp)
9824                 dc_release_state(dc_state_temp);
9825 }
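
/*
 * Editorial summary of the commit_tail ordering above: interrupts for
 * disabled or modeset CRTCs are torn down first, the constructed dc state
 * is committed, per-stream property updates (scaling/ABM/HDR) are flushed,
 * interrupts are re-enabled only once the OTGs are running, planes and
 * audio are committed, pending vblank events are sent, and finally one
 * runtime PM reference is dropped per newly disabled CRTC.
 */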
9826
9827
9828 static int dm_force_atomic_commit(struct drm_connector *connector)
9829 {
9830         int ret = 0;
9831         struct drm_device *ddev = connector->dev;
9832         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9833         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9834         struct drm_plane *plane = disconnected_acrtc->base.primary;
9835         struct drm_connector_state *conn_state;
9836         struct drm_crtc_state *crtc_state;
9837         struct drm_plane_state *plane_state;
9838
9839         if (!state)
9840                 return -ENOMEM;
9841
9842         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9843
9844         /* Construct an atomic state to restore previous display settings */
9845
9846         /*
9847          * Attach connectors to drm_atomic_state
9848          */
9849         conn_state = drm_atomic_get_connector_state(state, connector);
9850
9851         ret = PTR_ERR_OR_ZERO(conn_state);
9852         if (ret)
9853                 goto out;
9854
9855         /* Attach CRTC to drm_atomic_state */
9856         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9857
9858         ret = PTR_ERR_OR_ZERO(crtc_state);
9859         if (ret)
9860                 goto out;
9861
9862         /* force a restore */
9863         crtc_state->mode_changed = true;
9864
9865         /* Attach plane to drm_atomic_state */
9866         plane_state = drm_atomic_get_plane_state(state, plane);
9867
9868         ret = PTR_ERR_OR_ZERO(plane_state);
9869         if (ret)
9870                 goto out;
9871
9872         /* Call commit internally with the state we just constructed */
9873         ret = drm_atomic_commit(state);
9874
9875 out:
9876         drm_atomic_state_put(state);
9877         if (ret)
9878                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9879
9880         return ret;
9881 }
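
/*
 * Design note (editorial): drm_atomic_get_{connector,crtc,plane}_state()
 * pull the respective objects into the atomic state, so the commit built
 * by dm_force_atomic_commit() contains only the connector, its CRTC and
 * the primary plane; setting crtc_state->mode_changed = true is what
 * forces a full modeset even though the mode itself is unchanged.
 */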
9882
9883 /*
9884  * This function handles all cases when a set mode does not come upon hotplug.
9885  * This includes when a display is unplugged then plugged back into the
9886  * same port and when running without usermode desktop manager support.
9887  */
9888 void dm_restore_drm_connector_state(struct drm_device *dev,
9889                                     struct drm_connector *connector)
9890 {
9891         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9892         struct amdgpu_crtc *disconnected_acrtc;
9893         struct dm_crtc_state *acrtc_state;
9894
9895         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9896                 return;
9897
9898         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9899         if (!disconnected_acrtc)
9900                 return;
9901
9902         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9903         if (!acrtc_state->stream)
9904                 return;
9905
9906         /*
9907          * If the previous sink is not released and is different from the
9908          * current one, we deduce that we cannot rely on a usermode call to
9909          * turn on the display, so we do it here.
9910          */
9911         if (acrtc_state->stream->sink != aconnector->dc_sink)
9912                 dm_force_atomic_commit(&aconnector->base);
9913 }
9914
9915 /*
9916  * Grabs all modesetting locks to serialize against any blocking commits,
9917  * and waits for completion of all non-blocking commits.
9918  */
9919 static int do_aquire_global_lock(struct drm_device *dev,
9920                                  struct drm_atomic_state *state)
9921 {
9922         struct drm_crtc *crtc;
9923         struct drm_crtc_commit *commit;
9924         long ret;
9925
9926         /*
9927          * Adding all modeset locks to acquire_ctx will
9928          * ensure that when the framework releases it, the
9929          * extra locks we are taking here will get released too.
9930          */
9931         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9932         if (ret)
9933                 return ret;
9934
9935         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9936                 spin_lock(&crtc->commit_lock);
9937                 commit = list_first_entry_or_null(&crtc->commit_list,
9938                                 struct drm_crtc_commit, commit_entry);
9939                 if (commit)
9940                         drm_crtc_commit_get(commit);
9941                 spin_unlock(&crtc->commit_lock);
9942
9943                 if (!commit)
9944                         continue;
9945
9946                 /*
9947                  * Make sure all pending HW programming has completed and
9948                  * all page flips are done.
9949                  */
9950                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9951
9952                 if (ret > 0)
9953                         ret = wait_for_completion_interruptible_timeout(
9954                                         &commit->flip_done, 10*HZ);
9955
9956                 if (ret == 0)
9957                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9958                                   crtc->base.id, crtc->name);
9959
9960                 drm_crtc_commit_put(commit);
9961         }
9962
9963         return ret < 0 ? ret : 0;
9964 }
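
/*
 * Note (editorial): wait_for_completion_interruptible_timeout() returns a
 * negative errno if interrupted, 0 on timeout, and the remaining jiffies
 * (at least 1) on completion. That is why do_aquire_global_lock() only
 * chains the flip_done wait when ret > 0 and reports a timeout when
 * ret == 0.
 */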
9965
9966 static void get_freesync_config_for_crtc(
9967         struct dm_crtc_state *new_crtc_state,
9968         struct dm_connector_state *new_con_state)
9969 {
9970         struct mod_freesync_config config = {0};
9971         struct amdgpu_dm_connector *aconnector =
9972                         to_amdgpu_dm_connector(new_con_state->base.connector);
9973         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9974         int vrefresh = drm_mode_vrefresh(mode);
9975         bool fs_vid_mode = false;
9976
9977         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9978                                         vrefresh >= aconnector->min_vfreq &&
9979                                         vrefresh <= aconnector->max_vfreq;
9980
9981         if (new_crtc_state->vrr_supported) {
9982                 new_crtc_state->stream->ignore_msa_timing_param = true;
9983                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9984
9985                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9986                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9987                 config.vsif_supported = true;
9988                 config.btr = true;
9989
9990                 if (fs_vid_mode) {
9991                         config.state = VRR_STATE_ACTIVE_FIXED;
9992                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9993                         goto out;
9994                 } else if (new_crtc_state->base.vrr_enabled) {
9995                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9996                 } else {
9997                         config.state = VRR_STATE_INACTIVE;
9998                 }
9999         }
10000 out:
10001         new_crtc_state->freesync_config = config;
10002 }
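
/*
 * Worked example (illustrative): a panel reporting min_vfreq = 48 and
 * max_vfreq = 144 yields
 *
 *   config.min_refresh_in_uhz = 48 * 1000000  = 48,000,000 uHz  (48 Hz)
 *   config.max_refresh_in_uhz = 144 * 1000000 = 144,000,000 uHz (144 Hz)
 *
 * and vrr_supported is only set when the current mode's nominal vrefresh
 * falls inside [48, 144].
 */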
10003
10004 static void reset_freesync_config_for_crtc(
10005         struct dm_crtc_state *new_crtc_state)
10006 {
10007         new_crtc_state->vrr_supported = false;
10008
10009         memset(&new_crtc_state->vrr_infopacket, 0,
10010                sizeof(new_crtc_state->vrr_infopacket));
10011 }
10012
10013 static bool
10014 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10015                                  struct drm_crtc_state *new_crtc_state)
10016 {
10017         struct drm_display_mode old_mode, new_mode;
10018
10019         if (!old_crtc_state || !new_crtc_state)
10020                 return false;
10021
10022         old_mode = old_crtc_state->mode;
10023         new_mode = new_crtc_state->mode;
10024
10025         if (old_mode.clock       == new_mode.clock &&
10026             old_mode.hdisplay    == new_mode.hdisplay &&
10027             old_mode.vdisplay    == new_mode.vdisplay &&
10028             old_mode.htotal      == new_mode.htotal &&
10029             old_mode.vtotal      != new_mode.vtotal &&
10030             old_mode.hsync_start == new_mode.hsync_start &&
10031             old_mode.vsync_start != new_mode.vsync_start &&
10032             old_mode.hsync_end   == new_mode.hsync_end &&
10033             old_mode.vsync_end   != new_mode.vsync_end &&
10034             old_mode.hskew       == new_mode.hskew &&
10035             old_mode.vscan       == new_mode.vscan &&
10036             (old_mode.vsync_end - old_mode.vsync_start) ==
10037             (new_mode.vsync_end - new_mode.vsync_start))
10038                 return true;
10039
10040         return false;
10041 }
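
/*
 * Illustrative example (hypothetical numbers): a 1920x1080 mode at 60 Hz
 * (clock 148500, htotal 2200, vtotal 1125, vsync_start 1084, vsync_end
 * 1089) compared against a front-porch-stretched ~48 Hz variant (same
 * clock and horizontal timings, vtotal 1406, vsync_start 1365, vsync_end
 * 1370) passes this check: only vtotal/vsync_start/vsync_end differ and
 * the vsync pulse width (vsync_end - vsync_start == 5) is preserved, so
 * the change can be absorbed by a fixed VRR rate without a full modeset.
 */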
10042
10043 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10044         uint64_t num, den, res;
10045         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10046
10047         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10048
10049         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10050         den = (unsigned long long)new_crtc_state->mode.htotal *
10051               (unsigned long long)new_crtc_state->mode.vtotal;
10052
10053         res = div_u64(num, den);
10054         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10055 }
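
/*
 * Worked example for the fixed-refresh math above (illustrative): for a
 * 1080p mode with clock = 148500 kHz, htotal = 2200 and vtotal = 1125:
 *
 *   num = 148500 * 1000 * 1000000 = 148,500,000,000,000
 *   den = 2200 * 1125             = 2,475,000
 *   res = num / den               = 60,000,000 uHz = 60.000000 Hz
 *
 * i.e. fixed_refresh_in_uhz is the mode's nominal refresh rate in uHz.
 */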
10056
10057 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10058                                 struct drm_atomic_state *state,
10059                                 struct drm_crtc *crtc,
10060                                 struct drm_crtc_state *old_crtc_state,
10061                                 struct drm_crtc_state *new_crtc_state,
10062                                 bool enable,
10063                                 bool *lock_and_validation_needed)
10064 {
10065         struct dm_atomic_state *dm_state = NULL;
10066         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10067         struct dc_stream_state *new_stream;
10068         int ret = 0;
10069
10070         /*
10071          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
10072          * dc_validation_set, and update changed items there.
10073          */
10074         struct amdgpu_crtc *acrtc = NULL;
10075         struct amdgpu_dm_connector *aconnector = NULL;
10076         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10077         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10078
10079         new_stream = NULL;
10080
10081         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10082         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10083         acrtc = to_amdgpu_crtc(crtc);
10084         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10085
10086         /* TODO This hack should go away */
10087         if (aconnector && enable) {
10088                 /* Make sure fake sink is created in plug-in scenario */
10089                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10090                                                             &aconnector->base);
10091                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10092                                                             &aconnector->base);
10093
10094                 if (IS_ERR(drm_new_conn_state)) {
10095                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10096                         goto fail;
10097                 }
10098
10099                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10100                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10101
10102                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10103                         goto skip_modeset;
10104
10105                 new_stream = create_validate_stream_for_sink(aconnector,
10106                                                              &new_crtc_state->mode,
10107                                                              dm_new_conn_state,
10108                                                              dm_old_crtc_state->stream);
10109
10110                 /*
10111                  * We can have no stream on ACTION_SET if a display
10112                  * was disconnected during S3. In this case it is not an
10113                  * error: the OS will be updated after detection and
10114                  * will do the right thing on the next atomic commit.
10115                  */
10116
10117                 if (!new_stream) {
10118                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10119                                         __func__, acrtc->base.base.id);
10120                         ret = -ENOMEM;
10121                         goto fail;
10122                 }
10123
10124                 /*
10125                  * TODO: Check VSDB bits to decide whether this should
10126                  * be enabled or not.
10127                  */
10128                 new_stream->triggered_crtc_reset.enabled =
10129                         dm->force_timing_sync;
10130
10131                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10132
10133                 ret = fill_hdr_info_packet(drm_new_conn_state,
10134                                            &new_stream->hdr_static_metadata);
10135                 if (ret)
10136                         goto fail;
10137
10138                 /*
10139                  * If we already removed the old stream from the context
10140                  * (and set the new stream to NULL) then we can't reuse
10141                  * the old stream even if the stream and scaling are unchanged.
10142                  * We'll hit the BUG_ON and black screen.
10143                  *
10144                  * TODO: Refactor this function to allow this check to work
10145                  * in all conditions.
10146                  */
10147                 if (amdgpu_freesync_vid_mode &&
10148                     dm_new_crtc_state->stream &&
10149                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10150                         goto skip_modeset;
10151
10152                 if (dm_new_crtc_state->stream &&
10153                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10154                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10155                         new_crtc_state->mode_changed = false;
10156                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10157                                          new_crtc_state->mode_changed);
10158                 }
10159         }
10160
10161         /* mode_changed flag may get updated above, need to check again */
10162         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10163                 goto skip_modeset;
10164
10165         DRM_DEBUG_ATOMIC(
10166                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10167                 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10168                 "connectors_changed:%d\n",
10169                 acrtc->crtc_id,
10170                 new_crtc_state->enable,
10171                 new_crtc_state->active,
10172                 new_crtc_state->planes_changed,
10173                 new_crtc_state->mode_changed,
10174                 new_crtc_state->active_changed,
10175                 new_crtc_state->connectors_changed);
10176
10177         /* Remove stream for any changed/disabled CRTC */
10178         if (!enable) {
10179
10180                 if (!dm_old_crtc_state->stream)
10181                         goto skip_modeset;
10182
10183                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10184                     is_timing_unchanged_for_freesync(new_crtc_state,
10185                                                      old_crtc_state)) {
10186                         new_crtc_state->mode_changed = false;
10187                         DRM_DEBUG_DRIVER(
10188                                 "Mode change not required for front porch change, "
10189                                 "setting mode_changed to %d",
10190                                 new_crtc_state->mode_changed);
10191
10192                         set_freesync_fixed_config(dm_new_crtc_state);
10193
10194                         goto skip_modeset;
10195                 } else if (amdgpu_freesync_vid_mode && aconnector &&
10196                            is_freesync_video_mode(&new_crtc_state->mode,
10197                                                   aconnector)) {
10198                         struct drm_display_mode *high_mode;
10199
10200                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10201                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10202                                 set_freesync_fixed_config(dm_new_crtc_state);
10203                         }
10204                 }
10205
10206                 ret = dm_atomic_get_state(state, &dm_state);
10207                 if (ret)
10208                         goto fail;
10209
10210                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10211                                 crtc->base.id);
10212
10213                 /* i.e. reset mode */
10214                 if (dc_remove_stream_from_ctx(
10215                                 dm->dc,
10216                                 dm_state->context,
10217                                 dm_old_crtc_state->stream) != DC_OK) {
10218                         ret = -EINVAL;
10219                         goto fail;
10220                 }
10221
10222                 dc_stream_release(dm_old_crtc_state->stream);
10223                 dm_new_crtc_state->stream = NULL;
10224
10225                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10226
10227                 *lock_and_validation_needed = true;
10228
10229         } else {/* Add stream for any updated/enabled CRTC */
10230                 /*
10231                  * Quick fix to prevent a NULL pointer dereference on new_stream
10232                  * when added MST connectors are not found in the existing
10233                  * crtc_state in chained mode. TODO: dig out the root cause.
10234                  */
10235                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10236                         goto skip_modeset;
10237
10238                 if (modereset_required(new_crtc_state))
10239                         goto skip_modeset;
10240
10241                 if (modeset_required(new_crtc_state, new_stream,
10242                                      dm_old_crtc_state->stream)) {
10243
10244                         WARN_ON(dm_new_crtc_state->stream);
10245
10246                         ret = dm_atomic_get_state(state, &dm_state);
10247                         if (ret)
10248                                 goto fail;
10249
10250                         dm_new_crtc_state->stream = new_stream;
10251
10252                         dc_stream_retain(new_stream);
10253
10254                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10255                                          crtc->base.id);
10256
10257                         if (dc_add_stream_to_ctx(
10258                                         dm->dc,
10259                                         dm_state->context,
10260                                         dm_new_crtc_state->stream) != DC_OK) {
10261                                 ret = -EINVAL;
10262                                 goto fail;
10263                         }
10264
10265                         *lock_and_validation_needed = true;
10266                 }
10267         }
10268
10269 skip_modeset:
10270         /* Release extra reference */
10271         if (new_stream)
10272                 dc_stream_release(new_stream);
10273
10274         /*
10275          * We want to do dc stream updates that do not require a
10276          * full modeset below.
10277          */
10278         if (!(enable && aconnector && new_crtc_state->active))
10279                 return 0;
10280         /*
10281          * Given the above conditions, the dc state cannot be NULL because:
10282          * 1. We're in the process of enabling CRTCs (the stream has just been
10283          *    added to the dc context, or is already on the context),
10284          * 2. the CRTC has a valid connector attached, and
10285          * 3. it is currently active and enabled.
10286          * => The dc stream state currently exists.
10287          */
10288         BUG_ON(dm_new_crtc_state->stream == NULL);
10289
10290         /* Scaling or underscan settings */
10291         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10292                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10293                 update_stream_scaling_settings(
10294                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10295
10296         /* ABM settings */
10297         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10298
10299         /*
10300          * Color management settings. We also update color properties
10301          * when a modeset is needed, to ensure it gets reprogrammed.
10302          */
10303         if (dm_new_crtc_state->base.color_mgmt_changed ||
10304             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10305                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10306                 if (ret)
10307                         goto fail;
10308         }
10309
10310         /* Update Freesync settings. */
10311         get_freesync_config_for_crtc(dm_new_crtc_state,
10312                                      dm_new_conn_state);
10313
10314         return ret;
10315
10316 fail:
10317         if (new_stream)
10318                 dc_stream_release(new_stream);
10319         return ret;
10320 }
10321
10322 static bool should_reset_plane(struct drm_atomic_state *state,
10323                                struct drm_plane *plane,
10324                                struct drm_plane_state *old_plane_state,
10325                                struct drm_plane_state *new_plane_state)
10326 {
10327         struct drm_plane *other;
10328         struct drm_plane_state *old_other_state, *new_other_state;
10329         struct drm_crtc_state *new_crtc_state;
10330         int i;
10331
10332         /*
10333          * TODO: Remove this hack once the checks below are sufficient
10334          * to determine when we need to reset all the planes on
10335          * the stream.
10336          */
10337         if (state->allow_modeset)
10338                 return true;
10339
10340         /* Exit early if we know that we're adding or removing the plane. */
10341         if (old_plane_state->crtc != new_plane_state->crtc)
10342                 return true;
10343
10344         /* old crtc == new_crtc == NULL, plane not in context. */
10345         if (!new_plane_state->crtc)
10346                 return false;
10347
10348         new_crtc_state =
10349                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10350
10351         if (!new_crtc_state)
10352                 return true;
10353
10354         /* CRTC Degamma changes currently require us to recreate planes. */
10355         if (new_crtc_state->color_mgmt_changed)
10356                 return true;
10357
10358         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10359                 return true;
10360
10361         /*
10362          * If there are any new primary or overlay planes being added or
10363          * removed then the z-order can potentially change. To ensure
10364          * correct z-order and pipe acquisition the current DC architecture
10365          * requires us to remove and recreate all existing planes.
10366          *
10367          * TODO: Come up with a more elegant solution for this.
10368          */
10369         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10370                 struct amdgpu_framebuffer *old_afb, *new_afb;
10371                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10372                         continue;
10373
10374                 if (old_other_state->crtc != new_plane_state->crtc &&
10375                     new_other_state->crtc != new_plane_state->crtc)
10376                         continue;
10377
10378                 if (old_other_state->crtc != new_other_state->crtc)
10379                         return true;
10380
10381                 /* Src/dst size and scaling updates. */
10382                 if (old_other_state->src_w != new_other_state->src_w ||
10383                     old_other_state->src_h != new_other_state->src_h ||
10384                     old_other_state->crtc_w != new_other_state->crtc_w ||
10385                     old_other_state->crtc_h != new_other_state->crtc_h)
10386                         return true;
10387
10388                 /* Rotation / mirroring updates. */
10389                 if (old_other_state->rotation != new_other_state->rotation)
10390                         return true;
10391
10392                 /* Blending updates. */
10393                 if (old_other_state->pixel_blend_mode !=
10394                     new_other_state->pixel_blend_mode)
10395                         return true;
10396
10397                 /* Alpha updates. */
10398                 if (old_other_state->alpha != new_other_state->alpha)
10399                         return true;
10400
10401                 /* Colorspace changes. */
10402                 if (old_other_state->color_range != new_other_state->color_range ||
10403                     old_other_state->color_encoding != new_other_state->color_encoding)
10404                         return true;
10405
10406                 /* Framebuffer checks fall at the end. */
10407                 if (!old_other_state->fb || !new_other_state->fb)
10408                         continue;
10409
10410                 /* Pixel format changes can require bandwidth updates. */
10411                 if (old_other_state->fb->format != new_other_state->fb->format)
10412                         return true;
10413
10414                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10415                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10416
10417                 /* Tiling and DCC changes also require bandwidth updates. */
10418                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10419                     old_afb->base.modifier != new_afb->base.modifier)
10420                         return true;
10421         }
10422
10423         return false;
10424 }
10425
10426 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10427                               struct drm_plane_state *new_plane_state,
10428                               struct drm_framebuffer *fb)
10429 {
10430         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10431         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10432         unsigned int pitch;
10433         bool linear;
10434
10435         if (fb->width > new_acrtc->max_cursor_width ||
10436             fb->height > new_acrtc->max_cursor_height) {
10437                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10438                                  new_plane_state->fb->width,
10439                                  new_plane_state->fb->height);
10440                 return -EINVAL;
10441         }
10442         if (new_plane_state->src_w != fb->width << 16 ||
10443             new_plane_state->src_h != fb->height << 16) {
10444                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10445                 return -EINVAL;
10446         }
10447
10448         /* Pitch in pixels */
10449         pitch = fb->pitches[0] / fb->format->cpp[0];
10450
10451         if (fb->width != pitch) {
10452                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10453                                  fb->width, pitch);
10454                 return -EINVAL;
10455         }

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/* Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier. */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
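		/*
		 * Pre-GFX9 (family < AMDGPU_FAMILY_AI) parts encode tiling in
		 * ARRAY_MODE/MICRO_TILE_MODE; GFX9 and newer use SWIZZLE_MODE,
		 * where 0 means linear.
		 */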
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

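	/*
	 * Cursor planes are handled entirely here: DC programs the cursor
	 * through the stream attached to the owning pipe rather than through
	 * a dc_plane_state, so no DC plane is created or validated for them.
	 */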
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane *cursor = crtc->cursor, *underlying;
	struct drm_plane_state *new_cursor_state, *new_underlying_state;
	int i;
	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;

	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the underlying planes'. */

	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;

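	/*
	 * src_w/src_h are in 16.16 fixed point; the scale factors below are
	 * expressed in thousandths so that cursor and underlying plane
	 * scaling can be compared with integer arithmetic. This assumes a
	 * non-zero source rectangle on every enabled plane considered here.
	 */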
	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
			 (new_cursor_state->src_w >> 16);
	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
			 (new_cursor_state->src_h >> 16);

	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
			continue;

		/* Ignore disabled planes */
		if (!new_underlying_state->fb)
			continue;

		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
				     (new_underlying_state->src_w >> 16);
		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
				     (new_underlying_state->src_h >> 16);

		if (cursor_scale_w != underlying_scale_w ||
		    cursor_scale_h != underlying_scale_h) {
			drm_dbg_atomic(crtc->dev,
				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
			return -EINVAL;
		}

		/* If this plane covers the whole CRTC, no need to check planes underneath */
		if (new_underlying_state->crtc_x <= 0 &&
		    new_underlying_state->crtc_y <= 0 &&
		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
			break;
	}

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;
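
	/*
	 * Find an MST connector driving this CRTC. A mode change on one MST
	 * stream can alter the DSC/bandwidth allocation of every stream in
	 * the same topology, so all affected CRTCs must be added to state.
	 */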
	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for the AMDGPU DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flips using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mgr;
#endif

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret)
			goto fail;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all CRTCs that require it */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all CRTCs that require it */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret)
			goto fail;
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/* TODO: scaling-change validation was removed because a new stream
	 * cannot currently be committed into the context without causing a
	 * full reset. Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* set the slot info for each mst_state based on the link encoding format */
	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct amdgpu_dm_connector *aconnector;
		struct drm_connector *connector;
		struct drm_connector_list_iter iter;
		u8 link_coding_cap;

		if (!mgr->mst_state)
			continue;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			int id = connector->index;

			if (id == mst_state->mgr->conn_base_id) {
				aconnector = to_amdgpu_dm_connector(connector);
				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
				drm_dp_mst_update_slots(mst_state, link_coding_cap);

				break;
			}
		}
		drm_connector_list_iter_end(&iter);
	}
#endif
	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;
		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			drm_dbg_atomic(dev,
				       "DC global validation failure: %s (%d)\n",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* ret must be 0 here or we would have bailed out to the fail label. */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

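	/*
	 * A sink that sets DP_MSA_TIMING_PAR_IGNORED in its DPCD can ignore
	 * the MSA timing parameters and thus tolerate the varying timings
	 * that variable refresh requires; only such sinks need the EDID
	 * range-descriptor check performed by the caller.
	 */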
	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
					output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block sent completely, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}

static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing	= &edid->detailed_timings[i];
				data	= &timing->data.other_data;
				range	= &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

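			/*
			 * Only advertise FreeSync when the EDID range spans
			 * more than 10 Hz; smaller ranges leave no useful
			 * headroom for varying the refresh rate.
			 */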
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
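		/*
		 * FreeSync over HDMI is signalled through AMD's vendor
		 * specific data block in the CEA extension rather than the
		 * EDID range descriptor used for DP.
		 */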
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

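	/*
	 * Propagate the current force_timing_sync setting (toggled through
	 * debugfs) to every active stream, then retrigger synchronization.
	 */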
	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

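	/*
	 * Register reads cannot be serviced while a DMUB register-write
	 * burst is being gathered, since they would require flushing the
	 * queued writes first; treat such a read as a programming error.
	 */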
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
	uint8_t status_type, uint32_t *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int return_status = -1;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;

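	/*
	 * On success, AUX transfers report the reply length so callers can
	 * consume the payload, while SET_CONFIG simply reports 0; any
	 * failure is folded into *operation_result and returns -1.
	 */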
	if (is_cmd_aux) {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = p_notify->aux_reply.length;
			*operation_result = p_notify->result;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
			*operation_result = AUX_RET_ERROR_TIMEOUT;
		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		} else {
			*operation_result = AUX_RET_ERROR_UNKNOWN;
		}
	} else {
		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
			return_status = 0;
			*operation_result = p_notify->sc_status;
		} else {
			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
		}
	}

	return return_status;
}

int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

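	/*
	 * The DMUB interface is asynchronous: kick off the request here and
	 * block below on dmub_aux_transfer_done, which is completed by the
	 * DMUB notification handler, to present callers a synchronous API.
	 */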
	if (is_cmd_aux) {
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}