/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

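/*
 * Map the dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed to userspace.
 */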
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: Counter for vertical blanks, or 0 if the CRTC index is out of
 * range or no dc_stream_state is attached to it yet.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

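/*
 * Find the amdgpu_crtc whose OTG (output timing generator) instance
 * matches otg_inst. Falls back to CRTC 0, with a warning, when the
 * instance is -1, i.e. not yet assigned.
 */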
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

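/*
 * VRR is considered active when the freesync state is either "active
 * variable" or "active fixed". The _irq variant reads the IRQ-side copy
 * of the freesync config held in dm_irq_params; amdgpu_dm_vrr_active()
 * below checks the same condition on a CRTC's atomic state.
 */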
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

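/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate, and in VRR mode performs the core
 * vblank handling after end of front-porch, where vblank timestamping
 * gives valid results. Also runs below-the-range (BTR) processing for
 * pre-DCE12 ASICs.
 */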
static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX
 * command issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Looks up the connector that
 * matches the notification's link index and calls the appropriate HPD
 * helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index >= adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else
                return false;

        return true;
}

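/*
 * Deferred-work handler for offloaded DMUB HPD notifications: invokes
 * the registered dmub_callback for the notification type, then frees
 * the copied notification and the work item itself.
 */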
static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                kfree(dmub_hpd_wrk);
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching pending DMUB notifications
 * and draining the DMUB trace log buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
                irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type] == true) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                        entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

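/*
 * Bring up the DMCUB (display microcontroller) hardware: validate the
 * framebuffer info and firmware, reset any previously running instance,
 * copy firmware and VBIOS into their framebuffer windows, zero the
 * mailbox/trace-buffer/fw-state windows, and hand the region layout to
 * dmub_srv_hw_init(). Returns 0 when DMUB is unsupported on the ASIC,
 * or a negative errno on failure.
 */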
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;
        struct dc *dc = adev->dm.dc;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->asic_type) {
        case CHIP_YELLOW_CARP:
                if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
                        hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
                        hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
#endif
                }
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

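/*
 * On resume, re-check DMUB state: if the hardware is still marked as
 * initialized only wait for the firmware auto-load to finish, otherwise
 * redo the full dm_dmub_hw_init() sequence.
 */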
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
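/*
 * Populate a dc_phy_addr_space_config from the GMC/MMHUB view of the
 * address space: system aperture bounds (with the Raven2 high-address
 * workaround), AGP window, framebuffer base/offset/top, and the GART
 * page table start/end/base addresses.
 */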
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increase system aperture high address (add 1)
                 * to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
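/*
 * Deferred vblank work: maintains the count of CRTCs with vblank IRQs
 * enabled, allows DC idle optimizations (e.g. MALL) only when that count
 * drops to zero, and disables/enables PSR to match the OS's vblank
 * requirements.
 */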
static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /* Control PSR based on vblank requirements from OS */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}
#endif

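/*
 * Offloaded HPD_RX handler: re-detects the sink under the HPD lock,
 * then (unless the device is in GPU reset) services automated-test
 * requests or, for non-eDP links that report link loss, handles the
 * link loss and clears is_handling_link_loss under the offload lock.
 */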
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work;
        struct amdgpu_dm_connector *aconnector;
        struct dc_link *dc_link;
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;

        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;

        if (!aconnector) {
                DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
                goto skip;
        }

        adev = drm_to_adev(aconnector->base.dev);
        dc_link = aconnector->dc_link;

        mutex_lock(&aconnector->hpd_lock);
        if (!dc_link_detect_sink(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);

        if (new_connection_type == dc_connection_none)
                goto skip;

        if (amdgpu_in_reset(adev))
                goto skip;

        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
                dc_link_dp_handle_automated_test(dc_link);
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                dc_link_dp_handle_link_loss(dc_link);
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
        }
        mutex_unlock(&adev->dm.dc_lock);

skip:
        kfree(offload_work);
}

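/*
 * Allocate one single-threaded offload workqueue (plus its lock) per
 * link, so HPD_RX interrupt servicing can be deferred out of the IRQ
 * handler. Returns NULL on allocation failure.
 */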
1335 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1336 {
1337         int max_caps = dc->caps.max_links;
1338         int i = 0;
1339         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1340
1341         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1342
1343         if (!hpd_rx_offload_wq)
1344                 return NULL;
1345
1346
1347         for (i = 0; i < max_caps; i++) {
1348                 hpd_rx_offload_wq[i].wq =
1349                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1350
		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!\n");
			goto out_err;
		}
1355
1356                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1357         }
1358
	return hpd_rx_offload_wq;

out_err:
	/* Unwind any workqueues created before the failure. */
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}
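
/*
 * Design note: one single-threaded workqueue is created per link (up to
 * dc->caps.max_links) rather than one shared queue. Work items offloaded
 * from the HPD RX interrupt for a given link are therefore serialized
 * against each other, while different links never block one another.
 */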
1361
1362 struct amdgpu_stutter_quirk {
1363         u16 chip_vendor;
1364         u16 chip_device;
1365         u16 subsys_vendor;
1366         u16 subsys_device;
1367         u8 revision;
1368 };
1369
1370 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1371         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1372         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1373         { 0, 0, 0, 0, 0 },
1374 };
1375
1376 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1377 {
1378         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1379
1380         while (p && p->chip_device != 0) {
1381                 if (pdev->vendor == p->chip_vendor &&
1382                     pdev->device == p->chip_device &&
1383                     pdev->subsystem_vendor == p->subsys_vendor &&
1384                     pdev->subsystem_device == p->subsys_device &&
1385                     pdev->revision == p->revision) {
1386                         return true;
1387                 }
1388                 ++p;
1389         }
1390         return false;
1391 }
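
/*
 * Illustrative sketch only: a board needing the same stutter quirk would add
 * an entry above the all-zero terminator of amdgpu_stutter_quirk_list, e.g.
 * (hypothetical IDs):
 *
 *	{ 0x1002, 0x15d8, 0x103c, 0x8615, 0xc4 },
 *
 * i.e. PCI vendor/device, subsystem vendor/device, and revision, all of
 * which must match exactly for dm_should_disable_stutter() to return true.
 */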
1392
1393 static int amdgpu_dm_init(struct amdgpu_device *adev)
1394 {
1395         struct dc_init_data init_data;
1396 #ifdef CONFIG_DRM_AMD_DC_HDCP
1397         struct dc_callback_init init_params;
1398 #endif
1399         int r;
1400
1401         adev->dm.ddev = adev_to_drm(adev);
1402         adev->dm.adev = adev;
1403
1404         /* Zero all the fields */
1405         memset(&init_data, 0, sizeof(init_data));
1406 #ifdef CONFIG_DRM_AMD_DC_HDCP
1407         memset(&init_params, 0, sizeof(init_params));
1408 #endif
1409
1410         mutex_init(&adev->dm.dc_lock);
1411         mutex_init(&adev->dm.audio_lock);
1412 #if defined(CONFIG_DRM_AMD_DC_DCN)
1413         spin_lock_init(&adev->dm.vblank_lock);
1414 #endif
1415
	if (amdgpu_dm_irq_init(adev)) {
1417                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1418                 goto error;
1419         }
1420
1421         init_data.asic_id.chip_family = adev->family;
1422
1423         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1424         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1425         init_data.asic_id.chip_id = adev->pdev->device;
1426
1427         init_data.asic_id.vram_width = adev->gmc.vram_width;
1428         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1429         init_data.asic_id.atombios_base_address =
1430                 adev->mode_info.atom_context->bios;
1431
1432         init_data.driver = adev;
1433
1434         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1435
1436         if (!adev->dm.cgs_device) {
1437                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1438                 goto error;
1439         }
1440
1441         init_data.cgs_device = adev->dm.cgs_device;
1442
1443         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1444
1445         switch (adev->asic_type) {
1446         case CHIP_CARRIZO:
1447         case CHIP_STONEY:
1448                 init_data.flags.gpu_vm_support = true;
1449                 break;
1450         default:
1451                 switch (adev->ip_versions[DCE_HWIP][0]) {
1452                 case IP_VERSION(2, 1, 0):
1453                         init_data.flags.gpu_vm_support = true;
1454                         switch (adev->dm.dmcub_fw_version) {
1455                         case 0: /* development */
1456                         case 0x1: /* linux-firmware.git hash 6d9f399 */
1457                         case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1458                                 init_data.flags.disable_dmcu = false;
1459                                 break;
1460                         default:
1461                                 init_data.flags.disable_dmcu = true;
1462                         }
1463                         break;
1464                 case IP_VERSION(1, 0, 0):
1465                 case IP_VERSION(1, 0, 1):
1466                 case IP_VERSION(3, 0, 1):
1467                 case IP_VERSION(3, 1, 2):
1468                 case IP_VERSION(3, 1, 3):
1469                         init_data.flags.gpu_vm_support = true;
1470                         break;
1471                 case IP_VERSION(2, 0, 3):
1472                         init_data.flags.disable_dmcu = true;
1473                         break;
1474                 default:
1475                         break;
1476                 }
1477                 break;
1478         }
1479
1480         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1481                 init_data.flags.fbc_support = true;
1482
1483         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1484                 init_data.flags.multi_mon_pp_mclk_switch = true;
1485
1486         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1487                 init_data.flags.disable_fractional_pwm = true;
1488
1489         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1490                 init_data.flags.edp_no_power_sequencing = true;
1491
1492 #ifdef CONFIG_DRM_AMD_DC_DCN
1493         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1494                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1495         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1496                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1497 #endif
1498
1499         init_data.flags.power_down_display_on_boot = true;
1500
1501         if (check_seamless_boot_capability(adev)) {
1502                 init_data.flags.power_down_display_on_boot = false;
1503                 init_data.flags.allow_seamless_boot_optimization = true;
1504                 DRM_INFO("Seamless boot condition check passed\n");
1505         }
1506
1507         INIT_LIST_HEAD(&adev->dm.da_list);
1508         /* Display Core create. */
1509         adev->dm.dc = dc_create(&init_data);
1510
1511         if (adev->dm.dc) {
1512                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1513         } else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1515                 goto error;
1516         }
1517
1518         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1519                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1520                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1521         }
1522
1523         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1525         if (dm_should_disable_stutter(adev->pdev))
1526                 adev->dm.dc->debug.disable_stutter = true;
1527
1528         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1529                 adev->dm.dc->debug.disable_stutter = true;
1530
1531         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1532                 adev->dm.dc->debug.disable_dsc = true;
1533                 adev->dm.dc->debug.disable_dsc_edp = true;
1534         }
1535
1536         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1537                 adev->dm.dc->debug.disable_clock_gate = true;
1538
1539         r = dm_dmub_hw_init(adev);
1540         if (r) {
1541                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1542                 goto error;
1543         }
1544
1545         dc_hardware_init(adev->dm.dc);
1546
1547         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1548         if (!adev->dm.hpd_rx_offload_wq) {
1549                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1550                 goto error;
1551         }
1552
1553 #if defined(CONFIG_DRM_AMD_DC_DCN)
1554         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1555                 struct dc_phy_addr_space_config pa_config;
1556
1557                 mmhub_read_system_context(adev, &pa_config);
1558
1559                 // Call the DC init_memory func
1560                 dc_setup_system_context(adev->dm.dc, &pa_config);
1561         }
1562 #endif
1563
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
1571
1572         amdgpu_dm_init_color_mod();
1573
1574 #if defined(CONFIG_DRM_AMD_DC_DCN)
1575         if (adev->dm.dc->caps.max_links > 0) {
1576                 adev->dm.vblank_control_workqueue =
1577                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1578                 if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_control_workqueue.\n");
1580         }
1581 #endif
1582
1583 #ifdef CONFIG_DRM_AMD_DC_HDCP
1584         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1585                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1586
1587                 if (!adev->dm.hdcp_workqueue)
1588                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1589                 else
1590                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1591
1592                 dc_init_callbacks(adev->dm.dc, &init_params);
1593         }
1594 #endif
1595 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1596         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1597 #endif
1598         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1599                 init_completion(&adev->dm.dmub_aux_transfer_done);
1600                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1601                 if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1603                         goto error;
1604                 }
1605
1606                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1607                 if (!adev->dm.delayed_hpd_wq) {
1608                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1609                         goto error;
1610                 }
1611
1612                 amdgpu_dm_outbox_init(adev);
1613 #if defined(CONFIG_DRM_AMD_DC_DCN)
1614                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1615                         dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register DMUB AUX callback\n");
1617                         goto error;
1618                 }
1619                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register DMUB HPD callback\n");
1621                         goto error;
1622                 }
1623                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback\n");
1625                         goto error;
1626                 }
1627 #endif /* CONFIG_DRM_AMD_DC_DCN */
1628         }
1629
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
1635
1636         /* create fake encoders for MST */
1637         dm_dp_create_fake_mst_encoders(adev);
1638
1639         /* TODO: Add_display_info? */
1640
1641         /* TODO use dynamic cursor width */
1642         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1643         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1644
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}
1650
1652         DRM_DEBUG_DRIVER("KMS initialized.\n");
1653
1654         return 0;
1655 error:
1656         amdgpu_dm_fini(adev);
1657
1658         return -EINVAL;
1659 }
1660
1661 static int amdgpu_dm_early_fini(void *handle)
1662 {
1663         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1664
1665         amdgpu_dm_audio_fini(adev);
1666
1667         return 0;
1668 }
1669
1670 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1671 {
1672         int i;
1673
1674 #if defined(CONFIG_DRM_AMD_DC_DCN)
1675         if (adev->dm.vblank_control_workqueue) {
1676                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1677                 adev->dm.vblank_control_workqueue = NULL;
1678         }
1679 #endif
1680
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1684
1685         amdgpu_dm_destroy_drm_device(&adev->dm);
1686
1687 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1688         if (adev->dm.crc_rd_wrk) {
1689                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1690                 kfree(adev->dm.crc_rd_wrk);
1691                 adev->dm.crc_rd_wrk = NULL;
1692         }
1693 #endif
1694 #ifdef CONFIG_DRM_AMD_DC_HDCP
1695         if (adev->dm.hdcp_workqueue) {
1696                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1697                 adev->dm.hdcp_workqueue = NULL;
1698         }
1699
1700         if (adev->dm.dc)
1701                 dc_deinit_callbacks(adev->dm.dc);
1702 #endif
1703
	/* adev->dm.dc may be NULL here if amdgpu_dm_init() failed early. */
	if (adev->dm.dc)
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1707                 kfree(adev->dm.dmub_notify);
1708                 adev->dm.dmub_notify = NULL;
1709                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1710                 adev->dm.delayed_hpd_wq = NULL;
1711         }
1712
1713         if (adev->dm.dmub_bo)
1714                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1715                                       &adev->dm.dmub_bo_gpu_addr,
1716                                       &adev->dm.dmub_bo_cpu_addr);
1717
1718         if (adev->dm.hpd_rx_offload_wq) {
1719                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1720                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1721                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1722                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1723                         }
1724                 }
1725
1726                 kfree(adev->dm.hpd_rx_offload_wq);
1727                 adev->dm.hpd_rx_offload_wq = NULL;
1728         }
1729
1730         /* DC Destroy TODO: Replace destroy DAL */
1731         if (adev->dm.dc)
1732                 dc_destroy(&adev->dm.dc);
1733         /*
1734          * TODO: pageflip, vlank interrupt
1735          *
1736          * amdgpu_dm_irq_fini(adev);
1737          */
1738
1739         if (adev->dm.cgs_device) {
1740                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1741                 adev->dm.cgs_device = NULL;
1742         }
1743         if (adev->dm.freesync_module) {
1744                 mod_freesync_destroy(adev->dm.freesync_module);
1745                 adev->dm.freesync_module = NULL;
1746         }
1747
1748         mutex_destroy(&adev->dm.audio_lock);
1749         mutex_destroy(&adev->dm.dc_lock);
}
1753
1754 static int load_dmcu_fw(struct amdgpu_device *adev)
1755 {
1756         const char *fw_name_dmcu = NULL;
1757         int r;
1758         const struct dmcu_firmware_header_v1_0 *hdr;
1759
	switch (adev->asic_type) {
1761 #if defined(CONFIG_DRM_AMD_DC_SI)
1762         case CHIP_TAHITI:
1763         case CHIP_PITCAIRN:
1764         case CHIP_VERDE:
1765         case CHIP_OLAND:
1766 #endif
1767         case CHIP_BONAIRE:
1768         case CHIP_HAWAII:
1769         case CHIP_KAVERI:
1770         case CHIP_KABINI:
1771         case CHIP_MULLINS:
1772         case CHIP_TONGA:
1773         case CHIP_FIJI:
1774         case CHIP_CARRIZO:
1775         case CHIP_STONEY:
1776         case CHIP_POLARIS11:
1777         case CHIP_POLARIS10:
1778         case CHIP_POLARIS12:
1779         case CHIP_VEGAM:
1780         case CHIP_VEGA10:
1781         case CHIP_VEGA12:
1782         case CHIP_VEGA20:
1783                 return 0;
1784         case CHIP_NAVI12:
1785                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1786                 break;
1787         case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1792                 else
1793                         return 0;
1794                 break;
1795         default:
1796                 switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
1807                         return 0;
1808                 default:
1809                         break;
1810                 }
1811                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1812                 return -EINVAL;
1813         }
1814
1815         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1816                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1817                 return 0;
1818         }
1819
1820         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1821         if (r == -ENOENT) {
1822                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1823                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1824                 adev->dm.fw_dmcu = NULL;
1825                 return 0;
1826         }
1827         if (r) {
1828                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1829                         fw_name_dmcu);
1830                 return r;
1831         }
1832
1833         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1834         if (r) {
1835                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1836                         fw_name_dmcu);
1837                 release_firmware(adev->dm.fw_dmcu);
1838                 adev->dm.fw_dmcu = NULL;
1839                 return r;
1840         }
1841
1842         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1843         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1844         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1845         adev->firmware.fw_size +=
1846                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1847
1848         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1849         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1850         adev->firmware.fw_size +=
1851                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1852
1853         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1854
1855         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1856
1857         return 0;
1858 }
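
/*
 * Worked example with illustrative numbers only: for a DMCU image with
 * ucode_size_bytes = 0x6000 and intv_size_bytes = 0x1000, the ERAM ucode
 * entry above accounts for 0x5000 bytes and the interrupt vector entry for
 * 0x1000 bytes, each rounded up to PAGE_SIZE before being added to
 * adev->firmware.fw_size.
 */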
1859
1860 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1861 {
1862         struct amdgpu_device *adev = ctx;
1863
1864         return dm_read_reg(adev->dm.dc->ctx, address);
1865 }
1866
1867 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1868                                      uint32_t value)
1869 {
1870         struct amdgpu_device *adev = ctx;
1871
1872         return dm_write_reg(adev->dm.dc->ctx, address, value);
1873 }
1874
1875 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1876 {
1877         struct dmub_srv_create_params create_params;
1878         struct dmub_srv_region_params region_params;
1879         struct dmub_srv_region_info region_info;
1880         struct dmub_srv_fb_params fb_params;
1881         struct dmub_srv_fb_info *fb_info;
1882         struct dmub_srv *dmub_srv;
1883         const struct dmcub_firmware_header_v1_0 *hdr;
1884         const char *fw_name_dmub;
1885         enum dmub_asic dmub_asic;
1886         enum dmub_status status;
1887         int r;
1888
1889         switch (adev->ip_versions[DCE_HWIP][0]) {
1890         case IP_VERSION(2, 1, 0):
1891                 dmub_asic = DMUB_ASIC_DCN21;
1892                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1893                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1894                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1895                 break;
	case IP_VERSION(3, 0, 0):
		dmub_asic = DMUB_ASIC_DCN30;
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		else
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
1905         case IP_VERSION(3, 0, 1):
1906                 dmub_asic = DMUB_ASIC_DCN301;
1907                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1908                 break;
1909         case IP_VERSION(3, 0, 2):
1910                 dmub_asic = DMUB_ASIC_DCN302;
1911                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1912                 break;
1913         case IP_VERSION(3, 0, 3):
1914                 dmub_asic = DMUB_ASIC_DCN303;
1915                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1916                 break;
1917         case IP_VERSION(3, 1, 2):
1918         case IP_VERSION(3, 1, 3):
1919                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1920                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1921                 break;
1923         default:
1924                 /* ASIC doesn't support DMUB. */
1925                 return 0;
1926         }
1927
1928         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1929         if (r) {
1930                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1931                 return 0;
1932         }
1933
1934         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1935         if (r) {
1936                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1937                 return 0;
1938         }
1939
1940         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1941         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1942
1943         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1944                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1945                         AMDGPU_UCODE_ID_DMCUB;
1946                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1947                         adev->dm.dmub_fw;
1948                 adev->firmware.fw_size +=
1949                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1950
1951                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1952                          adev->dm.dmcub_fw_version);
1953         }
1954
1956         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1957         dmub_srv = adev->dm.dmub_srv;
1958
1959         if (!dmub_srv) {
1960                 DRM_ERROR("Failed to allocate DMUB service!\n");
1961                 return -ENOMEM;
1962         }
1963
1964         memset(&create_params, 0, sizeof(create_params));
1965         create_params.user_ctx = adev;
1966         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1967         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1968         create_params.asic = dmub_asic;
1969
1970         /* Create the DMUB service. */
1971         status = dmub_srv_create(dmub_srv, &create_params);
1972         if (status != DMUB_STATUS_OK) {
1973                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1974                 return -EINVAL;
1975         }
1976
1977         /* Calculate the size of all the regions for the DMUB service. */
1978         memset(&region_params, 0, sizeof(region_params));
1979
1980         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1981                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1982         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1983         region_params.vbios_size = adev->bios_size;
1984         region_params.fw_bss_data = region_params.bss_data_size ?
1985                 adev->dm.dmub_fw->data +
1986                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1987                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1988         region_params.fw_inst_const =
1989                 adev->dm.dmub_fw->data +
1990                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1991                 PSP_HEADER_BYTES;
1992
1993         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1994                                            &region_info);
1995
1996         if (status != DMUB_STATUS_OK) {
1997                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1998                 return -EINVAL;
1999         }
2000
2001         /*
2002          * Allocate a framebuffer based on the total size of all the regions.
2003          * TODO: Move this into GART.
2004          */
2005         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2006                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2007                                     &adev->dm.dmub_bo_gpu_addr,
2008                                     &adev->dm.dmub_bo_cpu_addr);
2009         if (r)
2010                 return r;
2011
2012         /* Rebase the regions on the framebuffer address. */
2013         memset(&fb_params, 0, sizeof(fb_params));
2014         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2015         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2016         fb_params.region_info = &region_info;
2017
2018         adev->dm.dmub_fb_info =
2019                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2020         fb_info = adev->dm.dmub_fb_info;
2021
2022         if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
2025                 return -ENOMEM;
2026         }
2027
2028         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2029         if (status != DMUB_STATUS_OK) {
2030                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2031                 return -EINVAL;
2032         }
2033
2034         return 0;
2035 }
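
/*
 * Layout note (as consumed above, not an authoritative spec): the DMUB
 * firmware image is laid out as
 *
 *	[ PSP header | inst_const | bss/data | PSP footer ]
 *
 * so fw_inst_const skips PSP_HEADER_BYTES from the start of the ucode,
 * inst_const_size excludes both the PSP header and footer, and fw_bss_data
 * begins immediately after inst_const in the image.
 */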
2036
2037 static int dm_sw_init(void *handle)
2038 {
2039         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2040         int r;
2041
2042         r = dm_dmub_sw_init(adev);
2043         if (r)
2044                 return r;
2045
2046         return load_dmcu_fw(adev);
2047 }
2048
2049 static int dm_sw_fini(void *handle)
2050 {
2051         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2052
2053         kfree(adev->dm.dmub_fb_info);
2054         adev->dm.dmub_fb_info = NULL;
2055
2056         if (adev->dm.dmub_srv) {
2057                 dmub_srv_destroy(adev->dm.dmub_srv);
2058                 adev->dm.dmub_srv = NULL;
2059         }
2060
2061         release_firmware(adev->dm.dmub_fw);
2062         adev->dm.dmub_fw = NULL;
2063
2064         release_firmware(adev->dm.fw_dmcu);
2065         adev->dm.fw_dmcu = NULL;
2066
2067         return 0;
2068 }
2069
2070 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2071 {
2072         struct amdgpu_dm_connector *aconnector;
2073         struct drm_connector *connector;
2074         struct drm_connector_list_iter iter;
2075         int ret = 0;
2076
2077         drm_connector_list_iter_begin(dev, &iter);
2078         drm_for_each_connector_iter(connector, &iter) {
2079                 aconnector = to_amdgpu_dm_connector(connector);
2080                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2081                     aconnector->mst_mgr.aux) {
2082                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2083                                          aconnector,
2084                                          aconnector->base.base.id);
2085
2086                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2087                         if (ret < 0) {
2088                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2089                                 aconnector->dc_link->type =
2090                                         dc_connection_single;
2091                                 break;
2092                         }
2093                 }
2094         }
2095         drm_connector_list_iter_end(&iter);
2096
2097         return ret;
2098 }
2099
2100 static int dm_late_init(void *handle)
2101 {
2102         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2104         struct dmcu_iram_parameters params;
2105         unsigned int linear_lut[16];
2106         int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
2110
2111         for (i = 0; i < 16; i++)
2112                 linear_lut[i] = 0xFFFF * i / 15;
2113
2114         params.set = 0;
2115         params.backlight_ramping_override = false;
2116         params.backlight_ramping_start = 0xCCCC;
2117         params.backlight_ramping_reduction = 0xCCCCCCCC;
2118         params.backlight_lut_array_size = 16;
2119         params.backlight_lut_array = linear_lut;
2120
	/*
	 * Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F.
	 */
	params.min_abm_backlight = 0x28F;
	/*
	 * Where ABM is implemented on DMCUB, the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
2129         if (dmcu) {
2130                 if (!dmcu_load_iram(dmcu, params))
2131                         return -EINVAL;
2132         } else if (adev->dm.dc->ctx->dmub_srv) {
2133                 struct dc_link *edp_links[MAX_NUM_EDP];
2134                 int edp_num;
2135
2136                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2137                 for (i = 0; i < edp_num; i++) {
2138                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2139                                 return -EINVAL;
2140                 }
2141         }
2142
2143         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2144 }
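
/*
 * Worked example for the values programmed above: linear_lut[i] =
 * 0xFFFF * i / 15 yields 0x0000, 0x1111, 0x2222, ..., 0xFFFF, i.e. a linear
 * 16-point ramp over the full 16-bit range, and min_abm_backlight = 0x28F
 * is 655 decimal, which is 1% of 0xFFFF (65535) rounded down.
 */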
2145
2146 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2147 {
2148         struct amdgpu_dm_connector *aconnector;
2149         struct drm_connector *connector;
2150         struct drm_connector_list_iter iter;
2151         struct drm_dp_mst_topology_mgr *mgr;
2152         int ret;
2153         bool need_hotplug = false;
2154
2155         drm_connector_list_iter_begin(dev, &iter);
2156         drm_for_each_connector_iter(connector, &iter) {
2157                 aconnector = to_amdgpu_dm_connector(connector);
2158                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2159                     aconnector->mst_port)
2160                         continue;
2161
2162                 mgr = &aconnector->mst_mgr;
2163
2164                 if (suspend) {
2165                         drm_dp_mst_topology_mgr_suspend(mgr);
2166                 } else {
2167                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2168                         if (ret < 0) {
2169                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2170                                 need_hotplug = true;
2171                         }
2172                 }
2173         }
2174         drm_connector_list_iter_end(&iter);
2175
2176         if (need_hotplug)
2177                 drm_kms_helper_hotplug_event(dev);
2178 }
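
/*
 * Note on the failure path above: if topology manager resume fails, MST is
 * torn down on that connector and a single hotplug event is sent after the
 * loop, so userspace re-probes the link rather than reusing a stale topology.
 */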
2179
2180 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2181 {
2182         struct smu_context *smu = &adev->smu;
2183         int ret = 0;
2184
2185         if (!is_support_sw_smu(adev))
2186                 return 0;
2187
	/*
	 * This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver DC implementation.
	 * For Navi1x, the clock settings of the DCN watermarks are fixed and
	 * should be passed to the SMU during boot up and on resume from S3.
	 *
	 * Boot up: DC calculates the DCN watermark clock settings within
	 * dc_create (dcn20_resource_construct), then calls the pplib functions
	 * below to pass the settings to the SMU:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the DCN watermarks are also fixed
	 * values. DC has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux:
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
2218         switch (adev->ip_versions[DCE_HWIP][0]) {
2219         case IP_VERSION(2, 0, 2):
2220         case IP_VERSION(2, 0, 0):
2221                 break;
2222         default:
2223                 return 0;
2224         }
2225
2226         ret = smu_write_watermarks_table(smu);
2227         if (ret) {
2228                 DRM_ERROR("Failed to update WMTABLE!\n");
2229                 return ret;
2230         }
2231
2232         return 0;
2233 }
2234
2235 /**
2236  * dm_hw_init() - Initialize DC device
2237  * @handle: The base driver device containing the amdgpu_dm device.
2238  *
2239  * Initialize the &struct amdgpu_display_manager device. This involves calling
2240  * the initializers of each DM component, then populating the struct with them.
2241  *
2242  * Although the function implies hardware initialization, both hardware and
2243  * software are initialized here. Splitting them out to their relevant init
2244  * hooks is a future TODO item.
2245  *
2246  * Some notable things that are initialized here:
2247  *
2248  * - Display Core, both software and hardware
2249  * - DC modules that we need (freesync and color management)
2250  * - DRM software states
2251  * - Interrupt sources and handlers
2252  * - Vblank support
2253  * - Debug FS entries, if enabled
2254  */
2255 static int dm_hw_init(void *handle)
2256 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Create DAL display manager */
	r = amdgpu_dm_init(adev);
	if (r)
		return r;
	amdgpu_dm_hpd_init(adev);
2261
2262         return 0;
2263 }
2264
2265 /**
2266  * dm_hw_fini() - Teardown DC device
2267  * @handle: The base driver device containing the amdgpu_dm device.
2268  *
2269  * Teardown components within &struct amdgpu_display_manager that require
2270  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2271  * were loaded. Also flush IRQ workqueues and disable them.
2272  */
2273 static int dm_hw_fini(void *handle)
2274 {
2275         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2276
2277         amdgpu_dm_hpd_fini(adev);
2278
2279         amdgpu_dm_irq_fini(adev);
2280         amdgpu_dm_fini(adev);
2281         return 0;
2282 }
2283
2285 static int dm_enable_vblank(struct drm_crtc *crtc);
2286 static void dm_disable_vblank(struct drm_crtc *crtc);
2287
2288 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2289                                  struct dc_state *state, bool enable)
2290 {
2291         enum dc_irq_source irq_source;
2292         struct amdgpu_crtc *acrtc;
2293         int rc = -EBUSY;
2294         int i = 0;
2295
2296         for (i = 0; i < state->stream_count; i++) {
2297                 acrtc = get_crtc_by_otg_inst(
2298                                 adev, state->stream_status[i].primary_otg_inst);
2299
2300                 if (acrtc && state->stream_status[i].plane_count != 0) {
2301                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2302                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2304                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2305                         if (rc)
2306                                 DRM_WARN("Failed to %s pflip interrupts\n",
2307                                          enable ? "enable" : "disable");
2308
2309                         if (enable) {
2310                                 rc = dm_enable_vblank(&acrtc->base);
2311                                 if (rc)
2312                                         DRM_WARN("Failed to enable vblank interrupts\n");
2313                         } else {
2314                                 dm_disable_vblank(&acrtc->base);
2315                         }
		}
	}
}
2321
2322 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2323 {
2324         struct dc_state *context = NULL;
2325         enum dc_status res = DC_ERROR_UNEXPECTED;
2326         int i;
2327         struct dc_stream_state *del_streams[MAX_PIPES];
2328         int del_streams_count = 0;
2329
2330         memset(del_streams, 0, sizeof(del_streams));
2331
2332         context = dc_create_state(dc);
2333         if (context == NULL)
2334                 goto context_alloc_fail;
2335
2336         dc_resource_state_copy_construct_current(dc, context);
2337
2338         /* First remove from context all streams */
2339         for (i = 0; i < context->stream_count; i++) {
2340                 struct dc_stream_state *stream = context->streams[i];
2341
2342                 del_streams[del_streams_count++] = stream;
2343         }
2344
2345         /* Remove all planes for removed streams and then remove the streams */
2346         for (i = 0; i < del_streams_count; i++) {
2347                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2348                         res = DC_FAIL_DETACH_SURFACES;
2349                         goto fail;
2350                 }
2351
2352                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2353                 if (res != DC_OK)
2354                         goto fail;
2355         }
2356
2357         res = dc_commit_state(dc, context);
2358
2359 fail:
2360         dc_release_state(context);
2361
2362 context_alloc_fail:
2363         return res;
2364 }
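
/*
 * Design note: amdgpu_dm_commit_zero_streams() commits an "empty" display
 * state by copying the current context, detaching all planes from each
 * stream first and only then removing the streams themselves, before
 * committing the stripped-down context back to DC.
 */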
2365
2366 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2367 {
2368         int i;
2369
2370         if (dm->hpd_rx_offload_wq) {
2371                 for (i = 0; i < dm->dc->caps.max_links; i++)
2372                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2373         }
2374 }
2375
2376 static int dm_suspend(void *handle)
2377 {
2378         struct amdgpu_device *adev = handle;
2379         struct amdgpu_display_manager *dm = &adev->dm;
2380         int ret = 0;
2381
2382         if (amdgpu_in_reset(adev)) {
2383                 mutex_lock(&dm->dc_lock);
2384
2385 #if defined(CONFIG_DRM_AMD_DC_DCN)
2386                 dc_allow_idle_optimizations(adev->dm.dc, false);
2387 #endif
2388
2389                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2390
2391                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2392
2393                 amdgpu_dm_commit_zero_streams(dm->dc);
2394
2395                 amdgpu_dm_irq_suspend(adev);
2396
2397                 hpd_rx_irq_work_suspend(dm);
2398
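		/*
		 * Note: dm->dc_lock is intentionally left held here; the
		 * matching mutex_unlock() happens in dm_resume() once the GPU
		 * reset path has replayed the cached DC state.
		 */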
2399                 return ret;
2400         }
2401
2402         WARN_ON(adev->dm.cached_state);
2403         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2404
2405         s3_handle_mst(adev_to_drm(adev), true);
2406
2407         amdgpu_dm_irq_suspend(adev);
2408
2409         hpd_rx_irq_work_suspend(dm);
2410
2411         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2412
2413         return 0;
2414 }
2415
2416 static struct amdgpu_dm_connector *
2417 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2418                                              struct drm_crtc *crtc)
2419 {
2420         uint32_t i;
2421         struct drm_connector_state *new_con_state;
2422         struct drm_connector *connector;
2423         struct drm_crtc *crtc_from_state;
2424
2425         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2426                 crtc_from_state = new_con_state->crtc;
2427
2428                 if (crtc_from_state == crtc)
2429                         return to_amdgpu_dm_connector(connector);
2430         }
2431
2432         return NULL;
2433 }
2434
2435 static void emulated_link_detect(struct dc_link *link)
2436 {
2437         struct dc_sink_init_data sink_init_data = { 0 };
2438         struct display_sink_capability sink_caps = { 0 };
2439         enum dc_edid_status edid_status;
2440         struct dc_context *dc_ctx = link->ctx;
2441         struct dc_sink *sink = NULL;
2442         struct dc_sink *prev_sink = NULL;
2443
2444         link->type = dc_connection_none;
2445         prev_sink = link->local_sink;
2446
2447         if (prev_sink)
2448                 dc_sink_release(prev_sink);
2449
2450         switch (link->connector_signal) {
2451         case SIGNAL_TYPE_HDMI_TYPE_A: {
2452                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2453                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2454                 break;
2455         }
2456
2457         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2458                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2459                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2460                 break;
2461         }
2462
2463         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2464                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2465                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2466                 break;
2467         }
2468
2469         case SIGNAL_TYPE_LVDS: {
2470                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2471                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2472                 break;
2473         }
2474
2475         case SIGNAL_TYPE_EDP: {
2476                 sink_caps.transaction_type =
2477                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2478                 sink_caps.signal = SIGNAL_TYPE_EDP;
2479                 break;
2480         }
2481
2482         case SIGNAL_TYPE_DISPLAY_PORT: {
2483                 sink_caps.transaction_type =
2484                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2485                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2486                 break;
2487         }
2488
2489         default:
2490                 DC_ERROR("Invalid connector type! signal:%d\n",
2491                         link->connector_signal);
2492                 return;
2493         }
2494
2495         sink_init_data.link = link;
2496         sink_init_data.sink_signal = sink_caps.signal;
2497
2498         sink = dc_sink_create(&sink_init_data);
2499         if (!sink) {
2500                 DC_ERROR("Failed to create sink!\n");
2501                 return;
2502         }
2503
2504         /* dc_sink_create returns a new reference */
2505         link->local_sink = sink;
2506
2507         edid_status = dm_helpers_read_local_edid(
2508                         link->ctx,
2509                         link,
2510                         sink);
2511
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
2516
2517 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2518                                      struct amdgpu_display_manager *dm)
2519 {
2520         struct {
2521                 struct dc_surface_update surface_updates[MAX_SURFACES];
2522                 struct dc_plane_info plane_infos[MAX_SURFACES];
2523                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2524                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2525                 struct dc_stream_update stream_update;
	} *bundle;
2527         int k, m;
2528
2529         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2530
2531         if (!bundle) {
2532                 dm_error("Failed to allocate update bundle\n");
2533                 goto cleanup;
2534         }
2535
	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		/* Use the stream_status entry matching this stream. */
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}
2549         }
2550
2551 cleanup:
2552         kfree(bundle);
}
2556
2557 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2558 {
2559         struct dc_stream_state *stream_state;
2560         struct amdgpu_dm_connector *aconnector = link->priv;
2561         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2562         struct dc_stream_update stream_update;
2563         bool dpms_off = true;
2564
2565         memset(&stream_update, 0, sizeof(stream_update));
2566         stream_update.dpms_off = &dpms_off;
2567
2568         mutex_lock(&adev->dm.dc_lock);
2569         stream_state = dc_stream_find_from_link(link);
2570
2571         if (stream_state == NULL) {
2572                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2573                 mutex_unlock(&adev->dm.dc_lock);
2574                 return;
2575         }
2576
2577         stream_update.stream = stream_state;
2578         acrtc_state->force_dpms_off = true;
2579         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2580                                      stream_state, &stream_update,
2581                                      stream_state->ctx->dc->current_state);
2582         mutex_unlock(&adev->dm.dc_lock);
2583 }
2584
2585 static int dm_resume(void *handle)
2586 {
2587         struct amdgpu_device *adev = handle;
2588         struct drm_device *ddev = adev_to_drm(adev);
2589         struct amdgpu_display_manager *dm = &adev->dm;
2590         struct amdgpu_dm_connector *aconnector;
2591         struct drm_connector *connector;
2592         struct drm_connector_list_iter iter;
2593         struct drm_crtc *crtc;
2594         struct drm_crtc_state *new_crtc_state;
2595         struct dm_crtc_state *dm_new_crtc_state;
2596         struct drm_plane *plane;
2597         struct drm_plane_state *new_plane_state;
2598         struct dm_plane_state *dm_new_plane_state;
2599         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2600         enum dc_connection_type new_connection_type = dc_connection_none;
2601         struct dc_state *dc_state;
2602         int i, r, j;
2603
2604         if (amdgpu_in_reset(adev)) {
2605                 dc_state = dm->cached_dc_state;
2606
2607                 /*
2608                  * The dc->current_state is backed up into dm->cached_dc_state
2609                  * before we commit 0 streams.
2610                  *
2611                  * DC will clear link encoder assignments on the real state
2612                  * but the changes won't propagate over to the copy we made
2613                  * before the 0 streams commit.
2614                  *
2615                  * DC expects that link encoder assignments are *not* valid
2616                  * when committing a state, so as a workaround it needs to be
2617                  * cleared here.
2618                  */
2619                 link_enc_cfg_init(dm->dc, dc_state);
2620
2621                 if (dc_enable_dmub_notifications(adev->dm.dc))
2622                         amdgpu_dm_outbox_init(adev);
2623
2624                 r = dm_dmub_hw_init(adev);
2625                 if (r)
2626                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2627
2628                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2629                 dc_resume(dm->dc);
2630
2631                 amdgpu_dm_irq_resume_early(adev);
2632
2633                 for (i = 0; i < dc_state->stream_count; i++) {
2634                         dc_state->streams[i]->mode_changed = true;
2635                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2636                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2637                                         = 0xffffffff;
2638                         }
2639                 }
2640
2641                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2642
2643                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2644
2645                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2646
2647                 dc_release_state(dm->cached_dc_state);
2648                 dm->cached_dc_state = NULL;
2649
2650                 amdgpu_dm_irq_resume_late(adev);
2651
2652                 mutex_unlock(&dm->dc_lock);
2653
2654                 return 0;
2655         }
2656         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2657         dc_release_state(dm_state->context);
2658         dm_state->context = dc_create_state(dm->dc);
2659         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2660         dc_resource_state_construct(dm->dc, dm_state->context);
2661
2662         /* Re-enable outbox interrupts for DPIA. */
2663         if (dc_enable_dmub_notifications(adev->dm.dc))
2664                 amdgpu_dm_outbox_init(adev);
2665
2666         /* Before powering on DC we need to re-initialize DMUB. */
2667         dm_dmub_hw_resume(adev);
2668
2669         /* power on hardware */
2670         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2671
2672         /* program HPD filter */
2673         dc_resume(dm->dc);
2674
	/*
	 * Enable the HPD Rx IRQ early; this must be done before the mode is
	 * set, as short-pulse interrupts are used for MST.
	 */
2679         amdgpu_dm_irq_resume_early(adev);
2680
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2682         s3_handle_mst(ddev, false);
2683
	/* Do detection */
2685         drm_connector_list_iter_begin(ddev, &iter);
2686         drm_for_each_connector_iter(connector, &iter) {
2687                 aconnector = to_amdgpu_dm_connector(connector);
2688
		/*
		 * Connectors already created for MST are traversed here too;
		 * they should be skipped.
		 */
2693                 if (aconnector->mst_port)
2694                         continue;
2695
2696                 mutex_lock(&aconnector->hpd_lock);
2697                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2698                         DRM_ERROR("KMS: Failed to detect connector\n");
2699
2700                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2701                         emulated_link_detect(aconnector->dc_link);
2702                 else
2703                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2704
2705                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2706                         aconnector->fake_enable = false;
2707
2708                 if (aconnector->dc_sink)
2709                         dc_sink_release(aconnector->dc_sink);
2710                 aconnector->dc_sink = NULL;
2711                 amdgpu_dm_update_connector_after_detect(aconnector);
2712                 mutex_unlock(&aconnector->hpd_lock);
2713         }
2714         drm_connector_list_iter_end(&iter);
2715
2716         /* Force mode set in atomic commit */
2717         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2718                 new_crtc_state->active_changed = true;
2719
2720         /*
2721          * atomic_check is expected to create the dc states. We need to release
2722          * them here, since they were duplicated as part of the suspend
2723          * procedure.
2724          */
2725         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2726                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2727                 if (dm_new_crtc_state->stream) {
2728                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2729                         dc_stream_release(dm_new_crtc_state->stream);
2730                         dm_new_crtc_state->stream = NULL;
2731                 }
2732         }
2733
2734         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2735                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2736                 if (dm_new_plane_state->dc_state) {
2737                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2738                         dc_plane_state_release(dm_new_plane_state->dc_state);
2739                         dm_new_plane_state->dc_state = NULL;
2740                 }
2741         }
2742
2743         drm_atomic_helper_resume(ddev, dm->cached_state);
2744
2745         dm->cached_state = NULL;
2746
2747         amdgpu_dm_irq_resume_late(adev);
2748
2749         amdgpu_dm_smu_write_watermarks_table(adev);
2750
2751         return 0;
2752 }
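
/*
 * Summary of the two resume paths above: resume from GPU reset replays the
 * DC state cached by dm_suspend() and re-enables interrupts around it, while
 * resume from S3 rebuilds a fresh dc_state, re-runs sink detection, and
 * forces a modeset through the cached atomic state.
 */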
2753
2754 /**
2755  * DOC: DM Lifecycle
2756  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2758  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2759  * the base driver's device list to be initialized and torn down accordingly.
2760  *
2761  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2762  */
2763
2764 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2765         .name = "dm",
2766         .early_init = dm_early_init,
2767         .late_init = dm_late_init,
2768         .sw_init = dm_sw_init,
2769         .sw_fini = dm_sw_fini,
2770         .early_fini = amdgpu_dm_early_fini,
2771         .hw_init = dm_hw_init,
2772         .hw_fini = dm_hw_fini,
2773         .suspend = dm_suspend,
2774         .resume = dm_resume,
2775         .is_idle = dm_is_idle,
2776         .wait_for_idle = dm_wait_for_idle,
2777         .check_soft_reset = dm_check_soft_reset,
2778         .soft_reset = dm_soft_reset,
2779         .set_clockgating_state = dm_set_clockgating_state,
2780         .set_powergating_state = dm_set_powergating_state,
2781 };
2782
2783 const struct amdgpu_ip_block_version dm_ip_block =
2784 {
2785         .type = AMD_IP_BLOCK_TYPE_DCE,
2786         .major = 1,
2787         .minor = 0,
2788         .rev = 0,
2789         .funcs = &amdgpu_dm_funcs,
2790 };
2791
2792
2793 /**
2794  * DOC: atomic
2795  *
2796  * *WIP*
2797  */
2798
2799 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2800         .fb_create = amdgpu_display_user_framebuffer_create,
2801         .get_format_info = amd_get_format_info,
2802         .output_poll_changed = drm_fb_helper_output_poll_changed,
2803         .atomic_check = amdgpu_dm_atomic_check,
2804         .atomic_commit = drm_atomic_helper_commit,
2805 };
2806
2807 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2808         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2809 };
2810
2811 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2812 {
2813         u32 max_cll, min_cll, max, min, q, r;
2814         struct amdgpu_dm_backlight_caps *caps;
2815         struct amdgpu_display_manager *dm;
2816         struct drm_connector *conn_base;
2817         struct amdgpu_device *adev;
2818         struct dc_link *link = NULL;
2819         static const u8 pre_computed_values[] = {
2820                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2821                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2822         int i;
2823
2824         if (!aconnector || !aconnector->dc_link)
2825                 return;
2826
2827         link = aconnector->dc_link;
2828         if (link->connector_signal != SIGNAL_TYPE_EDP)
2829                 return;
2830
2831         conn_base = &aconnector->base;
2832         adev = drm_to_adev(conn_base->dev);
2833         dm = &adev->dm;
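        /* find which eDP backlight entry this connector's link drives */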
2834         for (i = 0; i < dm->num_of_edps; i++) {
2835                 if (link == dm->backlight_link[i])
2836                         break;
2837         }
2838         if (i >= dm->num_of_edps)
2839                 return;
2840         caps = &dm->backlight_caps[i];
2841         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2842         caps->aux_support = false;
2843         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2844         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2845
2846         if (caps->ext_caps->bits.oled == 1 /*||
2847             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2848             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2849                 caps->aux_support = true;
2850
2851         if (amdgpu_backlight == 0)
2852                 caps->aux_support = false;
2853         else if (amdgpu_backlight == 1)
2854                 caps->aux_support = true;
2855
2856         /* From the specification (CTA-861-G), the maximum luminance is
2857          * calculated as:
2858          *      Luminance = 50*2**(CV/32)
2859          * where CV is a one-byte value.
2860          * Evaluating this expression directly would require floating-point
2861          * precision; to avoid that complexity, we use the fact that CV is
2862          * divided by a constant. By Euclid's division algorithm, CV can be
2863          * written as CV = 32*q + r. Substituting into the Luminance
2864          * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2865          * pre-compute 50*(2**(r/32)) for each r in 0..31. The values were
2866          * pre-computed with the following Ruby line:
2867          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2868          * The results of the above expression can be verified at
2869          * pre_computed_values.
2870          */
2871         q = max_cll >> 5;
2872         r = max_cll % 32;
2873         max = (1 << q) * pre_computed_values[r];
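        // e.g. max_cll = 70: q = 2, r = 6, so max = (1 << 2) * 57 = 228,
        // which matches 50*2**(70/32) ~= 228 from the formula above.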
2874
2875         // min luminance: maxLum * (CV/255)^2 / 100
2876         // (computed in a single integer division so the squared term is not
2877         //  truncated to zero; the product fits within 32 bits for byte-sized CV)
2877         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
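        // e.g. min_cll = 200 with max = 228 nits:
        // min = round(228*200*200 / 6502500) = 1 nit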
2878
2879         caps->aux_max_input_signal = max;
2880         caps->aux_min_input_signal = min;
2881 }
2882
2883 void amdgpu_dm_update_connector_after_detect(
2884                 struct amdgpu_dm_connector *aconnector)
2885 {
2886         struct drm_connector *connector = &aconnector->base;
2887         struct drm_device *dev = connector->dev;
2888         struct dc_sink *sink;
2889
2890         /* MST handled by drm_mst framework */
2891         if (aconnector->mst_mgr.mst_state)
2892                 return;
2893
2894         sink = aconnector->dc_link->local_sink;
2895         if (sink)
2896                 dc_sink_retain(sink);
2897
2898         /*
2899          * An EDID-managed connector gets its first update only in the mode_valid
2900          * hook; the connector sink is then set to either a fake or a physical
2901          * sink depending on the link status. Skip if already done during boot.
2902          */
2903         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2904                         && aconnector->dc_em_sink) {
2905
2906                 /*
2907                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2908                  * fake a stream, because connector->sink is set to NULL on resume.
2909                  */
2910                 mutex_lock(&dev->mode_config.mutex);
2911
2912                 if (sink) {
2913                         if (aconnector->dc_sink) {
2914                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2915                                 /*
2916                                  * The retain and release below bump the sink's
2917                                  * refcount because the link no longer points to it
2918                                  * after disconnect; otherwise the next crtc-to-connector
2919                                  * reshuffle by the UMD would cause an unwanted dc_sink release.
2920                                  */
2921                                 dc_sink_release(aconnector->dc_sink);
2922                         }
2923                         aconnector->dc_sink = sink;
2924                         dc_sink_retain(aconnector->dc_sink);
2925                         amdgpu_dm_update_freesync_caps(connector,
2926                                         aconnector->edid);
2927                 } else {
2928                         amdgpu_dm_update_freesync_caps(connector, NULL);
2929                         if (!aconnector->dc_sink) {
2930                                 aconnector->dc_sink = aconnector->dc_em_sink;
2931                                 dc_sink_retain(aconnector->dc_sink);
2932                         }
2933                 }
2934
2935                 mutex_unlock(&dev->mode_config.mutex);
2936
2937                 if (sink)
2938                         dc_sink_release(sink);
2939                 return;
2940         }
2941
2942         /*
2943          * TODO: temporary guard until a proper fix is found.
2944          * If this sink is an MST sink, we should not do anything.
2945          */
2946         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2947                 dc_sink_release(sink);
2948                 return;
2949         }
2950
2951         if (aconnector->dc_sink == sink) {
2952                 /*
2953                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2954                  * Do nothing!!
2955                  */
2956                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2957                                 aconnector->connector_id);
2958                 if (sink)
2959                         dc_sink_release(sink);
2960                 return;
2961         }
2962
2963         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2964                 aconnector->connector_id, aconnector->dc_sink, sink);
2965
2966         mutex_lock(&dev->mode_config.mutex);
2967
2968         /*
2969          * 1. Update status of the drm connector
2970          * 2. Send an event and let userspace tell us what to do
2971          */
2972         if (sink) {
2973                 /*
2974                  * TODO: check if we still need the S3 mode update workaround.
2975                  * If yes, put it here.
2976                  */
2977                 if (aconnector->dc_sink) {
2978                         amdgpu_dm_update_freesync_caps(connector, NULL);
2979                         dc_sink_release(aconnector->dc_sink);
2980                 }
2981
2982                 aconnector->dc_sink = sink;
2983                 dc_sink_retain(aconnector->dc_sink);
2984                 if (sink->dc_edid.length == 0) {
2985                         aconnector->edid = NULL;
2986                         if (aconnector->dc_link->aux_mode) {
2987                                 drm_dp_cec_unset_edid(
2988                                         &aconnector->dm_dp_aux.aux);
2989                         }
2990                 } else {
2991                         aconnector->edid =
2992                                 (struct edid *)sink->dc_edid.raw_edid;
2993
2994                         if (aconnector->dc_link->aux_mode)
2995                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2996                                                     aconnector->edid);
2997                 }
2998
2999                 drm_connector_update_edid_property(connector, aconnector->edid);
3000                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3001                 update_connector_ext_caps(aconnector);
3002         } else {
3003                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3004                 amdgpu_dm_update_freesync_caps(connector, NULL);
3005                 drm_connector_update_edid_property(connector, NULL);
3006                 aconnector->num_modes = 0;
3007                 dc_sink_release(aconnector->dc_sink);
3008                 aconnector->dc_sink = NULL;
3009                 aconnector->edid = NULL;
3010 #ifdef CONFIG_DRM_AMD_DC_HDCP
3011                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3012                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3013                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3014 #endif
3015         }
3016
3017         mutex_unlock(&dev->mode_config.mutex);
3018
3019         update_subconnector_property(aconnector);
3020
3021         if (sink)
3022                 dc_sink_release(sink);
3023 }
3024
3025 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3026 {
3027         struct drm_connector *connector = &aconnector->base;
3028         struct drm_device *dev = connector->dev;
3029         enum dc_connection_type new_connection_type = dc_connection_none;
3030         struct amdgpu_device *adev = drm_to_adev(dev);
3031         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3032         struct dm_crtc_state *dm_crtc_state = NULL;
3033
3034         if (adev->dm.disable_hpd_irq)
3035                 return;
3036
3037         if (dm_con_state->base.state && dm_con_state->base.crtc)
3038                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3039                                         dm_con_state->base.state,
3040                                         dm_con_state->base.crtc));
3041         /*
3042          * In case of failure, or for MST, there is no need to update the connector
3043          * status or notify the OS, since MST handles this in its own context.
3044          */
3045         mutex_lock(&aconnector->hpd_lock);
3046
3047 #ifdef CONFIG_DRM_AMD_DC_HDCP
3048         if (adev->dm.hdcp_workqueue) {
3049                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3050                 dm_con_state->update_hdcp = true;
3051         }
3052 #endif
3053         if (aconnector->fake_enable)
3054                 aconnector->fake_enable = false;
3055
3056         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3057                 DRM_ERROR("KMS: Failed to detect connector\n");
3058
3059         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3060                 emulated_link_detect(aconnector->dc_link);
3061
3062                 drm_modeset_lock_all(dev);
3063                 dm_restore_drm_connector_state(dev, connector);
3064                 drm_modeset_unlock_all(dev);
3065
3066                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3067                         drm_kms_helper_connector_hotplug_event(connector);
3068
3069         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3070                 if (new_connection_type == dc_connection_none &&
3071                     aconnector->dc_link->type == dc_connection_none &&
3072                     dm_crtc_state)
3073                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3074
3075                 amdgpu_dm_update_connector_after_detect(aconnector);
3076
3077                 drm_modeset_lock_all(dev);
3078                 dm_restore_drm_connector_state(dev, connector);
3079                 drm_modeset_unlock_all(dev);
3080
3081                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3082                         drm_kms_helper_connector_hotplug_event(connector);
3083         }
3084         mutex_unlock(&aconnector->hpd_lock);
3086 }
3087
3088 static void handle_hpd_irq(void *param)
3089 {
3090         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3091
3092         handle_hpd_irq_helper(aconnector);
3094 }
3095
3096 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3097 {
3098         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3099         uint8_t dret;
3100         bool new_irq_handled = false;
3101         int dpcd_addr;
3102         int dpcd_bytes_to_read;
3103
3104         const int max_process_count = 30;
3105         int process_count = 0;
3106
3107         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3108
3109         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3110                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3111                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3112                 dpcd_addr = DP_SINK_COUNT;
3113         } else {
3114                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3115                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3116                 dpcd_addr = DP_SINK_COUNT_ESI;
3117         }
3118
3119         dret = drm_dp_dpcd_read(
3120                 &aconnector->dm_dp_aux.aux,
3121                 dpcd_addr,
3122                 esi,
3123                 dpcd_bytes_to_read);
3124
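        /*
         * Handle-and-ACK loop: let the MST manager service the ESI vector,
         * ACK the handled bits by writing them back at dpcd_addr + 1, then
         * re-read in case new IRQs were raised meanwhile (at most 30 passes).
         */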
3125         while (dret == dpcd_bytes_to_read &&
3126                 process_count < max_process_count) {
3127                 uint8_t retry;
3128                 dret = 0;
3129
3130                 process_count++;
3131
3132                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3133                 /* handle HPD short pulse irq */
3134                 if (aconnector->mst_mgr.mst_state)
3135                         drm_dp_mst_hpd_irq(
3136                                 &aconnector->mst_mgr,
3137                                 esi,
3138                                 &new_irq_handled);
3139
3140                 if (new_irq_handled) {
3141                         /* ACK at DPCD to notify downstream */
3142                         const int ack_dpcd_bytes_to_write =
3143                                 dpcd_bytes_to_read - 1;
3144
3145                         for (retry = 0; retry < 3; retry++) {
3146                                 uint8_t wret;
3147
3148                                 wret = drm_dp_dpcd_write(
3149                                         &aconnector->dm_dp_aux.aux,
3150                                         dpcd_addr + 1,
3151                                         &esi[1],
3152                                         ack_dpcd_bytes_to_write);
3153                                 if (wret == ack_dpcd_bytes_to_write)
3154                                         break;
3155                         }
3156
3157                         /* check if there is new irq to be handled */
3158                         dret = drm_dp_dpcd_read(
3159                                 &aconnector->dm_dp_aux.aux,
3160                                 dpcd_addr,
3161                                 esi,
3162                                 dpcd_bytes_to_read);
3163
3164                         new_irq_handled = false;
3165                 } else {
3166                         break;
3167                 }
3168         }
3169
3170         if (process_count == max_process_count)
3171                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3172 }
3173
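/*
 * Wrap the hpd_rx IRQ data in a work item and queue it on the connector's
 * offload workqueue, so that the heavier handling (link loss, automated
 * test requests) runs in process context via dm_handle_hpd_rx_offload_work()
 * instead of in the IRQ handler itself.
 */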
3174 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3175                                                         union hpd_irq_data hpd_irq_data)
3176 {
3177         struct hpd_rx_irq_offload_work *offload_work =
3178                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3179
3180         if (!offload_work) {
3181                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3182                 return;
3183         }
3184
3185         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3186         offload_work->data = hpd_irq_data;
3187         offload_work->offload_wq = offload_wq;
3188
3189         queue_work(offload_wq->wq, &offload_work->work);
3190         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3191 }
3192
3193 static void handle_hpd_rx_irq(void *param)
3194 {
3195         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3196         struct drm_connector *connector = &aconnector->base;
3197         struct drm_device *dev = connector->dev;
3198         struct dc_link *dc_link = aconnector->dc_link;
3199         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3200         bool result = false;
3201         enum dc_connection_type new_connection_type = dc_connection_none;
3202         struct amdgpu_device *adev = drm_to_adev(dev);
3203         union hpd_irq_data hpd_irq_data;
3204         bool link_loss = false;
3205         bool has_left_work = false;
3206         int idx = aconnector->base.index;
3207         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3208
3209         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3210
3211         if (adev->dm.disable_hpd_irq)
3212                 return;
3213
3214         /*
3215          * TODO: This mutex temporarily protects the hpd interrupt from a gpio
3216          * conflict; once an i2c helper is implemented, this mutex should be
3217          * retired.
3218          */
3219         mutex_lock(&aconnector->hpd_lock);
3220
3221         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3222                                                 &link_loss, true, &has_left_work);
3223
3224         if (!has_left_work)
3225                 goto out;
3226
3227         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3228                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3229                 goto out;
3230         }
3231
3232         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3233                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3234                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3235                         dm_handle_mst_sideband_msg(aconnector);
3236                         goto out;
3237                 }
3238
3239                 if (link_loss) {
3240                         bool skip = false;
3241
3242                         spin_lock(&offload_wq->offload_lock);
3243                         skip = offload_wq->is_handling_link_loss;
3244
3245                         if (!skip)
3246                                 offload_wq->is_handling_link_loss = true;
3247
3248                         spin_unlock(&offload_wq->offload_lock);
3249
3250                         if (!skip)
3251                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3252
3253                         goto out;
3254                 }
3255         }
3256
3257 out:
3258         if (result && !is_mst_root_connector) {
3259                 /* Downstream Port status changed. */
3260                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3261                         DRM_ERROR("KMS: Failed to detect connector\n");
3262
3263                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3264                         emulated_link_detect(dc_link);
3265
3266                         if (aconnector->fake_enable)
3267                                 aconnector->fake_enable = false;
3268
3269                         amdgpu_dm_update_connector_after_detect(aconnector);
3270
3272                         drm_modeset_lock_all(dev);
3273                         dm_restore_drm_connector_state(dev, connector);
3274                         drm_modeset_unlock_all(dev);
3275
3276                         drm_kms_helper_connector_hotplug_event(connector);
3277                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3278
3279                         if (aconnector->fake_enable)
3280                                 aconnector->fake_enable = false;
3281
3282                         amdgpu_dm_update_connector_after_detect(aconnector);
3283
3285                         drm_modeset_lock_all(dev);
3286                         dm_restore_drm_connector_state(dev, connector);
3287                         drm_modeset_unlock_all(dev);
3288
3289                         drm_kms_helper_connector_hotplug_event(connector);
3290                 }
3291         }
3292 #ifdef CONFIG_DRM_AMD_DC_HDCP
3293         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3294                 if (adev->dm.hdcp_workqueue)
3295                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3296         }
3297 #endif
3298
3299         if (dc_link->type != dc_connection_mst_branch)
3300                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3301
3302         mutex_unlock(&aconnector->hpd_lock);
3303 }
3304
3305 static void register_hpd_handlers(struct amdgpu_device *adev)
3306 {
3307         struct drm_device *dev = adev_to_drm(adev);
3308         struct drm_connector *connector;
3309         struct amdgpu_dm_connector *aconnector;
3310         const struct dc_link *dc_link;
3311         struct dc_interrupt_params int_params = {0};
3312
3313         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3314         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3315
3316         list_for_each_entry(connector,
3317                         &dev->mode_config.connector_list, head) {
3318
3319                 aconnector = to_amdgpu_dm_connector(connector);
3320                 dc_link = aconnector->dc_link;
3321
3322                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3323                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3324                         int_params.irq_source = dc_link->irq_source_hpd;
3325
3326                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3327                                         handle_hpd_irq,
3328                                         (void *) aconnector);
3329                 }
3330
3331                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3332
3333                         /* Also register for DP short pulse (hpd_rx). */
3334                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3335                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3336
3337                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3338                                         handle_hpd_rx_irq,
3339                                         (void *) aconnector);
3340
3341                         if (adev->dm.hpd_rx_offload_wq)
3342                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3343                                         aconnector;
3344                 }
3345         }
3346 }
3347
3348 #if defined(CONFIG_DRM_AMD_DC_SI)
3349 /* Register IRQ sources and initialize IRQ callbacks */
3350 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3351 {
3352         struct dc *dc = adev->dm.dc;
3353         struct common_irq_params *c_irq_params;
3354         struct dc_interrupt_params int_params = {0};
3355         int r;
3356         int i;
3357         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3358
3359         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3360         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3361
3362         /*
3363          * Actions of amdgpu_irq_add_id():
3364          * 1. Register a set() function with base driver.
3365          *    Base driver will call set() function to enable/disable an
3366          *    interrupt in DC hardware.
3367          * 2. Register amdgpu_dm_irq_handler().
3368          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3369          *    coming from DC hardware.
3370          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3371          *    for acknowledging and handling. */
3372
3373         /* Use VBLANK interrupt */
3374         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3375                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3376                 if (r) {
3377                         DRM_ERROR("Failed to add crtc irq id!\n");
3378                         return r;
3379                 }
3380
3381                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3382                 int_params.irq_source =
3383                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3384
3385                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3386
3387                 c_irq_params->adev = adev;
3388                 c_irq_params->irq_src = int_params.irq_source;
3389
3390                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3391                                 dm_crtc_high_irq, c_irq_params);
3392         }
3393
3394         /* Use GRPH_PFLIP interrupt */
3395         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3396                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3397                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3398                 if (r) {
3399                         DRM_ERROR("Failed to add page flip irq id!\n");
3400                         return r;
3401                 }
3402
3403                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3404                 int_params.irq_source =
3405                         dc_interrupt_to_irq_source(dc, i, 0);
3406
3407                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3408
3409                 c_irq_params->adev = adev;
3410                 c_irq_params->irq_src = int_params.irq_source;
3411
3412                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3413                                 dm_pflip_high_irq, c_irq_params);
3415         }
3416
3417         /* HPD */
3418         r = amdgpu_irq_add_id(adev, client_id,
3419                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3420         if (r) {
3421                 DRM_ERROR("Failed to add hpd irq id!\n");
3422                 return r;
3423         }
3424
3425         register_hpd_handlers(adev);
3426
3427         return 0;
3428 }
3429 #endif
3430
3431 /* Register IRQ sources and initialize IRQ callbacks */
3432 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3433 {
3434         struct dc *dc = adev->dm.dc;
3435         struct common_irq_params *c_irq_params;
3436         struct dc_interrupt_params int_params = {0};
3437         int r;
3438         int i;
3439         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3440
3441         if (adev->family >= AMDGPU_FAMILY_AI)
3442                 client_id = SOC15_IH_CLIENTID_DCE;
3443
3444         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3445         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3446
3447         /*
3448          * Actions of amdgpu_irq_add_id():
3449          * 1. Register a set() function with base driver.
3450          *    Base driver will call set() function to enable/disable an
3451          *    interrupt in DC hardware.
3452          * 2. Register amdgpu_dm_irq_handler().
3453          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3454          *    coming from DC hardware.
3455          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3456          *    for acknowledging and handling. */
3457
3458         /* Use VBLANK interrupt */
3459         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3460                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3461                 if (r) {
3462                         DRM_ERROR("Failed to add crtc irq id!\n");
3463                         return r;
3464                 }
3465
3466                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3467                 int_params.irq_source =
3468                         dc_interrupt_to_irq_source(dc, i, 0);
3469
3470                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3471
3472                 c_irq_params->adev = adev;
3473                 c_irq_params->irq_src = int_params.irq_source;
3474
3475                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3476                                 dm_crtc_high_irq, c_irq_params);
3477         }
3478
3479         /* Use VUPDATE interrupt */
3480         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3481                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3482                 if (r) {
3483                         DRM_ERROR("Failed to add vupdate irq id!\n");
3484                         return r;
3485                 }
3486
3487                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3488                 int_params.irq_source =
3489                         dc_interrupt_to_irq_source(dc, i, 0);
3490
3491                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3492
3493                 c_irq_params->adev = adev;
3494                 c_irq_params->irq_src = int_params.irq_source;
3495
3496                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3497                                 dm_vupdate_high_irq, c_irq_params);
3498         }
3499
3500         /* Use GRPH_PFLIP interrupt */
3501         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3502                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3503                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3504                 if (r) {
3505                         DRM_ERROR("Failed to add page flip irq id!\n");
3506                         return r;
3507                 }
3508
3509                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3510                 int_params.irq_source =
3511                         dc_interrupt_to_irq_source(dc, i, 0);
3512
3513                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3514
3515                 c_irq_params->adev = adev;
3516                 c_irq_params->irq_src = int_params.irq_source;
3517
3518                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3519                                 dm_pflip_high_irq, c_irq_params);
3521         }
3522
3523         /* HPD */
3524         r = amdgpu_irq_add_id(adev, client_id,
3525                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3526         if (r) {
3527                 DRM_ERROR("Failed to add hpd irq id!\n");
3528                 return r;
3529         }
3530
3531         register_hpd_handlers(adev);
3532
3533         return 0;
3534 }
3535
3536 #if defined(CONFIG_DRM_AMD_DC_DCN)
3537 /* Register IRQ sources and initialize IRQ callbacks */
3538 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3539 {
3540         struct dc *dc = adev->dm.dc;
3541         struct common_irq_params *c_irq_params;
3542         struct dc_interrupt_params int_params = {0};
3543         int r;
3544         int i;
3545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3546         static const unsigned int vrtl_int_srcid[] = {
3547                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3548                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3549                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3550                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3551                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3552                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3553         };
3554 #endif
3555
3556         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3557         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3558
3559         /*
3560          * Actions of amdgpu_irq_add_id():
3561          * 1. Register a set() function with base driver.
3562          *    Base driver will call set() function to enable/disable an
3563          *    interrupt in DC hardware.
3564          * 2. Register amdgpu_dm_irq_handler().
3565          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3566          *    coming from DC hardware.
3567          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3568          *    for acknowledging and handling.
3569          */
3570
3571         /* Use VSTARTUP interrupt */
3572         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3573                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3574                         i++) {
3575                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3576
3577                 if (r) {
3578                         DRM_ERROR("Failed to add crtc irq id!\n");
3579                         return r;
3580                 }
3581
3582                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3583                 int_params.irq_source =
3584                         dc_interrupt_to_irq_source(dc, i, 0);
3585
3586                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3587
3588                 c_irq_params->adev = adev;
3589                 c_irq_params->irq_src = int_params.irq_source;
3590
3591                 amdgpu_dm_irq_register_interrupt(
3592                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3593         }
3594
3595         /* Use otg vertical line interrupt */
3596 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3597         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3598                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3599                                 vrtl_int_srcid[i], &adev->vline0_irq);
3600
3601                 if (r) {
3602                         DRM_ERROR("Failed to add vline0 irq id!\n");
3603                         return r;
3604                 }
3605
3606                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3607                 int_params.irq_source =
3608                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3609
3610                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3611                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3612                         break;
3613                 }
3614
3615                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3616                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3617
3618                 c_irq_params->adev = adev;
3619                 c_irq_params->irq_src = int_params.irq_source;
3620
3621                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3622                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3623         }
3624 #endif
3625
3626         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3627          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3628          * to trigger at end of each vblank, regardless of state of the lock,
3629          * matching DCE behaviour.
3630          */
3631         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3632              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3633              i++) {
3634                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3635
3636                 if (r) {
3637                         DRM_ERROR("Failed to add vupdate irq id!\n");
3638                         return r;
3639                 }
3640
3641                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3642                 int_params.irq_source =
3643                         dc_interrupt_to_irq_source(dc, i, 0);
3644
3645                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3646
3647                 c_irq_params->adev = adev;
3648                 c_irq_params->irq_src = int_params.irq_source;
3649
3650                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3651                                 dm_vupdate_high_irq, c_irq_params);
3652         }
3653
3654         /* Use GRPH_PFLIP interrupt */
3655         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3656                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3657                         i++) {
3658                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3659                 if (r) {
3660                         DRM_ERROR("Failed to add page flip irq id!\n");
3661                         return r;
3662                 }
3663
3664                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3665                 int_params.irq_source =
3666                         dc_interrupt_to_irq_source(dc, i, 0);
3667
3668                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3669
3670                 c_irq_params->adev = adev;
3671                 c_irq_params->irq_src = int_params.irq_source;
3672
3673                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3674                                 dm_pflip_high_irq, c_irq_params);
3676         }
3677
3678         /* HPD */
3679         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3680                         &adev->hpd_irq);
3681         if (r) {
3682                 DRM_ERROR("Failed to add hpd irq id!\n");
3683                 return r;
3684         }
3685
3686         register_hpd_handlers(adev);
3687
3688         return 0;
3689 }
3690 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3691 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3692 {
3693         struct dc *dc = adev->dm.dc;
3694         struct common_irq_params *c_irq_params;
3695         struct dc_interrupt_params int_params = {0};
3696         int r, i;
3697
3698         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3699         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3700
3701         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3702                         &adev->dmub_outbox_irq);
3703         if (r) {
3704                 DRM_ERROR("Failed to add outbox irq id!\n");
3705                 return r;
3706         }
3707
3708         if (dc->ctx->dmub_srv) {
3709                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3710                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3711                 int_params.irq_source =
3712                         dc_interrupt_to_irq_source(dc, i, 0);
3713
3714                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3715
3716                 c_irq_params->adev = adev;
3717                 c_irq_params->irq_src = int_params.irq_source;
3718
3719                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3720                                 dm_dmub_outbox1_low_irq, c_irq_params);
3721         }
3722
3723         return 0;
3724 }
3725 #endif
3726
3727 /*
3728  * Acquires the lock for the atomic state object and returns
3729  * the new atomic state.
3730  *
3731  * This should only be called during atomic check.
3732  */
3733 static int dm_atomic_get_state(struct drm_atomic_state *state,
3734                                struct dm_atomic_state **dm_state)
3735 {
3736         struct drm_device *dev = state->dev;
3737         struct amdgpu_device *adev = drm_to_adev(dev);
3738         struct amdgpu_display_manager *dm = &adev->dm;
3739         struct drm_private_state *priv_state;
3740
3741         if (*dm_state)
3742                 return 0;
3743
3744         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3745         if (IS_ERR(priv_state))
3746                 return PTR_ERR(priv_state);
3747
3748         *dm_state = to_dm_atomic_state(priv_state);
3749
3750         return 0;
3751 }
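/*
 * Illustrative use from an atomic-check path (local variable names are
 * hypothetical):
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *
 *      ret = dm_atomic_get_state(state, &dm_state);
 *      if (ret)
 *              return ret;
 *      // dm_state->context can now be inspected or modified
 */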
3752
3753 static struct dm_atomic_state *
3754 dm_atomic_get_new_state(struct drm_atomic_state *state)
3755 {
3756         struct drm_device *dev = state->dev;
3757         struct amdgpu_device *adev = drm_to_adev(dev);
3758         struct amdgpu_display_manager *dm = &adev->dm;
3759         struct drm_private_obj *obj;
3760         struct drm_private_state *new_obj_state;
3761         int i;
3762
3763         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3764                 if (obj->funcs == dm->atomic_obj.funcs)
3765                         return to_dm_atomic_state(new_obj_state);
3766         }
3767
3768         return NULL;
3769 }
3770
3771 static struct drm_private_state *
3772 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3773 {
3774         struct dm_atomic_state *old_state, *new_state;
3775
3776         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3777         if (!new_state)
3778                 return NULL;
3779
3780         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3781
3782         old_state = to_dm_atomic_state(obj->state);
3783
3784         if (old_state && old_state->context)
3785                 new_state->context = dc_copy_state(old_state->context);
3786
3787         if (!new_state->context) {
3788                 kfree(new_state);
3789                 return NULL;
3790         }
3791
3792         return &new_state->base;
3793 }
3794
3795 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3796                                     struct drm_private_state *state)
3797 {
3798         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3799
3800         if (dm_state && dm_state->context)
3801                 dc_release_state(dm_state->context);
3802
3803         kfree(dm_state);
3804 }
3805
3806 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3807         .atomic_duplicate_state = dm_atomic_duplicate_state,
3808         .atomic_destroy_state = dm_atomic_destroy_state,
3809 };
3810
3811 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3812 {
3813         struct dm_atomic_state *state;
3814         int r;
3815
3816         adev->mode_info.mode_config_initialized = true;
3817
3818         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3819         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3820
3821         adev_to_drm(adev)->mode_config.max_width = 16384;
3822         adev_to_drm(adev)->mode_config.max_height = 16384;
3823
3824         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3825         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3826         /* indicates support for immediate flip */
3827         adev_to_drm(adev)->mode_config.async_page_flip = true;
3828
3829         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3830
3831         state = kzalloc(sizeof(*state), GFP_KERNEL);
3832         if (!state)
3833                 return -ENOMEM;
3834
3835         state->context = dc_create_state(adev->dm.dc);
3836         if (!state->context) {
3837                 kfree(state);
3838                 return -ENOMEM;
3839         }
3840
3841         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3842
3843         drm_atomic_private_obj_init(adev_to_drm(adev),
3844                                     &adev->dm.atomic_obj,
3845                                     &state->base,
3846                                     &dm_atomic_state_funcs);
3847
3848         r = amdgpu_display_modeset_create_props(adev);
3849         if (r) {
3850                 dc_release_state(state->context);
3851                 kfree(state);
3852                 return r;
3853         }
3854
3855         r = amdgpu_dm_audio_init(adev);
3856         if (r) {
3857                 dc_release_state(state->context);
3858                 kfree(state);
3859                 return r;
3860         }
3861
3862         return 0;
3863 }
3864
3865 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3866 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3867 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3868
3869 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3870         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3871
3872 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3873                                             int bl_idx)
3874 {
3875 #if defined(CONFIG_ACPI)
3876         struct amdgpu_dm_backlight_caps caps;
3877
3878         memset(&caps, 0, sizeof(caps));
3879
3880         if (dm->backlight_caps[bl_idx].caps_valid)
3881                 return;
3882
3883         amdgpu_acpi_get_backlight_caps(&caps);
3884         if (caps.caps_valid) {
3885                 dm->backlight_caps[bl_idx].caps_valid = true;
3886                 if (caps.aux_support)
3887                         return;
3888                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3889                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3890         } else {
3891                 dm->backlight_caps[bl_idx].min_input_signal =
3892                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3893                 dm->backlight_caps[bl_idx].max_input_signal =
3894                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3895         }
3896 #else
3897         if (dm->backlight_caps[bl_idx].aux_support)
3898                 return;
3899
3900         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3901         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3902 #endif
3903 }
3904
3905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3906                                 unsigned *min, unsigned *max)
3907 {
3908         if (!caps)
3909                 return 0;
3910
3911         if (caps->aux_support) {
3912                 // Firmware limits are in nits, DC API wants millinits.
3913                 *max = 1000 * caps->aux_max_input_signal;
3914                 *min = 1000 * caps->aux_min_input_signal;
3915         } else {
3916                 // Firmware limits are 8-bit, PWM control is 16-bit.
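                // e.g. a max_input_signal of 0xFF scales to 0x101 * 0xFF = 0xFFFF.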
3917                 *max = 0x101 * caps->max_input_signal;
3918                 *min = 0x101 * caps->min_input_signal;
3919         }
3920         return 1;
3921 }
3922
3923 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3924                                         uint32_t brightness)
3925 {
3926         unsigned min, max;
3927
3928         if (!get_brightness_range(caps, &min, &max))
3929                 return brightness;
3930
3931         // Rescale 0..255 to min..max
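        // e.g. with limits 3084..0xFFFF (from the 12..255 defaults above):
        // 0 maps to 3084 and 255 maps to 0xFFFF.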
3932         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3933                                        AMDGPU_MAX_BL_LEVEL);
3934 }
3935
3936 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3937                                       uint32_t brightness)
3938 {
3939         unsigned min, max;
3940
3941         if (!get_brightness_range(caps, &min, &max))
3942                 return brightness;
3943
3944         if (brightness < min)
3945                 return 0;
3946         // Rescale min..max to 0..255
3947         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3948                                  max - min);
3949 }
3950
3951 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3952                                          int bl_idx,
3953                                          u32 user_brightness)
3954 {
3955         struct amdgpu_dm_backlight_caps caps;
3956         struct dc_link *link;
3957         u32 brightness;
3958         bool rc;
3959
3960         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3961         caps = dm->backlight_caps[bl_idx];
3962
3963         dm->brightness[bl_idx] = user_brightness;
3964         /* mirror the level of the first eDP into the atombios scratch register */
3965         if (bl_idx == 0)
3966                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3967         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3968         link = (struct dc_link *)dm->backlight_link[bl_idx];
3969
3970         /* Change brightness based on AUX property */
3971         if (caps.aux_support) {
3972                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3973                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3974                 if (!rc)
3975                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3976         } else {
3977                 rc = dc_link_set_backlight_level(link, brightness, 0);
3978                 if (!rc)
3979                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3980         }
3981
3982         return rc ? 0 : 1;
3983 }
3984
3985 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3986 {
3987         struct amdgpu_display_manager *dm = bl_get_data(bd);
3988         int i;
3989
3990         for (i = 0; i < dm->num_of_edps; i++) {
3991                 if (bd == dm->backlight_dev[i])
3992                         break;
3993         }
3994         if (i >= dm->num_of_edps)
3995                 i = 0;
3996         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3997
3998         return 0;
3999 }
4000
4001 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4002                                          int bl_idx)
4003 {
4004         struct amdgpu_dm_backlight_caps caps;
4005         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4006
4007         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4008         caps = dm->backlight_caps[bl_idx];
4009
4010         if (caps.aux_support) {
4011                 u32 avg, peak;
4012                 bool rc;
4013
4014                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4015                 if (!rc)
4016                         return dm->brightness[bl_idx];
4017                 return convert_brightness_to_user(&caps, avg);
4018         } else {
4019                 int ret = dc_link_get_backlight_level(link);
4020
4021                 if (ret == DC_ERROR_UNEXPECTED)
4022                         return dm->brightness[bl_idx];
4023                 return convert_brightness_to_user(&caps, ret);
4024         }
4025 }
4026
4027 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4028 {
4029         struct amdgpu_display_manager *dm = bl_get_data(bd);
4030         int i;
4031
4032         for (i = 0; i < dm->num_of_edps; i++) {
4033                 if (bd == dm->backlight_dev[i])
4034                         break;
4035         }
4036         if (i >= dm->num_of_edps)
4037                 i = 0;
4038         return amdgpu_dm_backlight_get_level(dm, i);
4039 }
4040
4041 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4042         .options = BL_CORE_SUSPENDRESUME,
4043         .get_brightness = amdgpu_dm_backlight_get_brightness,
4044         .update_status  = amdgpu_dm_backlight_update_status,
4045 };
4046
4047 static void
4048 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4049 {
4050         char bl_name[16];
4051         struct backlight_properties props = { 0 };
4052
4053         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4054         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4055
4056         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4057         props.brightness = AMDGPU_MAX_BL_LEVEL;
4058         props.type = BACKLIGHT_RAW;
4059
4060         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4061                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4062
4063         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4064                                                                        adev_to_drm(dm->adev)->dev,
4065                                                                        dm,
4066                                                                        &amdgpu_dm_backlight_ops,
4067                                                                        &props);
4068
4069         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4070                 DRM_ERROR("DM: Backlight registration failed!\n");
4071         else
4072                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4073 }
4074 #endif
4075
4076 static int initialize_plane(struct amdgpu_display_manager *dm,
4077                             struct amdgpu_mode_info *mode_info, int plane_id,
4078                             enum drm_plane_type plane_type,
4079                             const struct dc_plane_cap *plane_cap)
4080 {
4081         struct drm_plane *plane;
4082         unsigned long possible_crtcs;
4083         int ret = 0;
4084
4085         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4086         if (!plane) {
4087                 DRM_ERROR("KMS: Failed to allocate plane\n");
4088                 return -ENOMEM;
4089         }
4090         plane->type = plane_type;
4091
4092         /*
4093          * HACK: IGT tests expect that the primary plane for a CRTC
4094          * can only have one possible CRTC. Only expose support for
4095          * any CRTC on planes that will not be used as a primary plane
4096          * for a CRTC - like overlay or underlay planes.
4097          */
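        /* e.g. primary plane i may only use CRTC i (mask 1 << i), while
         * overlay/underlay planes advertise every CRTC (mask 0xff). */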
4098         possible_crtcs = 1 << plane_id;
4099         if (plane_id >= dm->dc->caps.max_streams)
4100                 possible_crtcs = 0xff;
4101
4102         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4103
4104         if (ret) {
4105                 DRM_ERROR("KMS: Failed to initialize plane\n");
4106                 kfree(plane);
4107                 return ret;
4108         }
4109
4110         if (mode_info)
4111                 mode_info->planes[plane_id] = plane;
4112
4113         return ret;
4114 }
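/*
 * For illustration, possible_crtcs in initialize_plane() is a bitmask of
 * CRTC indexes, so with dc->caps.max_streams == 4:
 *
 *   plane_id 0: 1 << 0 = 0x01 -> primary plane, bound to CRTC 0 only
 *   plane_id 3: 1 << 3 = 0x08 -> primary plane, bound to CRTC 3 only
 *   plane_id 5: 0xff          -> overlay plane, usable on any of the
 *                                first eight CRTCs
 */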
4115
4117 static void register_backlight_device(struct amdgpu_display_manager *dm,
4118                                       struct dc_link *link)
4119 {
4120 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4121         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4122
4123         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4124             link->type != dc_connection_none) {
4125                 /*
4126                  * Even if registration fails, we should continue with
4127                  * DM initialization, because not having backlight control
4128                  * is better than a black screen.
4129                  */
4130                 if (!dm->backlight_dev[dm->num_of_edps])
4131                         amdgpu_dm_register_backlight_device(dm);
4132
4133                 if (dm->backlight_dev[dm->num_of_edps]) {
4134                         dm->backlight_link[dm->num_of_edps] = link;
4135                         dm->num_of_edps++;
4136                 }
4137         }
4138 #endif
4139 }
4140
4142 /*
4143  * In this architecture, the association
4144  * connector -> encoder -> crtc
4145  * is not really required. The crtc and connector will hold the
4146  * display_index as an abstraction to use with the DAL component.
4147  *
4148  * Returns 0 on success
4149  */
4150 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4151 {
4152         struct amdgpu_display_manager *dm = &adev->dm;
4153         int32_t i;
4154         struct amdgpu_dm_connector *aconnector = NULL;
4155         struct amdgpu_encoder *aencoder = NULL;
4156         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4157         uint32_t link_cnt;
4158         int32_t primary_planes;
4159         enum dc_connection_type new_connection_type = dc_connection_none;
4160         const struct dc_plane_cap *plane;
4161         bool psr_feature_enabled = false;
4162
4163         dm->display_indexes_num = dm->dc->caps.max_streams;
4164         /* Update the actual number of CRTCs in use */
4165         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4166
4167         link_cnt = dm->dc->caps.max_links;
4168         if (amdgpu_dm_mode_config_init(dm->adev)) {
4169                 DRM_ERROR("DM: Failed to initialize mode config\n");
4170                 return -EINVAL;
4171         }
4172
4173         /* There is one primary plane per CRTC */
4174         primary_planes = dm->dc->caps.max_streams;
4175         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4176
4177         /*
4178          * Initialize primary planes, implicit planes for legacy IOCTLs.
4179          * Order is reversed to match iteration order in atomic check.
4180          */
4181         for (i = (primary_planes - 1); i >= 0; i--) {
4182                 plane = &dm->dc->caps.planes[i];
4183
4184                 if (initialize_plane(dm, mode_info, i,
4185                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4186                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4187                         goto fail;
4188                 }
4189         }
4190
4191         /*
4192          * Initialize overlay planes, index starting after primary planes.
4193          * These planes have a higher DRM index than the primary planes since
4194          * they should be considered as having a higher z-order.
4195          * Order is reversed to match iteration order in atomic check.
4196          *
4197          * Only support DCN for now, and only expose one so we don't encourage
4198          * userspace to use up all the pipes.
4199          */
4200         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4201                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4202
4203                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4204                         continue;
4205
4206                 if (!plane->blends_with_above || !plane->blends_with_below)
4207                         continue;
4208
4209                 if (!plane->pixel_format_support.argb8888)
4210                         continue;
4211
4212                 if (initialize_plane(dm, NULL, primary_planes + i,
4213                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4214                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4215                         goto fail;
4216                 }
4217
4218                 /* Only create one overlay plane. */
4219                 break;
4220         }
4221
4222         for (i = 0; i < dm->dc->caps.max_streams; i++)
4223                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4224                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4225                         goto fail;
4226                 }
4227
4228 #if defined(CONFIG_DRM_AMD_DC_DCN)
4229         /* Use Outbox interrupt */
4230         switch (adev->ip_versions[DCE_HWIP][0]) {
4231         case IP_VERSION(3, 0, 0):
4232         case IP_VERSION(3, 1, 2):
4233         case IP_VERSION(3, 1, 3):
4234         case IP_VERSION(2, 1, 0):
4235                 if (register_outbox_irq_handlers(dm->adev)) {
4236                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4237                         goto fail;
4238                 }
4239                 break;
4240         default:
4241                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4242                               adev->ip_versions[DCE_HWIP][0]);
4243         }
4244
4245         /* Determine whether to enable PSR support by default. */
4246         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4247                 switch (adev->ip_versions[DCE_HWIP][0]) {
4248                 case IP_VERSION(3, 1, 2):
4249                 case IP_VERSION(3, 1, 3):
4250                         psr_feature_enabled = true;
4251                         break;
4252                 default:
4253                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4254                         break;
4255                 }
4256         }
4257 #endif
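        /*
         * On ASICs not covered by the switch above, PSR can still be turned
         * on at module load through the dcfeaturemask parameter checked
         * here. Assuming DC_PSR_MASK is bit 3 (0x8) of enum DC_FEATURE_MASK,
         * an illustrative invocation would be:
         *
         *   modprobe amdgpu dcfeaturemask=0x8
         */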
4258
4259         /* loops over all connectors on the board */
4260         for (i = 0; i < link_cnt; i++) {
4261                 struct dc_link *link = NULL;
4262
4263                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4264                         DRM_ERROR(
4265                                 "KMS: Cannot support more than %d display indexes\n",
4266                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4267                         continue;
4268                 }
4269
4270                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4271                 if (!aconnector)
4272                         goto fail;
4273
4274                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4275                 if (!aencoder)
4276                         goto fail;
4277
4278                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4279                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4280                         goto fail;
4281                 }
4282
4283                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4284                         DRM_ERROR("KMS: Failed to initialize connector\n");
4285                         goto fail;
4286                 }
4287
4288                 link = dc_get_link_at_index(dm->dc, i);
4289
4290                 if (!dc_link_detect_sink(link, &new_connection_type))
4291                         DRM_ERROR("KMS: Failed to detect connector\n");
4292
4293                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4294                         emulated_link_detect(link);
4295                         amdgpu_dm_update_connector_after_detect(aconnector);
4297                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4298                         amdgpu_dm_update_connector_after_detect(aconnector);
4299                         register_backlight_device(dm, link);
4300                         if (dm->num_of_edps)
4301                                 update_connector_ext_caps(aconnector);
4302                         if (psr_feature_enabled)
4303                                 amdgpu_dm_set_psr_caps(link);
4304                 }
4305
4307         }
4308
4309         /*
4310          * Disable vblank IRQs aggressively for power-saving.
4311          *
4312          * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4313          * is also supported.
4314          */
4315         adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4316
4317         /* Software is initialized. Now we can register interrupt handlers. */
4318         switch (adev->asic_type) {
4319 #if defined(CONFIG_DRM_AMD_DC_SI)
4320         case CHIP_TAHITI:
4321         case CHIP_PITCAIRN:
4322         case CHIP_VERDE:
4323         case CHIP_OLAND:
4324                 if (dce60_register_irq_handlers(dm->adev)) {
4325                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4326                         goto fail;
4327                 }
4328                 break;
4329 #endif
4330         case CHIP_BONAIRE:
4331         case CHIP_HAWAII:
4332         case CHIP_KAVERI:
4333         case CHIP_KABINI:
4334         case CHIP_MULLINS:
4335         case CHIP_TONGA:
4336         case CHIP_FIJI:
4337         case CHIP_CARRIZO:
4338         case CHIP_STONEY:
4339         case CHIP_POLARIS11:
4340         case CHIP_POLARIS10:
4341         case CHIP_POLARIS12:
4342         case CHIP_VEGAM:
4343         case CHIP_VEGA10:
4344         case CHIP_VEGA12:
4345         case CHIP_VEGA20:
4346                 if (dce110_register_irq_handlers(dm->adev)) {
4347                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4348                         goto fail;
4349                 }
4350                 break;
4351         default:
4352 #if defined(CONFIG_DRM_AMD_DC_DCN)
4353                 switch (adev->ip_versions[DCE_HWIP][0]) {
4354                 case IP_VERSION(1, 0, 0):
4355                 case IP_VERSION(1, 0, 1):
4356                 case IP_VERSION(2, 0, 2):
4357                 case IP_VERSION(2, 0, 3):
4358                 case IP_VERSION(2, 0, 0):
4359                 case IP_VERSION(2, 1, 0):
4360                 case IP_VERSION(3, 0, 0):
4361                 case IP_VERSION(3, 0, 2):
4362                 case IP_VERSION(3, 0, 3):
4363                 case IP_VERSION(3, 0, 1):
4364                 case IP_VERSION(3, 1, 2):
4365                 case IP_VERSION(3, 1, 3):
4366                         if (dcn10_register_irq_handlers(dm->adev)) {
4367                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4368                                 goto fail;
4369                         }
4370                         break;
4371                 default:
4372                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4373                                         adev->ip_versions[DCE_HWIP][0]);
4374                         goto fail;
4375                 }
4376 #endif
4377                 break;
4378         }
4379
4380         return 0;
4381 fail:
4382         kfree(aencoder);
4383         kfree(aconnector);
4384
4385         return -EINVAL;
4386 }
4387
4388 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4389 {
4390         drm_atomic_private_obj_fini(&dm->atomic_obj);
4392 }
4393
4394 /******************************************************************************
4395  * amdgpu_display_funcs functions
4396  *****************************************************************************/
4397
4398 /*
4399  * dm_bandwidth_update - program display watermarks
4400  *
4401  * @adev: amdgpu_device pointer
4402  *
4403  * Calculate and program the display watermarks and line buffer allocation.
4404  */
4405 static void dm_bandwidth_update(struct amdgpu_device *adev)
4406 {
4407         /* TODO: implement later */
4408 }
4409
4410 static const struct amdgpu_display_funcs dm_display_funcs = {
4411         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4412         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4413         .backlight_set_level = NULL, /* never called for DC */
4414         .backlight_get_level = NULL, /* never called for DC */
4415         .hpd_sense = NULL, /* called unconditionally */
4416         .hpd_set_polarity = NULL, /* called unconditionally */
4417         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4418         .page_flip_get_scanoutpos =
4419                 dm_crtc_get_scanoutpos, /* called unconditionally */
4420         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4421         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4422 };
4423
4424 #if defined(CONFIG_DEBUG_KERNEL_DC)
4425
4426 static ssize_t s3_debug_store(struct device *device,
4427                               struct device_attribute *attr,
4428                               const char *buf,
4429                               size_t count)
4430 {
4431         int ret;
4432         int s3_state;
4433         struct drm_device *drm_dev = dev_get_drvdata(device);
4434         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4435
4436         ret = kstrtoint(buf, 0, &s3_state);
4437
4438         if (ret == 0) {
4439                 if (s3_state) {
4440                         dm_resume(adev);
4441                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4442                 } else {
4443                         dm_suspend(adev);
                }
4444         }
4445
4446         return ret == 0 ? count : 0;
4447 }
4448
4449 DEVICE_ATTR_WO(s3_debug);
4450
4451 #endif
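/*
 * Usage sketch for the s3_debug attribute above (the sysfs path assumes
 * the primary GPU is card0): writing 0 suspends the display manager,
 * writing a non-zero value resumes it and generates a hotplug event:
 *
 *   echo 0 > /sys/class/drm/card0/device/s3_debug
 *   echo 1 > /sys/class/drm/card0/device/s3_debug
 */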
4452
4453 static int dm_early_init(void *handle)
4454 {
4455         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4456
4457         switch (adev->asic_type) {
4458 #if defined(CONFIG_DRM_AMD_DC_SI)
4459         case CHIP_TAHITI:
4460         case CHIP_PITCAIRN:
4461         case CHIP_VERDE:
4462                 adev->mode_info.num_crtc = 6;
4463                 adev->mode_info.num_hpd = 6;
4464                 adev->mode_info.num_dig = 6;
4465                 break;
4466         case CHIP_OLAND:
4467                 adev->mode_info.num_crtc = 2;
4468                 adev->mode_info.num_hpd = 2;
4469                 adev->mode_info.num_dig = 2;
4470                 break;
4471 #endif
4472         case CHIP_BONAIRE:
4473         case CHIP_HAWAII:
4474                 adev->mode_info.num_crtc = 6;
4475                 adev->mode_info.num_hpd = 6;
4476                 adev->mode_info.num_dig = 6;
4477                 break;
4478         case CHIP_KAVERI:
4479                 adev->mode_info.num_crtc = 4;
4480                 adev->mode_info.num_hpd = 6;
4481                 adev->mode_info.num_dig = 7;
4482                 break;
4483         case CHIP_KABINI:
4484         case CHIP_MULLINS:
4485                 adev->mode_info.num_crtc = 2;
4486                 adev->mode_info.num_hpd = 6;
4487                 adev->mode_info.num_dig = 6;
4488                 break;
4489         case CHIP_FIJI:
4490         case CHIP_TONGA:
4491                 adev->mode_info.num_crtc = 6;
4492                 adev->mode_info.num_hpd = 6;
4493                 adev->mode_info.num_dig = 7;
4494                 break;
4495         case CHIP_CARRIZO:
4496                 adev->mode_info.num_crtc = 3;
4497                 adev->mode_info.num_hpd = 6;
4498                 adev->mode_info.num_dig = 9;
4499                 break;
4500         case CHIP_STONEY:
4501                 adev->mode_info.num_crtc = 2;
4502                 adev->mode_info.num_hpd = 6;
4503                 adev->mode_info.num_dig = 9;
4504                 break;
4505         case CHIP_POLARIS11:
4506         case CHIP_POLARIS12:
4507                 adev->mode_info.num_crtc = 5;
4508                 adev->mode_info.num_hpd = 5;
4509                 adev->mode_info.num_dig = 5;
4510                 break;
4511         case CHIP_POLARIS10:
4512         case CHIP_VEGAM:
4513                 adev->mode_info.num_crtc = 6;
4514                 adev->mode_info.num_hpd = 6;
4515                 adev->mode_info.num_dig = 6;
4516                 break;
4517         case CHIP_VEGA10:
4518         case CHIP_VEGA12:
4519         case CHIP_VEGA20:
4520                 adev->mode_info.num_crtc = 6;
4521                 adev->mode_info.num_hpd = 6;
4522                 adev->mode_info.num_dig = 6;
4523                 break;
4524         default:
4525 #if defined(CONFIG_DRM_AMD_DC_DCN)
4526                 switch (adev->ip_versions[DCE_HWIP][0]) {
4527                 case IP_VERSION(2, 0, 2):
4528                 case IP_VERSION(3, 0, 0):
4529                         adev->mode_info.num_crtc = 6;
4530                         adev->mode_info.num_hpd = 6;
4531                         adev->mode_info.num_dig = 6;
4532                         break;
4533                 case IP_VERSION(2, 0, 0):
4534                 case IP_VERSION(3, 0, 2):
4535                         adev->mode_info.num_crtc = 5;
4536                         adev->mode_info.num_hpd = 5;
4537                         adev->mode_info.num_dig = 5;
4538                         break;
4539                 case IP_VERSION(2, 0, 3):
4540                 case IP_VERSION(3, 0, 3):
4541                         adev->mode_info.num_crtc = 2;
4542                         adev->mode_info.num_hpd = 2;
4543                         adev->mode_info.num_dig = 2;
4544                         break;
4545                 case IP_VERSION(1, 0, 0):
4546                 case IP_VERSION(1, 0, 1):
4547                 case IP_VERSION(3, 0, 1):
4548                 case IP_VERSION(2, 1, 0):
4549                 case IP_VERSION(3, 1, 2):
4550                 case IP_VERSION(3, 1, 3):
4551                         adev->mode_info.num_crtc = 4;
4552                         adev->mode_info.num_hpd = 4;
4553                         adev->mode_info.num_dig = 4;
4554                         break;
4555                 default:
4556                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4557                                         adev->ip_versions[DCE_HWIP][0]);
4558                         return -EINVAL;
4559                 }
4560 #endif
4561                 break;
4562         }
4563
4564         amdgpu_dm_set_irq_funcs(adev);
4565
4566         if (adev->mode_info.funcs == NULL)
4567                 adev->mode_info.funcs = &dm_display_funcs;
4568
4569         /*
4570          * Note: Do NOT change adev->audio_endpt_rreg and
4571          * adev->audio_endpt_wreg because they are initialised in
4572          * amdgpu_device_init()
4573          */
4574 #if defined(CONFIG_DEBUG_KERNEL_DC)
4575         device_create_file(
4576                 adev_to_drm(adev)->dev,
4577                 &dev_attr_s3_debug);
4578 #endif
4579
4580         return 0;
4581 }
4582
4583 static bool modeset_required(struct drm_crtc_state *crtc_state,
4584                              struct dc_stream_state *new_stream,
4585                              struct dc_stream_state *old_stream)
4586 {
4587         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4588 }
4589
4590 static bool modereset_required(struct drm_crtc_state *crtc_state)
4591 {
4592         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4593 }
4594
4595 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4596 {
4597         drm_encoder_cleanup(encoder);
4598         kfree(encoder);
4599 }
4600
4601 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4602         .destroy = amdgpu_dm_encoder_destroy,
4603 };
4604
4606 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4607                                          struct drm_framebuffer *fb,
4608                                          int *min_downscale, int *max_upscale)
4609 {
4610         struct amdgpu_device *adev = drm_to_adev(dev);
4611         struct dc *dc = adev->dm.dc;
4612         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4613         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4614
4615         switch (fb->format->format) {
4616         case DRM_FORMAT_P010:
4617         case DRM_FORMAT_NV12:
4618         case DRM_FORMAT_NV21:
4619                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4620                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4621                 break;
4622
4623         case DRM_FORMAT_XRGB16161616F:
4624         case DRM_FORMAT_ARGB16161616F:
4625         case DRM_FORMAT_XBGR16161616F:
4626         case DRM_FORMAT_ABGR16161616F:
4627                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4628                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4629                 break;
4630
4631         default:
4632                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4633                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4634                 break;
4635         }
4636
4637         /*
4638          * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
4639          * use a fixed scaling factor of 1.0, which is 1000 in these units.
4640          */
4641         if (*max_upscale == 1)
4642                 *max_upscale = 1000;
4643
4644         if (*min_downscale == 1)
4645                 *min_downscale = 1000;
4646 }
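/*
 * Worked example of the 1/1000 fixed-point units used here, with
 * illustrative DCN ARGB8888 caps of max_downscale_factor = 250 and
 * max_upscale_factor = 16000: the allowed destination/source ratio is
 * 0.25x (a 4:1 downscale) up to 16x. The real values come from
 * dc->caps.planes[0] and vary by ASIC.
 */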
4647
4649 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4650                                 const struct drm_plane_state *state,
4651                                 struct dc_scaling_info *scaling_info)
4652 {
4653         int scale_w, scale_h, min_downscale, max_upscale;
4654
4655         memset(scaling_info, 0, sizeof(*scaling_info));
4656
4657         /* Source is fixed-point 16.16, but we ignore the fractional bits for now. */
4658         scaling_info->src_rect.x = state->src_x >> 16;
4659         scaling_info->src_rect.y = state->src_y >> 16;
4660
4661         /*
4662          * For reasons we don't (yet) fully understand, a non-zero
4663          * src_y coordinate into an NV12 buffer can cause a
4664          * system hang on DCN1x.
4665          * To avoid hangs (and maybe being overly cautious),
4666          * let's reject both non-zero src_x and src_y.
4667          *
4668          * We currently know of only one use-case to reproduce a
4669          * scenario with non-zero src_x and src_y for NV12, which
4670          * is to gesture the YouTube Android app into full screen
4671          * on ChromeOS.
4672          */
4673         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4674             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4675             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4676             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4677                 return -EINVAL;
4678
4679         scaling_info->src_rect.width = state->src_w >> 16;
4680         if (scaling_info->src_rect.width == 0)
4681                 return -EINVAL;
4682
4683         scaling_info->src_rect.height = state->src_h >> 16;
4684         if (scaling_info->src_rect.height == 0)
4685                 return -EINVAL;
4686
4687         scaling_info->dst_rect.x = state->crtc_x;
4688         scaling_info->dst_rect.y = state->crtc_y;
4689
4690         if (state->crtc_w == 0)
4691                 return -EINVAL;
4692
4693         scaling_info->dst_rect.width = state->crtc_w;
4694
4695         if (state->crtc_h == 0)
4696                 return -EINVAL;
4697
4698         scaling_info->dst_rect.height = state->crtc_h;
4699
4700         /* DRM doesn't specify clipping on destination output. */
4701         scaling_info->clip_rect = scaling_info->dst_rect;
4702
4703         /* Validate scaling per-format with DC plane caps */
4704         if (state->plane && state->plane->dev && state->fb) {
4705                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4706                                              &min_downscale, &max_upscale);
4707         } else {
4708                 min_downscale = 250;
4709                 max_upscale = 16000;
4710         }
4711
4712         scale_w = scaling_info->dst_rect.width * 1000 /
4713                   scaling_info->src_rect.width;
4714
4715         if (scale_w < min_downscale || scale_w > max_upscale)
4716                 return -EINVAL;
4717
4718         scale_h = scaling_info->dst_rect.height * 1000 /
4719                   scaling_info->src_rect.height;
4720
4721         if (scale_h < min_downscale || scale_h > max_upscale)
4722                 return -EINVAL;
4723
4724         /*
4725          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
4726          * assume reasonable defaults based on the format.
4727          */
4728
4729         return 0;
4730 }
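/*
 * Worked example for the ratio checks in fill_dc_scaling_info(), using
 * the fallback limits of 250 and 16000: scanning a 3840-wide source out
 * to a 1920-wide destination gives
 *
 *   scale_w = 1920 * 1000 / 3840 = 500   (0.5x, accepted)
 *
 * while 3840 -> 800 gives scale_w = 208 < 250 and fails with -EINVAL.
 */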
4731
4732 static void
4733 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4734                                  uint64_t tiling_flags)
4735 {
4736         /* Fill GFX8 params */
4737         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4738                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4739
4740                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4741                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4742                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4743                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4744                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4745
4746                 /* XXX fix me for VI */
4747                 tiling_info->gfx8.num_banks = num_banks;
4748                 tiling_info->gfx8.array_mode =
4749                                 DC_ARRAY_2D_TILED_THIN1;
4750                 tiling_info->gfx8.tile_split = tile_split;
4751                 tiling_info->gfx8.bank_width = bankw;
4752                 tiling_info->gfx8.bank_height = bankh;
4753                 tiling_info->gfx8.tile_aspect = mtaspect;
4754                 tiling_info->gfx8.tile_mode =
4755                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4756         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4757                         == DC_ARRAY_1D_TILED_THIN1) {
4758                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4759         }
4760
4761         tiling_info->gfx8.pipe_config =
4762                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4763 }
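/*
 * AMDGPU_TILING_GET() is a shift-and-mask accessor over the 64-bit
 * tiling_flags word attached to the BO; conceptually (the real shifts
 * and masks live in include/uapi/drm/amdgpu_drm.h):
 *
 *   #define AMDGPU_TILING_GET(value, field) \
 *           (((value) >> AMDGPU_TILING_##field##_SHIFT) & \
 *            AMDGPU_TILING_##field##_MASK)
 */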
4764
4765 static void
4766 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4767                                   union dc_tiling_info *tiling_info)
4768 {
4769         tiling_info->gfx9.num_pipes =
4770                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4771         tiling_info->gfx9.num_banks =
4772                 adev->gfx.config.gb_addr_config_fields.num_banks;
4773         tiling_info->gfx9.pipe_interleave =
4774                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4775         tiling_info->gfx9.num_shader_engines =
4776                 adev->gfx.config.gb_addr_config_fields.num_se;
4777         tiling_info->gfx9.max_compressed_frags =
4778                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4779         tiling_info->gfx9.num_rb_per_se =
4780                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4781         tiling_info->gfx9.shaderEnable = 1;
4782         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4783                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4784 }
4785
4786 static int
4787 validate_dcc(struct amdgpu_device *adev,
4788              const enum surface_pixel_format format,
4789              const enum dc_rotation_angle rotation,
4790              const union dc_tiling_info *tiling_info,
4791              const struct dc_plane_dcc_param *dcc,
4792              const struct dc_plane_address *address,
4793              const struct plane_size *plane_size)
4794 {
4795         struct dc *dc = adev->dm.dc;
4796         struct dc_dcc_surface_param input;
4797         struct dc_surface_dcc_cap output;
4798
4799         memset(&input, 0, sizeof(input));
4800         memset(&output, 0, sizeof(output));
4801
4802         if (!dcc->enable)
4803                 return 0;
4804
4805         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4806             !dc->cap_funcs.get_dcc_compression_cap)
4807                 return -EINVAL;
4808
4809         input.format = format;
4810         input.surface_size.width = plane_size->surface_size.width;
4811         input.surface_size.height = plane_size->surface_size.height;
4812         input.swizzle_mode = tiling_info->gfx9.swizzle;
4813
4814         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4815                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4816         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4817                 input.scan = SCAN_DIRECTION_VERTICAL;
4818
4819         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4820                 return -EINVAL;
4821
4822         if (!output.capable)
4823                 return -EINVAL;
4824
4825         if (dcc->independent_64b_blks == 0 &&
4826             output.grph.rgb.independent_64b_blks != 0)
4827                 return -EINVAL;
4828
4829         return 0;
4830 }
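/*
 * Illustration of the final check above: if the framebuffer's modifier
 * did not set DCC_INDEPENDENT_64B (dcc->independent_64b_blks == 0) but
 * the capability query reports that the surface requires independent
 * 64B blocks, the DCC parameters are inconsistent with what the
 * hardware can decode, so the plane is rejected with -EINVAL.
 */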
4831
4832 static bool
4833 modifier_has_dcc(uint64_t modifier)
4834 {
4835         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4836 }
4837
4838 static unsigned
4839 modifier_gfx9_swizzle_mode(uint64_t modifier)
4840 {
4841         if (modifier == DRM_FORMAT_MOD_LINEAR)
4842                 return 0;
4843
4844         return AMD_FMT_MOD_GET(TILE, modifier);
4845 }
4846
4847 static const struct drm_format_info *
4848 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4849 {
4850         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4851 }
4852
4853 static void
4854 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4855                                     union dc_tiling_info *tiling_info,
4856                                     uint64_t modifier)
4857 {
4858         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4859         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4860         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4861         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4862
4863         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4864
4865         if (!IS_AMD_FMT_MOD(modifier))
4866                 return;
4867
4868         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4869         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4870
4871         if (adev->family >= AMDGPU_FAMILY_NV) {
4872                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4873         } else {
4874                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4875
4876                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4877         }
4878 }
4879
4880 enum dm_micro_swizzle {
4881         MICRO_SWIZZLE_Z = 0,
4882         MICRO_SWIZZLE_S = 1,
4883         MICRO_SWIZZLE_D = 2,
4884         MICRO_SWIZZLE_R = 3
4885 };
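/*
 * The two low bits of a GFX9+ swizzle mode encode the micro-tile order,
 * which is why dm_plane_format_mod_supported() below masks with 3.
 * Assuming the drm_fourcc.h tile encodings, for example:
 *
 *   AMD_FMT_MOD_TILE_GFX9_64K_S_X (25) & 3 == 1 -> MICRO_SWIZZLE_S
 *   AMD_FMT_MOD_TILE_GFX9_64K_D_X (26) & 3 == 2 -> MICRO_SWIZZLE_D
 *   AMD_FMT_MOD_TILE_GFX9_64K_R_X (27) & 3 == 3 -> MICRO_SWIZZLE_R
 */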
4886
4887 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4888                                           uint32_t format,
4889                                           uint64_t modifier)
4890 {
4891         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4892         const struct drm_format_info *info = drm_format_info(format);
4893         int i;
4894
4895         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4896
4897         if (!info)
4898                 return false;
4899
4900         /*
4901          * We always have to allow these modifiers:
4902          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4903          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4904          */
4905         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4906             modifier == DRM_FORMAT_MOD_INVALID) {
4907                 return true;
4908         }
4909
4910         /* Check that the modifier is on the list of the plane's supported modifiers. */
4911         for (i = 0; i < plane->modifier_count; i++) {
4912                 if (modifier == plane->modifiers[i])
4913                         break;
4914         }
4915         if (i == plane->modifier_count)
4916                 return false;
4917
4918         /*
4919          * For D swizzle the canonical modifier depends on the bpp, so check
4920          * it here.
4921          */
4922         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4923             adev->family >= AMDGPU_FAMILY_NV) {
4924                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4925                         return false;
4926         }
4927
4928         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4929             info->cpp[0] < 8)
4930                 return false;
4931
4932         if (modifier_has_dcc(modifier)) {
4933                 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4934                 if (info->cpp[0] != 4)
4935                         return false;
4936                 /* We support multi-planar formats, but not when combined with
4937                  * additional DCC metadata planes. */
4938                 if (info->num_planes > 1)
4939                         return false;
4940         }
4941
4942         return true;
4943 }
4944
4945 static void
4946 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4947 {
4948         if (!*mods)
4949                 return;
4950
4951         if (*cap - *size < 1) {
4952                 uint64_t new_cap = *cap * 2;
4953                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4954
4955                 if (!new_mods) {
4956                         kfree(*mods);
4957                         *mods = NULL;
4958                         return;
4959                 }
4960
4961                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4962                 kfree(*mods);
4963                 *mods = new_mods;
4964                 *cap = new_cap;
4965         }
4966
4967         (*mods)[*size] = mod;
4968         *size += 1;
4969 }
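/*
 * add_modifier() grows the list geometrically: with the initial capacity
 * of 128 used by get_plane_modifiers(), the first overflow reallocates
 * to 256 entries and copies the old contents - an open-coded equivalent
 * of krealloc_array(). On allocation failure *mods is freed and set to
 * NULL, which get_plane_modifiers() later reports as -ENOMEM.
 */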
4970
4971 static void
4972 add_gfx9_modifiers(const struct amdgpu_device *adev,
4973                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4974 {
4975         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4976         int pipe_xor_bits = min(8, pipes +
4977                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4978         int bank_xor_bits = min(8 - pipe_xor_bits,
4979                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4980         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4981                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4982
4984         if (adev->family == AMDGPU_FAMILY_RV) {
4985                 /* Raven2 and later */
4986                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4987
4988                 /*
4989                  * No _D DCC swizzles yet because we only allow 32bpp, which
4990                  * doesn't support _D on DCN
4991                  */
4992
4993                 if (has_constant_encode) {
4994                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4995                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4996                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4997                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4998                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4999                                     AMD_FMT_MOD_SET(DCC, 1) |
5000                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5001                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5002                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5003                 }
5004
5005                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5006                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5007                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5008                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5009                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5010                             AMD_FMT_MOD_SET(DCC, 1) |
5011                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5012                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5013                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5014
5015                 if (has_constant_encode) {
5016                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5017                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5018                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5019                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5020                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5021                                     AMD_FMT_MOD_SET(DCC, 1) |
5022                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5023                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5024                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5026                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5027                                     AMD_FMT_MOD_SET(RB, rb) |
5028                                     AMD_FMT_MOD_SET(PIPE, pipes));
5029                 }
5030
5031                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5032                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5033                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5034                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5035                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5036                             AMD_FMT_MOD_SET(DCC, 1) |
5037                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5038                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5039                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5040                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5041                             AMD_FMT_MOD_SET(RB, rb) |
5042                             AMD_FMT_MOD_SET(PIPE, pipes));
5043         }
5044
5045         /*
5046          * Only supported for 64bpp on Raven, will be filtered on format in
5047          * dm_plane_format_mod_supported.
5048          */
5049         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5050                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5051                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5052                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5053                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5054
5055         if (adev->family == AMDGPU_FAMILY_RV) {
5056                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5057                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5058                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5059                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5060                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5061         }
5062
5063         /*
5064          * Only supported for 64bpp on Raven, will be filtered on format in
5065          * dm_plane_format_mod_supported.
5066          */
5067         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5068                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5069                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5070
5071         if (adev->family == AMDGPU_FAMILY_RV) {
5072                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5073                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5074                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5075         }
5076 }
5077
5078 static void
5079 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5080                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5081 {
5082         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5083
5084         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5085                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5086                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5087                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5088                     AMD_FMT_MOD_SET(DCC, 1) |
5089                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5090                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5091                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5092
5093         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5094                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5095                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5096                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5097                     AMD_FMT_MOD_SET(DCC, 1) |
5098                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5099                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5100                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5101                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5102
5103         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5104                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5105                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5106                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5107
5108         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5110                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5111                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5112
5114         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5115         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5116                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5117                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5118
5119         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5120                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5121                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5122 }
5123
5124 static void
5125 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5126                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5127 {
5128         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5129         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5130
5131         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5132                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5133                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5134                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5135                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5136                     AMD_FMT_MOD_SET(DCC, 1) |
5137                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5138                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5139                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5140                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5141
5142         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5143                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5144                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5145                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5146                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5147                     AMD_FMT_MOD_SET(DCC, 1) |
5148                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5149                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5150                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5151
5152         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5154                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5155                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5156                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5157                     AMD_FMT_MOD_SET(DCC, 1) |
5158                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5159                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5160                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5161                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5162                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5163
5164         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5165                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5166                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5167                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5168                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5169                     AMD_FMT_MOD_SET(DCC, 1) |
5170                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5171                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5172                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5173                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5174
5175         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5176                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5177                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5178                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5179                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5180
5181         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5182                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5183                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5184                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5185                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5186
5187         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5188         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5189                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5190                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5191
5192         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5193                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5194                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5195 }
5196
5197 static int
5198 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5199 {
5200         uint64_t size = 0, capacity = 128;
5201         *mods = NULL;
5202
5203         /* We have not hooked up any pre-GFX9 modifiers. */
5204         if (adev->family < AMDGPU_FAMILY_AI)
5205                 return 0;
5206
5207         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5208
5209         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5210                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5211                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5212                 return *mods ? 0 : -ENOMEM;
5213         }
5214
5215         switch (adev->family) {
5216         case AMDGPU_FAMILY_AI:
5217         case AMDGPU_FAMILY_RV:
5218                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5219                 break;
5220         case AMDGPU_FAMILY_NV:
5221         case AMDGPU_FAMILY_VGH:
5222         case AMDGPU_FAMILY_YC:
5223                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5224                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5225                 else
5226                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5227                 break;
5228         }
5229
5230         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5231
5232         /* INVALID marks the end of the list. */
5233         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5234
5235         if (!*mods)
5236                 return -ENOMEM;
5237
5238         return 0;
5239 }
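/*
 * Usage note: the list built here ends with DRM_FORMAT_MOD_INVALID,
 * matching the terminator drm_universal_plane_init() expects for its
 * format_modifiers argument; DRM core copies the array, so the caller
 * (amdgpu_dm_plane_init()) is free to kfree() it afterwards.
 */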
5240
5241 static int
5242 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5243                                           const struct amdgpu_framebuffer *afb,
5244                                           const enum surface_pixel_format format,
5245                                           const enum dc_rotation_angle rotation,
5246                                           const struct plane_size *plane_size,
5247                                           union dc_tiling_info *tiling_info,
5248                                           struct dc_plane_dcc_param *dcc,
5249                                           struct dc_plane_address *address,
5250                                           const bool force_disable_dcc)
5251 {
5252         const uint64_t modifier = afb->base.modifier;
5253         int ret = 0;
5254
5255         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5256         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5257
5258         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5259                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5260                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5261                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5262
5263                 dcc->enable = 1;
5264                 dcc->meta_pitch = afb->base.pitches[1];
5265                 dcc->independent_64b_blks = independent_64b_blks;
5266                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5267                         if (independent_64b_blks && independent_128b_blks)
5268                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5269                         else if (independent_128b_blks)
5270                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5271                         else if (independent_64b_blks && !independent_128b_blks)
5272                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5273                         else
5274                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5275                 } else {
5276                         if (independent_64b_blks)
5277                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5278                         else
5279                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5280                 }
5281
5282                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5283                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5284         }
5285
5286         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5287         if (ret)
5288                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5289
5290         return ret;
5291 }
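/*
 * The dcc_ind_blk selection above, summarized per modifier flags (left
 * column: DCC_INDEPENDENT_64B, DCC_INDEPENDENT_128B):
 *
 *   1,1: GFX10_RBPLUS -> hubp_ind_block_64b_no_128bcl, else -> hubp_ind_block_64b
 *   0,1: GFX10_RBPLUS -> hubp_ind_block_128b,          else -> hubp_ind_block_unconstrained
 *   1,0: GFX10_RBPLUS -> hubp_ind_block_64b,           else -> hubp_ind_block_64b
 *   0,0: hubp_ind_block_unconstrained on either path
 */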
5292
5293 static int
5294 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5295                              const struct amdgpu_framebuffer *afb,
5296                              const enum surface_pixel_format format,
5297                              const enum dc_rotation_angle rotation,
5298                              const uint64_t tiling_flags,
5299                              union dc_tiling_info *tiling_info,
5300                              struct plane_size *plane_size,
5301                              struct dc_plane_dcc_param *dcc,
5302                              struct dc_plane_address *address,
5303                              bool tmz_surface,
5304                              bool force_disable_dcc)
5305 {
5306         const struct drm_framebuffer *fb = &afb->base;
5307         int ret;
5308
5309         memset(tiling_info, 0, sizeof(*tiling_info));
5310         memset(plane_size, 0, sizeof(*plane_size));
5311         memset(dcc, 0, sizeof(*dcc));
5312         memset(address, 0, sizeof(*address));
5313
5314         address->tmz_surface = tmz_surface;
5315
5316         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5317                 uint64_t addr = afb->address + fb->offsets[0];
5318
5319                 plane_size->surface_size.x = 0;
5320                 plane_size->surface_size.y = 0;
5321                 plane_size->surface_size.width = fb->width;
5322                 plane_size->surface_size.height = fb->height;
5323                 plane_size->surface_pitch =
5324                         fb->pitches[0] / fb->format->cpp[0];
5325
5326                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5327                 address->grph.addr.low_part = lower_32_bits(addr);
5328                 address->grph.addr.high_part = upper_32_bits(addr);
5329         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5330                 uint64_t luma_addr = afb->address + fb->offsets[0];
5331                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5332
5333                 plane_size->surface_size.x = 0;
5334                 plane_size->surface_size.y = 0;
5335                 plane_size->surface_size.width = fb->width;
5336                 plane_size->surface_size.height = fb->height;
5337                 plane_size->surface_pitch =
5338                         fb->pitches[0] / fb->format->cpp[0];
5339
5340                 plane_size->chroma_size.x = 0;
5341                 plane_size->chroma_size.y = 0;
5342                 /* TODO: set these based on surface format */
5343                 plane_size->chroma_size.width = fb->width / 2;
5344                 plane_size->chroma_size.height = fb->height / 2;
5345
5346                 plane_size->chroma_pitch =
5347                         fb->pitches[1] / fb->format->cpp[1];
5348
5349                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5350                 address->video_progressive.luma_addr.low_part =
5351                         lower_32_bits(luma_addr);
5352                 address->video_progressive.luma_addr.high_part =
5353                         upper_32_bits(luma_addr);
5354                 address->video_progressive.chroma_addr.low_part =
5355                         lower_32_bits(chroma_addr);
5356                 address->video_progressive.chroma_addr.high_part =
5357                         upper_32_bits(chroma_addr);
5358         }
5359
5360         if (adev->family >= AMDGPU_FAMILY_AI) {
5361                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5362                                                                 rotation, plane_size,
5363                                                                 tiling_info, dcc,
5364                                                                 address,
5365                                                                 force_disable_dcc);
5366                 if (ret)
5367                         return ret;
5368         } else {
5369                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5370         }
5371
5372         return 0;
5373 }
5374
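/*
 * Derive the DC blending inputs from the DRM plane state. Only overlay
 * planes participate: per-pixel alpha is honoured for pre-multiplied
 * ARGB-style formats, and the 16-bit DRM plane alpha is truncated to the
 * 8-bit value DC expects (e.g. a plane alpha of 0x8080 becomes a global
 * alpha value of 0x80).
 */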
5375 static void
5376 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5377                                bool *per_pixel_alpha, bool *global_alpha,
5378                                int *global_alpha_value)
5379 {
5380         *per_pixel_alpha = false;
5381         *global_alpha = false;
5382         *global_alpha_value = 0xff;
5383
5384         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5385                 return;
5386
5387         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5388                 static const uint32_t alpha_formats[] = {
5389                         DRM_FORMAT_ARGB8888,
5390                         DRM_FORMAT_RGBA8888,
5391                         DRM_FORMAT_ABGR8888,
5392                 };
5393                 uint32_t format = plane_state->fb->format->format;
5394                 unsigned int i;
5395
5396                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5397                         if (format == alpha_formats[i]) {
5398                                 *per_pixel_alpha = true;
5399                                 break;
5400                         }
5401                 }
5402         }
5403
5404         if (plane_state->alpha < 0xffff) {
5405                 *global_alpha = true;
5406                 *global_alpha_value = plane_state->alpha >> 8;
5407         }
5408 }
5409
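/*
 * Map the DRM color encoding/range properties onto a DC color space. RGB
 * formats are always treated as sRGB. For YCbCr surfaces, BT.601 and
 * BT.709 have both full- and limited-range variants, but BT.2020 is only
 * accepted as full range here; limited-range BT.2020 (and any unknown
 * encoding) is rejected with -EINVAL.
 */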
5410 static int
5411 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5412                             const enum surface_pixel_format format,
5413                             enum dc_color_space *color_space)
5414 {
5415         bool full_range;
5416
5417         *color_space = COLOR_SPACE_SRGB;
5418
5419         /* DRM color properties only affect non-RGB formats. */
5420         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5421                 return 0;
5422
5423         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5424
5425         switch (plane_state->color_encoding) {
5426         case DRM_COLOR_YCBCR_BT601:
5427                 if (full_range)
5428                         *color_space = COLOR_SPACE_YCBCR601;
5429                 else
5430                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5431                 break;
5432
5433         case DRM_COLOR_YCBCR_BT709:
5434                 if (full_range)
5435                         *color_space = COLOR_SPACE_YCBCR709;
5436                 else
5437                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5438                 break;
5439
5440         case DRM_COLOR_YCBCR_BT2020:
5441                 if (full_range)
5442                         *color_space = COLOR_SPACE_2020_YCBCR;
5443                 else
5444                         return -EINVAL;
5445                 break;
5446
5447         default:
5448                 return -EINVAL;
5449         }
5450
5451         return 0;
5452 }
5453
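/*
 * Build a complete dc_plane_info (format, rotation, tiling, DCC, blending)
 * plus the surface address for a plane state. The DRM fourcc is mapped to
 * DC's surface_pixel_format; note that DRM's NV12 maps to DC's 420_YCrCb
 * variant and NV21 to 420_YCbCr. Unsupported formats fail with -EINVAL.
 */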
5454 static int
5455 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5456                             const struct drm_plane_state *plane_state,
5457                             const uint64_t tiling_flags,
5458                             struct dc_plane_info *plane_info,
5459                             struct dc_plane_address *address,
5460                             bool tmz_surface,
5461                             bool force_disable_dcc)
5462 {
5463         const struct drm_framebuffer *fb = plane_state->fb;
5464         const struct amdgpu_framebuffer *afb =
5465                 to_amdgpu_framebuffer(plane_state->fb);
5466         int ret;
5467
5468         memset(plane_info, 0, sizeof(*plane_info));
5469
5470         switch (fb->format->format) {
5471         case DRM_FORMAT_C8:
5472                 plane_info->format =
5473                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5474                 break;
5475         case DRM_FORMAT_RGB565:
5476                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5477                 break;
5478         case DRM_FORMAT_XRGB8888:
5479         case DRM_FORMAT_ARGB8888:
5480                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5481                 break;
5482         case DRM_FORMAT_XRGB2101010:
5483         case DRM_FORMAT_ARGB2101010:
5484                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5485                 break;
5486         case DRM_FORMAT_XBGR2101010:
5487         case DRM_FORMAT_ABGR2101010:
5488                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5489                 break;
5490         case DRM_FORMAT_XBGR8888:
5491         case DRM_FORMAT_ABGR8888:
5492                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5493                 break;
5494         case DRM_FORMAT_NV21:
5495                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5496                 break;
5497         case DRM_FORMAT_NV12:
5498                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5499                 break;
5500         case DRM_FORMAT_P010:
5501                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5502                 break;
5503         case DRM_FORMAT_XRGB16161616F:
5504         case DRM_FORMAT_ARGB16161616F:
5505                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5506                 break;
5507         case DRM_FORMAT_XBGR16161616F:
5508         case DRM_FORMAT_ABGR16161616F:
5509                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5510                 break;
5511         case DRM_FORMAT_XRGB16161616:
5512         case DRM_FORMAT_ARGB16161616:
5513                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5514                 break;
5515         case DRM_FORMAT_XBGR16161616:
5516         case DRM_FORMAT_ABGR16161616:
5517                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5518                 break;
5519         default:
5520                 DRM_ERROR(
5521                         "Unsupported screen format %p4cc\n",
5522                         &fb->format->format);
5523                 return -EINVAL;
5524         }
5525
5526         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5527         case DRM_MODE_ROTATE_0:
5528                 plane_info->rotation = ROTATION_ANGLE_0;
5529                 break;
5530         case DRM_MODE_ROTATE_90:
5531                 plane_info->rotation = ROTATION_ANGLE_90;
5532                 break;
5533         case DRM_MODE_ROTATE_180:
5534                 plane_info->rotation = ROTATION_ANGLE_180;
5535                 break;
5536         case DRM_MODE_ROTATE_270:
5537                 plane_info->rotation = ROTATION_ANGLE_270;
5538                 break;
5539         default:
5540                 plane_info->rotation = ROTATION_ANGLE_0;
5541                 break;
5542         }
5543
5544         plane_info->visible = true;
5545         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5546
5547         plane_info->layer_index = 0;
5548
5549         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5550                                           &plane_info->color_space);
5551         if (ret)
5552                 return ret;
5553
5554         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5555                                            plane_info->rotation, tiling_flags,
5556                                            &plane_info->tiling_info,
5557                                            &plane_info->plane_size,
5558                                            &plane_info->dcc, address, tmz_surface,
5559                                            force_disable_dcc);
5560         if (ret)
5561                 return ret;
5562
5563         fill_blending_from_plane_state(
5564                 plane_state, &plane_info->per_pixel_alpha,
5565                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5566
5567         return 0;
5568 }
5569
5570 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5571                                     struct dc_plane_state *dc_plane_state,
5572                                     struct drm_plane_state *plane_state,
5573                                     struct drm_crtc_state *crtc_state)
5574 {
5575         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5576         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5577         struct dc_scaling_info scaling_info;
5578         struct dc_plane_info plane_info;
5579         int ret;
5580         bool force_disable_dcc = false;
5581
5582         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5583         if (ret)
5584                 return ret;
5585
5586         dc_plane_state->src_rect = scaling_info.src_rect;
5587         dc_plane_state->dst_rect = scaling_info.dst_rect;
5588         dc_plane_state->clip_rect = scaling_info.clip_rect;
5589         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5590
5591         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5592         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5593                                           afb->tiling_flags,
5594                                           &plane_info,
5595                                           &dc_plane_state->address,
5596                                           afb->tmz_surface,
5597                                           force_disable_dcc);
5598         if (ret)
5599                 return ret;
5600
5601         dc_plane_state->format = plane_info.format;
5602         dc_plane_state->color_space = plane_info.color_space;
5604         dc_plane_state->plane_size = plane_info.plane_size;
5605         dc_plane_state->rotation = plane_info.rotation;
5606         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5607         dc_plane_state->stereo_format = plane_info.stereo_format;
5608         dc_plane_state->tiling_info = plane_info.tiling_info;
5609         dc_plane_state->visible = plane_info.visible;
5610         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5611         dc_plane_state->global_alpha = plane_info.global_alpha;
5612         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5613         dc_plane_state->dcc = plane_info.dcc;
5614         dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5615         dc_plane_state->flip_int_enabled = true;
5616
5617         /*
5618          * Always set input transfer function, since plane state is refreshed
5619          * every time.
5620          */
5621         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5622         if (ret)
5623                 return ret;
5624
5625         return 0;
5626 }
5627
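/*
 * Compute the stream src (viewport) and dst (addressable area) rectangles
 * for the requested scaling mode. RMX_ASPECT (and RMX_OFF) shrinks one dst
 * dimension to preserve the source aspect ratio and centers the result:
 * e.g. a 1920x1080 mode on a 1920x1200 timing yields dst = 1920x1080 at
 * (0, 60). RMX_CENTER displays the source unscaled, and any underscan
 * borders are applied on top of whatever dst was chosen.
 */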
5628 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5629                                            const struct dm_connector_state *dm_state,
5630                                            struct dc_stream_state *stream)
5631 {
5632         enum amdgpu_rmx_type rmx_type;
5633
5634         struct rect src = { 0 }; /* viewport in composition space */
5635         struct rect dst = { 0 }; /* stream addressable area */
5636
5637         /* no mode. nothing to be done */
5638         if (!mode)
5639                 return;
5640
5641         /* Full screen scaling by default */
5642         src.width = mode->hdisplay;
5643         src.height = mode->vdisplay;
5644         dst.width = stream->timing.h_addressable;
5645         dst.height = stream->timing.v_addressable;
5646
5647         if (dm_state) {
5648                 rmx_type = dm_state->scaling;
5649                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5650                         if (src.width * dst.height <
5651                                         src.height * dst.width) {
5652                                 /* height needs less upscaling/more downscaling */
5653                                 dst.width = src.width *
5654                                                 dst.height / src.height;
5655                         } else {
5656                                 /* width needs less upscaling/more downscaling */
5657                                 dst.height = src.height *
5658                                                 dst.width / src.width;
5659                         }
5660                 } else if (rmx_type == RMX_CENTER) {
5661                         dst = src;
5662                 }
5663
5664                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5665                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5666
5667                 if (dm_state->underscan_enable) {
5668                         dst.x += dm_state->underscan_hborder / 2;
5669                         dst.y += dm_state->underscan_vborder / 2;
5670                         dst.width -= dm_state->underscan_hborder;
5671                         dst.height -= dm_state->underscan_vborder;
5672                 }
5673         }
5674
5675         stream->src = src;
5676         stream->dst = dst;
5677
5678         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5679                       dst.x, dst.y, dst.width, dst.height);
5680
5681 }
5682
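/*
 * Derive the stream color depth from the sink's capabilities. YCbCr 4:2:0
 * depth is capped by the HDMI 2.0 HF-VSDB deep-color bits; otherwise the
 * EDID bpc is used (defaulting to 8 when unspecified). A userspace
 * "max bpc" request further caps the result and is rounded down to an
 * even value, e.g. a 12 bpc panel with requested_bpc = 10 ends up at
 * COLOR_DEPTH_101010.
 */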
5683 static enum dc_color_depth
5684 convert_color_depth_from_display_info(const struct drm_connector *connector,
5685                                       bool is_y420, int requested_bpc)
5686 {
5687         uint8_t bpc;
5688
5689         if (is_y420) {
5690                 bpc = 8;
5691
5692                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5693                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5694                         bpc = 16;
5695                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5696                         bpc = 12;
5697                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5698                         bpc = 10;
5699         } else {
5700                 bpc = (uint8_t)connector->display_info.bpc;
5701                 /* Assume 8 bpc by default if no bpc is specified. */
5702                 bpc = bpc ? bpc : 8;
5703         }
5704
5705         if (requested_bpc > 0) {
5706                 /*
5707                  * Cap display bpc based on the user requested value.
5708                  *
5709                  * The value for state->max_bpc may not be correctly updated
5710                  * depending on when the connector gets added to the state
5711                  * or if this was called outside of atomic check, so it
5712                  * can't be used directly.
5713                  */
5714                 bpc = min_t(u8, bpc, requested_bpc);
5715
5716                 /* Round down to the nearest even number. */
5717                 bpc = bpc - (bpc & 1);
5718         }
5719
5720         switch (bpc) {
5721         case 0:
5722                 /*
5723                  * Temporary Work around, DRM doesn't parse color depth for
5724                  * EDID revision before 1.4
5725                  * TODO: Fix edid parsing
5726                  */
5727                 return COLOR_DEPTH_888;
5728         case 6:
5729                 return COLOR_DEPTH_666;
5730         case 8:
5731                 return COLOR_DEPTH_888;
5732         case 10:
5733                 return COLOR_DEPTH_101010;
5734         case 12:
5735                 return COLOR_DEPTH_121212;
5736         case 14:
5737                 return COLOR_DEPTH_141414;
5738         case 16:
5739                 return COLOR_DEPTH_161616;
5740         default:
5741                 return COLOR_DEPTH_UNDEFINED;
5742         }
5743 }
5744
5745 static enum dc_aspect_ratio
5746 get_aspect_ratio(const struct drm_display_mode *mode_in)
5747 {
5748         /* 1-1 mapping, since both enums follow the HDMI spec. */
5749         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5750 }
5751
5752 static enum dc_color_space
5753 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5754 {
5755         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5756
5757         switch (dc_crtc_timing->pixel_encoding) {
5758         case PIXEL_ENCODING_YCBCR422:
5759         case PIXEL_ENCODING_YCBCR444:
5760         case PIXEL_ENCODING_YCBCR420:
5761         {
5762                 /*
5763                  * 27030 kHz is the separation point between HDTV and SDTV;
5764                  * per the HDMI spec, we use YCbCr709 above it and YCbCr601
5765                  * below it.
5766                  */
5767                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5768                         if (dc_crtc_timing->flags.Y_ONLY)
5769                                 color_space =
5770                                         COLOR_SPACE_YCBCR709_LIMITED;
5771                         else
5772                                 color_space = COLOR_SPACE_YCBCR709;
5773                 } else {
5774                         if (dc_crtc_timing->flags.Y_ONLY)
5775                                 color_space =
5776                                         COLOR_SPACE_YCBCR601_LIMITED;
5777                         else
5778                                 color_space = COLOR_SPACE_YCBCR601;
5779                 }
5780
5781         }
5782         break;
5783         case PIXEL_ENCODING_RGB:
5784                 color_space = COLOR_SPACE_SRGB;
5785                 break;
5786
5787         default:
5788                 WARN_ON(1);
5789                 break;
5790         }
5791
5792         return color_space;
5793 }
5794
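/*
 * Walk down from the requested color depth until the required TMDS clock
 * fits under the sink's max_tmds_clock (both in kHz). The pixel clock is
 * scaled by bits per component (e.g. 10 bpc costs 30/24 of the 8 bpc
 * clock) and halved for YCbCr 4:2:0. For example, 4K60 at 594000 kHz
 * cannot carry 10 bpc on a 600000 kHz HDMI 2.0 link (742500 kHz would be
 * needed), so the depth drops back to 8 bpc.
 */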
5795 static bool adjust_colour_depth_from_display_info(
5796         struct dc_crtc_timing *timing_out,
5797         const struct drm_display_info *info)
5798 {
5799         enum dc_color_depth depth = timing_out->display_color_depth;
5800         int normalized_clk;
5801         do {
5802                 normalized_clk = timing_out->pix_clk_100hz / 10;
5803                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5804                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5805                         normalized_clk /= 2;
5806                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5807                 switch (depth) {
5808                 case COLOR_DEPTH_888:
5809                         break;
5810                 case COLOR_DEPTH_101010:
5811                         normalized_clk = (normalized_clk * 30) / 24;
5812                         break;
5813                 case COLOR_DEPTH_121212:
5814                         normalized_clk = (normalized_clk * 36) / 24;
5815                         break;
5816                 case COLOR_DEPTH_161616:
5817                         normalized_clk = (normalized_clk * 48) / 24;
5818                         break;
5819                 default:
5820                         /* The above depths are the only ones valid for HDMI. */
5821                         return false;
5822                 }
5823                 if (normalized_clk <= info->max_tmds_clock) {
5824                         timing_out->display_color_depth = depth;
5825                         return true;
5826                 }
5827         } while (--depth > COLOR_DEPTH_666);
5828         return false;
5829 }
5830
5831 static void fill_stream_properties_from_drm_display_mode(
5832         struct dc_stream_state *stream,
5833         const struct drm_display_mode *mode_in,
5834         const struct drm_connector *connector,
5835         const struct drm_connector_state *connector_state,
5836         const struct dc_stream_state *old_stream,
5837         int requested_bpc)
5838 {
5839         struct dc_crtc_timing *timing_out = &stream->timing;
5840         const struct drm_display_info *info = &connector->display_info;
5841         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5842         struct hdmi_vendor_infoframe hv_frame;
5843         struct hdmi_avi_infoframe avi_frame;
5844
5845         memset(&hv_frame, 0, sizeof(hv_frame));
5846         memset(&avi_frame, 0, sizeof(avi_frame));
5847
5848         timing_out->h_border_left = 0;
5849         timing_out->h_border_right = 0;
5850         timing_out->v_border_top = 0;
5851         timing_out->v_border_bottom = 0;
5852         /* TODO: un-hardcode */
5853         if (drm_mode_is_420_only(info, mode_in)
5854                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5855                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5856         else if (drm_mode_is_420_also(info, mode_in)
5857                         && aconnector->force_yuv420_output)
5858                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5859         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5860                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5861                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5862         else
5863                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5864
5865         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5866         timing_out->display_color_depth = convert_color_depth_from_display_info(
5867                 connector,
5868                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5869                 requested_bpc);
5870         timing_out->scan_type = SCANNING_TYPE_NODATA;
5871         timing_out->hdmi_vic = 0;
5872
5873         if (old_stream) {
5874                 timing_out->vic = old_stream->timing.vic;
5875                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5876                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5877         } else {
5878                 timing_out->vic = drm_match_cea_mode(mode_in);
5879                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5880                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5881                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5882                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5883         }
5884
5885         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5886                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5887                 timing_out->vic = avi_frame.video_code;
5888                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5889                 timing_out->hdmi_vic = hv_frame.vic;
5890         }
5891
5892         if (is_freesync_video_mode(mode_in, aconnector)) {
5893                 timing_out->h_addressable = mode_in->hdisplay;
5894                 timing_out->h_total = mode_in->htotal;
5895                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5896                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5897                 timing_out->v_total = mode_in->vtotal;
5898                 timing_out->v_addressable = mode_in->vdisplay;
5899                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5900                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5901                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5902         } else {
5903                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5904                 timing_out->h_total = mode_in->crtc_htotal;
5905                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5906                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5907                 timing_out->v_total = mode_in->crtc_vtotal;
5908                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5909                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5910                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5911                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5912         }
5913
5914         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5915
5916         stream->output_color_space = get_output_color_space(timing_out);
5917
5918         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5919         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5920         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5921                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5922                     drm_mode_is_420_also(info, mode_in) &&
5923                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5924                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5925                         adjust_colour_depth_from_display_info(timing_out, info);
5926                 }
5927         }
5928 }
5929
5930 static void fill_audio_info(struct audio_info *audio_info,
5931                             const struct drm_connector *drm_connector,
5932                             const struct dc_sink *dc_sink)
5933 {
5934         int i = 0;
5935         int cea_revision = 0;
5936         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5937
5938         audio_info->manufacture_id = edid_caps->manufacturer_id;
5939         audio_info->product_id = edid_caps->product_id;
5940
5941         cea_revision = drm_connector->display_info.cea_rev;
5942
5943         strscpy(audio_info->display_name,
5944                 edid_caps->display_name,
5945                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5946
5947         if (cea_revision >= 3) {
5948                 audio_info->mode_count = edid_caps->audio_mode_count;
5949
5950                 for (i = 0; i < audio_info->mode_count; ++i) {
5951                         audio_info->modes[i].format_code =
5952                                         (enum audio_format_code)
5953                                         (edid_caps->audio_modes[i].format_code);
5954                         audio_info->modes[i].channel_count =
5955                                         edid_caps->audio_modes[i].channel_count;
5956                         audio_info->modes[i].sample_rates.all =
5957                                         edid_caps->audio_modes[i].sample_rate;
5958                         audio_info->modes[i].sample_size =
5959                                         edid_caps->audio_modes[i].sample_size;
5960                 }
5961         }
5962
5963         audio_info->flags.all = edid_caps->speaker_flags;
5964
5965         /* TODO: We only check the progressive mode; check the interlaced mode too */
5966         if (drm_connector->latency_present[0]) {
5967                 audio_info->video_latency = drm_connector->video_latency[0];
5968                 audio_info->audio_latency = drm_connector->audio_latency[0];
5969         }
5970
5971         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5972
5973 }
5974
5975 static void
5976 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5977                                       struct drm_display_mode *dst_mode)
5978 {
5979         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5980         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5981         dst_mode->crtc_clock = src_mode->crtc_clock;
5982         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5983         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5984         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5985         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5986         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5987         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5988         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5989         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5990         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5991         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5992         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5993 }
5994
5995 static void
5996 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5997                                         const struct drm_display_mode *native_mode,
5998                                         bool scale_enabled)
5999 {
6000         if (scale_enabled) {
6001                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6002         } else if (native_mode->clock == drm_mode->clock &&
6003                         native_mode->htotal == drm_mode->htotal &&
6004                         native_mode->vtotal == drm_mode->vtotal) {
6005                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6006         } else {
6007                 /* no scaling and no amdgpu-inserted mode; nothing to patch */
6008         }
6009 }
6010
6011 static struct dc_sink *
6012 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6013 {
6014         struct dc_sink_init_data sink_init_data = { 0 };
6015         struct dc_sink *sink = NULL;
6016         sink_init_data.link = aconnector->dc_link;
6017         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6018
6019         sink = dc_sink_create(&sink_init_data);
6020         if (!sink) {
6021                 DRM_ERROR("Failed to create sink!\n");
6022                 return NULL;
6023         }
6024         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6025
6026         return sink;
6027 }
6028
6029 static void set_multisync_trigger_params(
6030                 struct dc_stream_state *stream)
6031 {
6032         struct dc_stream_state *master = NULL;
6033
6034         if (stream->triggered_crtc_reset.enabled) {
6035                 master = stream->triggered_crtc_reset.event_source;
6036                 stream->triggered_crtc_reset.event =
6037                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6038                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6039                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6040         }
6041 }
6042
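/*
 * Pick the stream with the highest refresh rate as the multisync master.
 * The rate falls out of the timing directly:
 * Hz = (pix_clk_100hz * 100) / (h_total * v_total), e.g. 1080p60 gives
 * 148500000 / (2200 * 1125) = 60. Every synchronized stream then points
 * its CRTC-reset event source at the master.
 */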
6043 static void set_master_stream(struct dc_stream_state *stream_set[],
6044                               int stream_count)
6045 {
6046         int j, highest_rfr = 0, master_stream = 0;
6047
6048         for (j = 0;  j < stream_count; j++) {
6049                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6050                         int refresh_rate = 0;
6051
6052                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6053                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6054                         if (refresh_rate > highest_rfr) {
6055                                 highest_rfr = refresh_rate;
6056                                 master_stream = j;
6057                         }
6058                 }
6059         }
6060         for (j = 0;  j < stream_count; j++) {
6061                 if (stream_set[j])
6062                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6063         }
6064 }
6065
6066 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6067 {
6068         int i = 0;
6069         struct dc_stream_state *stream;
6070
6071         if (context->stream_count < 2)
6072                 return;
6073         for (i = 0; i < context->stream_count; i++) {
6074                 if (!context->streams[i])
6075                         continue;
6076                 /*
6077                  * TODO: add a function to read AMD VSDB bits and set
6078                  * crtc_sync_master.multi_sync_enabled flag
6079                  * For now it's set to false
6080                  */
6081         }
6082
6083         set_master_stream(context->streams, context->stream_count);
6084
6085         for (i = 0; i < context->stream_count; i++) {
6086                 stream = context->streams[i];
6087
6088                 if (!stream)
6089                         continue;
6090
6091                 set_multisync_trigger_params(stream);
6092         }
6093 }
6094
6095 #if defined(CONFIG_DRM_AMD_DC_DCN)
6096 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6097                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6098                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6099 {
6100         stream->timing.flags.DSC = 0;
6101         dsc_caps->is_dsc_supported = false;
6102
6103         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6104                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6105                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6106                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6107                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6108                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6109                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6110                                 dsc_caps);
6111         }
6112 }
6113
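/*
 * eDP DSC policy: compute the DSC bandwidth range for a fixed 8 bpp
 * target (clamped to the panel's edp_max_bits_per_pixel). If even the
 * maximum of that range fits the link bandwidth, enable DSC pinned at
 * that bpp; otherwise fall back to letting DSC pick a config that
 * squeezes the timing into the available link bandwidth.
 */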
6114 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6115                                     struct dc_sink *sink, struct dc_stream_state *stream,
6116                                     struct dsc_dec_dpcd_caps *dsc_caps,
6117                                     uint32_t max_dsc_target_bpp_limit_override)
6118 {
6119         const struct dc_link_settings *verified_link_cap = NULL;
6120         uint32_t link_bw_in_kbps;
6121         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6122         struct dc *dc = sink->ctx->dc;
6123         struct dc_dsc_bw_range bw_range = {0};
6124         struct dc_dsc_config dsc_cfg = {0};
6125
6126         verified_link_cap = dc_link_get_link_cap(stream->link);
6127         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6128         edp_min_bpp_x16 = 8 * 16;
6129         edp_max_bpp_x16 = 8 * 16;
6130
6131         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6132                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6133
6134         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6135                 edp_min_bpp_x16 = edp_max_bpp_x16;
6136
6137         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6138                                 dc->debug.dsc_min_slice_height_override,
6139                                 edp_min_bpp_x16, edp_max_bpp_x16,
6140                                 dsc_caps,
6141                                 &stream->timing,
6142                                 &bw_range)) {
6143
6144                 if (bw_range.max_kbps < link_bw_in_kbps) {
6145                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6146                                         dsc_caps,
6147                                         dc->debug.dsc_min_slice_height_override,
6148                                         max_dsc_target_bpp_limit_override,
6149                                         0,
6150                                         &stream->timing,
6151                                         &dsc_cfg)) {
6152                                 stream->timing.dsc_cfg = dsc_cfg;
6153                                 stream->timing.flags.DSC = 1;
6154                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6155                         }
6156                         return;
6157                 }
6158         }
6159
6160         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6161                                 dsc_caps,
6162                                 dc->debug.dsc_min_slice_height_override,
6163                                 max_dsc_target_bpp_limit_override,
6164                                 link_bw_in_kbps,
6165                                 &stream->timing,
6166                                 &dsc_cfg)) {
6167                 stream->timing.dsc_cfg = dsc_cfg;
6168                 stream->timing.flags.DSC = 1;
6169         }
6170 }
6171
6172 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6173                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6174                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6175 {
6176         struct drm_connector *drm_connector = &aconnector->base;
6177         uint32_t link_bandwidth_kbps;
6178         uint32_t max_dsc_target_bpp_limit_override = 0;
6179         struct dc *dc = sink->ctx->dc;
6180         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6181         uint32_t dsc_max_supported_bw_in_kbps;
6182
6183         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6184                                                         dc_link_get_link_cap(aconnector->dc_link));
6185
6186         if (stream->link && stream->link->local_sink)
6187                 max_dsc_target_bpp_limit_override =
6188                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6189
6190         /* Set DSC policy according to dsc_clock_en */
6191         dc_dsc_policy_set_enable_dsc_when_not_needed(
6192                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6193
6194         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6195             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6196
6197                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6198
6199         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6200                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6201                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6202                                                 dsc_caps,
6203                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6204                                                 max_dsc_target_bpp_limit_override,
6205                                                 link_bandwidth_kbps,
6206                                                 &stream->timing,
6207                                                 &stream->timing.dsc_cfg)) {
6208                                 stream->timing.flags.DSC = 1;
6209                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6210                                                                  __func__, drm_connector->name);
6211                         }
6212                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6213                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6214                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6215                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6216
6217                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6218                                         max_supported_bw_in_kbps > 0 &&
6219                                         dsc_max_supported_bw_in_kbps > 0)
6220                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6221                                                 dsc_caps,
6222                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6223                                                 max_dsc_target_bpp_limit_override,
6224                                                 dsc_max_supported_bw_in_kbps,
6225                                                 &stream->timing,
6226                                                 &stream->timing.dsc_cfg)) {
6227                                         stream->timing.flags.DSC = 1;
6228                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6229                                                                          __func__, drm_connector->name);
6230                                 }
6231                 }
6232         }
6233
6234         /* Overwrite the stream flag if DSC is enabled through debugfs */
6235         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6236                 stream->timing.flags.DSC = 1;
6237
6238         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6239                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6240
6241         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6242                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6243
6244         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6245                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6246 }
6247 #endif /* CONFIG_DRM_AMD_DC_DCN */
6248
6249 /**
6250  * DOC: FreeSync Video
6251  *
6252  * When a userspace application wants to play a video, the content follows a
6253  * standard format definition that usually specifies the FPS for that format.
6254  * The list below illustrates some video formats and their expected FPS,
6255  * respectively:
6256  *
6257  * - TV/NTSC (23.976 FPS)
6258  * - Cinema (24 FPS)
6259  * - TV/PAL (25 FPS)
6260  * - TV/NTSC (29.97 FPS)
6261  * - TV/NTSC (30 FPS)
6262  * - Cinema HFR (48 FPS)
6263  * - TV/PAL (50 FPS)
6264  * - Commonly used (60 FPS)
6265  * - Multiples of 24 (48,72,96,120 FPS)
6266  *
6267  * The list of standard video formats is not huge and can be added to the
6268  * connector's mode list beforehand. With that, userspace can leverage
6269  * FreeSync to extend the front porch in order to attain the target refresh
6270  * rate. Such a switch will happen seamlessly, without screen blanking or
6271  * reprogramming of the output in any other way. If userspace requests a
6272  * mode-set change compatible with FreeSync modes that only differ in the
6273  * refresh rate, DC will skip the full update and avoid any blink during the
6274  * transition. For example, a video player can change the mode from 60Hz to
6275  * 30Hz for playing TV/NTSC content when it goes full screen, without
6276  * causing any display blink. The same concept applies to any other
6277  * compatible mode-set change.
6278  */
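/*
 * Return the mode that FreeSync video modes are derived from: the
 * preferred mode's resolution at the highest refresh rate the sink lists
 * (some monitors do not mark their fastest mode as preferred). Falls back
 * to the first listed mode when the EDID has no preferred mode, and
 * caches the result in freesync_vid_base for subsequent lookups.
 */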
6279 static struct drm_display_mode *
6280 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6281                           bool use_probed_modes)
6282 {
6283         struct drm_display_mode *m, *m_pref = NULL;
6284         u16 current_refresh, highest_refresh;
6285         struct list_head *list_head = use_probed_modes ?
6286                                                     &aconnector->base.probed_modes :
6287                                                     &aconnector->base.modes;
6288
6289         if (aconnector->freesync_vid_base.clock != 0)
6290                 return &aconnector->freesync_vid_base;
6291
6292         /* Find the preferred mode */
6293         list_for_each_entry(m, list_head, head) {
6294                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6295                         m_pref = m;
6296                         break;
6297                 }
6298         }
6299
6300         if (!m_pref) {
6301                 /* Probably an EDID with no preferred mode. Fall back to first entry */
6302                 m_pref = list_first_entry_or_null(
6303                         &aconnector->base.modes, struct drm_display_mode, head);
6304                 if (!m_pref) {
6305                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6306                         return NULL;
6307                 }
6308         }
6309
6310         highest_refresh = drm_mode_vrefresh(m_pref);
6311
6312         /*
6313          * Find the mode with highest refresh rate with same resolution.
6314          * For some monitors, preferred mode is not the mode with highest
6315          * supported refresh rate.
6316          */
6317         list_for_each_entry(m, list_head, head) {
6318                 current_refresh = drm_mode_vrefresh(m);
6319
6320                 if (m->hdisplay == m_pref->hdisplay &&
6321                     m->vdisplay == m_pref->vdisplay &&
6322                     highest_refresh < current_refresh) {
6323                         highest_refresh = current_refresh;
6324                         m_pref = m;
6325                 }
6326         }
6327
6328         aconnector->freesync_vid_base = *m_pref;
6329         return m_pref;
6330 }
6331
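/*
 * A mode qualifies as a FreeSync video mode when it differs from the base
 * (highest-refresh) mode only in vertical blanking: pixel clock, all
 * horizontal timing, hskew and vscan must match, and vsync start/end must
 * be shifted by exactly the vtotal delta. Such a mode can be reached by
 * stretching the front porch alone, without a full mode set.
 */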
6332 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6333                                    struct amdgpu_dm_connector *aconnector)
6334 {
6335         struct drm_display_mode *high_mode;
6336         int timing_diff;
6337
6338         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6339         if (!high_mode || !mode)
6340                 return false;
6341
6342         timing_diff = high_mode->vtotal - mode->vtotal;
6343
6344         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6345             high_mode->hdisplay != mode->hdisplay ||
6346             high_mode->vdisplay != mode->vdisplay ||
6347             high_mode->hsync_start != mode->hsync_start ||
6348             high_mode->hsync_end != mode->hsync_end ||
6349             high_mode->htotal != mode->htotal ||
6350             high_mode->hskew != mode->hskew ||
6351             high_mode->vscan != mode->vscan ||
6352             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6353             high_mode->vsync_end - mode->vsync_end != timing_diff)
6354                 return false;
6355
6356         return true;
6357 }
6358
6359 static struct dc_stream_state *
6360 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6361                        const struct drm_display_mode *drm_mode,
6362                        const struct dm_connector_state *dm_state,
6363                        const struct dc_stream_state *old_stream,
6364                        int requested_bpc)
6365 {
6366         struct drm_display_mode *preferred_mode = NULL;
6367         struct drm_connector *drm_connector;
6368         const struct drm_connector_state *con_state =
6369                 dm_state ? &dm_state->base : NULL;
6370         struct dc_stream_state *stream = NULL;
6371         struct drm_display_mode mode = *drm_mode;
6372         struct drm_display_mode saved_mode;
6373         struct drm_display_mode *freesync_mode = NULL;
6374         bool native_mode_found = false;
6375         bool recalculate_timing = false;
6376         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6377         int mode_refresh;
6378         int preferred_refresh = 0;
6379 #if defined(CONFIG_DRM_AMD_DC_DCN)
6380         struct dsc_dec_dpcd_caps dsc_caps;
6381 #endif
6382         struct dc_sink *sink = NULL;
6383
6384         memset(&saved_mode, 0, sizeof(saved_mode));
6385
6386         if (aconnector == NULL) {
6387                 DRM_ERROR("aconnector is NULL!\n");
6388                 return stream;
6389         }
6390
6391         drm_connector = &aconnector->base;
6392
6393         if (!aconnector->dc_sink) {
6394                 sink = create_fake_sink(aconnector);
6395                 if (!sink)
6396                         return stream;
6397         } else {
6398                 sink = aconnector->dc_sink;
6399                 dc_sink_retain(sink);
6400         }
6401
6402         stream = dc_create_stream_for_sink(sink);
6403
6404         if (stream == NULL) {
6405                 DRM_ERROR("Failed to create stream for sink!\n");
6406                 goto finish;
6407         }
6408
6409         stream->dm_stream_context = aconnector;
6410
6411         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6412                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6413
6414         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6415                 /* Search for preferred mode */
6416                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6417                         native_mode_found = true;
6418                         break;
6419                 }
6420         }
6421         if (!native_mode_found)
6422                 preferred_mode = list_first_entry_or_null(
6423                                 &aconnector->base.modes,
6424                                 struct drm_display_mode,
6425                                 head);
6426
6427         mode_refresh = drm_mode_vrefresh(&mode);
6428
6429         if (preferred_mode == NULL) {
6430                 /*
6431                  * This may not be an error, the use case is when we have no
6432                  * usermode calls to reset and set mode upon hotplug. In this
6433                  * case, we call set mode ourselves to restore the previous mode,
6434                  * and the mode list may not be filled in yet.
6435                  */
6436                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6437         } else {
6438                 recalculate_timing = amdgpu_freesync_vid_mode &&
6439                                  is_freesync_video_mode(&mode, aconnector);
6440                 if (recalculate_timing) {
6441                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6442                         saved_mode = mode;
6443                         mode = *freesync_mode;
6444                 } else {
6445                         decide_crtc_timing_for_drm_display_mode(
6446                                 &mode, preferred_mode, scale);
6447
6448                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6449                 }
6450         }
6451
6452         if (recalculate_timing)
6453                 drm_mode_set_crtcinfo(&saved_mode, 0);
6454         else if (!dm_state)
6455                 drm_mode_set_crtcinfo(&mode, 0);
6456
6457         /*
6458          * If scaling is enabled and the refresh rate didn't change,
6459          * we copy the VIC and sync polarities from the old timing.
6460          */
6461         if (!scale || mode_refresh != preferred_refresh)
6462                 fill_stream_properties_from_drm_display_mode(
6463                         stream, &mode, &aconnector->base, con_state, NULL,
6464                         requested_bpc);
6465         else
6466                 fill_stream_properties_from_drm_display_mode(
6467                         stream, &mode, &aconnector->base, con_state, old_stream,
6468                         requested_bpc);
6469
6470 #if defined(CONFIG_DRM_AMD_DC_DCN)
6471         /* SST DSC determination policy */
6472         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6473         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6474                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6475 #endif
6476
6477         update_stream_scaling_settings(&mode, dm_state, stream);
6478
6479         fill_audio_info(
6480                 &stream->audio_info,
6481                 drm_connector,
6482                 sink);
6483
6484         update_stream_signal(stream, sink);
6485
6486         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6487                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6488
6489         if (stream->link->psr_settings.psr_feature_enabled) {
6490                 /*
6491                  * Decide whether the stream supports the VSC SDP colorimetry
6492                  * capability before building the VSC info packet.
6493                  */
6494                 stream->use_vsc_sdp_for_colorimetry = false;
6495                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6496                         stream->use_vsc_sdp_for_colorimetry =
6497                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6498                 } else {
6499                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6500                                 stream->use_vsc_sdp_for_colorimetry = true;
6501                 }
6502                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6503                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6504
6505         }
6506 finish:
6507         dc_sink_release(sink);
6508
6509         return stream;
6510 }
6511
6512 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6513 {
6514         drm_crtc_cleanup(crtc);
6515         kfree(crtc);
6516 }
6517
6518 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6519                                   struct drm_crtc_state *state)
6520 {
6521         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6522
6523         /* TODO: Destroy dc_stream objects once the stream object is flattened */
6524         if (cur->stream)
6525                 dc_stream_release(cur->stream);
6526
6527         __drm_atomic_helper_crtc_destroy_state(state);
6528
6529         kfree(state);
6532 }
6533
6534 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6535 {
6536         struct dm_crtc_state *state;
6537
6538         if (crtc->state)
6539                 dm_crtc_destroy_state(crtc, crtc->state);
6540
6541         state = kzalloc(sizeof(*state), GFP_KERNEL);
6542         if (WARN_ON(!state))
6543                 return;
6544
6545         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6546 }
6547
6548 static struct drm_crtc_state *
6549 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6550 {
6551         struct dm_crtc_state *state, *cur;
6552
6553         if (WARN_ON(!crtc->state))
6554                 return NULL;
6555
6556         cur = to_dm_crtc_state(crtc->state);
6557
6558         state = kzalloc(sizeof(*state), GFP_KERNEL);
6559         if (!state)
6560                 return NULL;
6561
6562         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6563
6564         if (cur->stream) {
6565                 state->stream = cur->stream;
6566                 dc_stream_retain(state->stream);
6567         }
6568
6569         state->active_planes = cur->active_planes;
6570         state->vrr_infopacket = cur->vrr_infopacket;
6571         state->abm_level = cur->abm_level;
6572         state->vrr_supported = cur->vrr_supported;
6573         state->freesync_config = cur->freesync_config;
6574         state->cm_has_degamma = cur->cm_has_degamma;
6575         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6576         state->force_dpms_off = cur->force_dpms_off;
6577         /* TODO: Duplicate dc_stream once the stream object is flattened */
6578
6579         return &state->base;
6580 }
6581
6582 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6583 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6584 {
6585         crtc_debugfs_init(crtc);
6586
6587         return 0;
6588 }
6589 #endif
6590
6591 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6592 {
6593         enum dc_irq_source irq_source;
6594         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6595         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6596         int rc;
6597
6598         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6599
6600         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6601
6602         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6603                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6604         return rc;
6605 }
6606
6607 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6608 {
6609         enum dc_irq_source irq_source;
6610         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6611         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6612         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6613 #if defined(CONFIG_DRM_AMD_DC_DCN)
6614         struct amdgpu_display_manager *dm = &adev->dm;
6615         struct vblank_control_work *work;
6616 #endif
6617         int rc = 0;
6618
6619         if (enable) {
6620                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6621                 if (amdgpu_dm_vrr_active(acrtc_state))
6622                         rc = dm_set_vupdate_irq(crtc, true);
6623         } else {
6624                 /* vblank irq off -> vupdate irq off */
6625                 rc = dm_set_vupdate_irq(crtc, false);
6626         }
6627
6628         if (rc)
6629                 return rc;
6630
6631         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6632
6633         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6634                 return -EBUSY;
6635
6636         if (amdgpu_in_reset(adev))
6637                 return 0;
6638
6639 #if defined(CONFIG_DRM_AMD_DC_DCN)
6640         if (dm->vblank_control_workqueue) {
6641                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6642                 if (!work)
6643                         return -ENOMEM;
6644
6645                 INIT_WORK(&work->work, vblank_control_worker);
6646                 work->dm = dm;
6647                 work->acrtc = acrtc;
6648                 work->enable = enable;
6649
6650                 if (acrtc_state->stream) {
6651                         dc_stream_retain(acrtc_state->stream);
6652                         work->stream = acrtc_state->stream;
6653                 }
6654
6655                 queue_work(dm->vblank_control_workqueue, &work->work);
6656         }
6657 #endif
6658
6659         return 0;
6660 }
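
/*
 * Sketch of the consuming side (assumed shape only; the actual
 * vblank_control_worker lives elsewhere in this file): the worker runs
 * in process context, drops the stream reference taken above and frees
 * the work item.
 */
#if 0
static void example_vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);

	/* ... update DC vblank/PSR state for vblank_work->acrtc here ... */

	if (vblank_work->stream)
		dc_stream_release(vblank_work->stream);
	kfree(vblank_work);
}
#endif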
6661
6662 static int dm_enable_vblank(struct drm_crtc *crtc)
6663 {
6664         return dm_set_vblank(crtc, true);
6665 }
6666
6667 static void dm_disable_vblank(struct drm_crtc *crtc)
6668 {
6669         dm_set_vblank(crtc, false);
6670 }
6671
6672 /* Only the options currently available to the driver are implemented */
6673 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6674         .reset = dm_crtc_reset_state,
6675         .destroy = amdgpu_dm_crtc_destroy,
6676         .set_config = drm_atomic_helper_set_config,
6677         .page_flip = drm_atomic_helper_page_flip,
6678         .atomic_duplicate_state = dm_crtc_duplicate_state,
6679         .atomic_destroy_state = dm_crtc_destroy_state,
6680         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6681         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6682         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6683         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6684         .enable_vblank = dm_enable_vblank,
6685         .disable_vblank = dm_disable_vblank,
6686         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6687 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6688         .late_register = amdgpu_dm_crtc_late_register,
6689 #endif
6690 };
6691
6692 static enum drm_connector_status
6693 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6694 {
6695         bool connected;
6696         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6697
6698         /*
6699          * Notes:
6700          * 1. This interface is NOT called in context of HPD irq.
6701          * 2. This interface *is called* in context of user-mode ioctl, which
6702          * makes it a bad place for *any* MST-related activity.
6703          */
6704
6705         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6706             !aconnector->fake_enable)
6707                 connected = (aconnector->dc_sink != NULL);
6708         else
6709                 connected = (aconnector->base.force == DRM_FORCE_ON);
6710
6711         update_subconnector_property(aconnector);
6712
6713         return (connected ? connector_status_connected :
6714                         connector_status_disconnected);
6715 }
6716
6717 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6718                                             struct drm_connector_state *connector_state,
6719                                             struct drm_property *property,
6720                                             uint64_t val)
6721 {
6722         struct drm_device *dev = connector->dev;
6723         struct amdgpu_device *adev = drm_to_adev(dev);
6724         struct dm_connector_state *dm_old_state =
6725                 to_dm_connector_state(connector->state);
6726         struct dm_connector_state *dm_new_state =
6727                 to_dm_connector_state(connector_state);
6728
6729         int ret = -EINVAL;
6730
6731         if (property == dev->mode_config.scaling_mode_property) {
6732                 enum amdgpu_rmx_type rmx_type;
6733
6734                 switch (val) {
6735                 case DRM_MODE_SCALE_CENTER:
6736                         rmx_type = RMX_CENTER;
6737                         break;
6738                 case DRM_MODE_SCALE_ASPECT:
6739                         rmx_type = RMX_ASPECT;
6740                         break;
6741                 case DRM_MODE_SCALE_FULLSCREEN:
6742                         rmx_type = RMX_FULL;
6743                         break;
6744                 case DRM_MODE_SCALE_NONE:
6745                 default:
6746                         rmx_type = RMX_OFF;
6747                         break;
6748                 }
6749
6750                 if (dm_old_state->scaling == rmx_type)
6751                         return 0;
6752
6753                 dm_new_state->scaling = rmx_type;
6754                 ret = 0;
6755         } else if (property == adev->mode_info.underscan_hborder_property) {
6756                 dm_new_state->underscan_hborder = val;
6757                 ret = 0;
6758         } else if (property == adev->mode_info.underscan_vborder_property) {
6759                 dm_new_state->underscan_vborder = val;
6760                 ret = 0;
6761         } else if (property == adev->mode_info.underscan_property) {
6762                 dm_new_state->underscan_enable = val;
6763                 ret = 0;
6764         } else if (property == adev->mode_info.abm_level_property) {
6765                 dm_new_state->abm_level = val;
6766                 ret = 0;
6767         }
6768
6769         return ret;
6770 }
6771
6772 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6773                                             const struct drm_connector_state *state,
6774                                             struct drm_property *property,
6775                                             uint64_t *val)
6776 {
6777         struct drm_device *dev = connector->dev;
6778         struct amdgpu_device *adev = drm_to_adev(dev);
6779         struct dm_connector_state *dm_state =
6780                 to_dm_connector_state(state);
6781         int ret = -EINVAL;
6782
6783         if (property == dev->mode_config.scaling_mode_property) {
6784                 switch (dm_state->scaling) {
6785                 case RMX_CENTER:
6786                         *val = DRM_MODE_SCALE_CENTER;
6787                         break;
6788                 case RMX_ASPECT:
6789                         *val = DRM_MODE_SCALE_ASPECT;
6790                         break;
6791                 case RMX_FULL:
6792                         *val = DRM_MODE_SCALE_FULLSCREEN;
6793                         break;
6794                 case RMX_OFF:
6795                 default:
6796                         *val = DRM_MODE_SCALE_NONE;
6797                         break;
6798                 }
6799                 ret = 0;
6800         } else if (property == adev->mode_info.underscan_hborder_property) {
6801                 *val = dm_state->underscan_hborder;
6802                 ret = 0;
6803         } else if (property == adev->mode_info.underscan_vborder_property) {
6804                 *val = dm_state->underscan_vborder;
6805                 ret = 0;
6806         } else if (property == adev->mode_info.underscan_property) {
6807                 *val = dm_state->underscan_enable;
6808                 ret = 0;
6809         } else if (property == adev->mode_info.abm_level_property) {
6810                 *val = dm_state->abm_level;
6811                 ret = 0;
6812         }
6813
6814         return ret;
6815 }
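
/*
 * These properties are driven from userspace through the atomic ioctl.
 * Hypothetical libdrm fragment (fd and the *_prop_id lookups via
 * drmModeObjectGetProperties() are assumed) requesting aspect-ratio
 * scaling with underscan and a 16 pixel horizontal border:
 */
#if 0
	drmModeAtomicReq *req = drmModeAtomicAlloc();

	drmModeAtomicAddProperty(req, connector_id, scaling_mode_prop_id,
				 DRM_MODE_SCALE_ASPECT);
	drmModeAtomicAddProperty(req, connector_id, underscan_prop_id, 1);
	drmModeAtomicAddProperty(req, connector_id, underscan_hborder_prop_id, 16);
	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
#endif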
6816
6817 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6818 {
6819         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6820
6821         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6822 }
6823
6824 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6825 {
6826         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6827         const struct dc_link *link = aconnector->dc_link;
6828         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6829         struct amdgpu_display_manager *dm = &adev->dm;
6830         int i;
6831
6832         /*
6833          * Call only if mst_mgr was initialized before, since it's not done
6834          * for all connector types.
6835          */
6836         if (aconnector->mst_mgr.dev)
6837                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6838
6839 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6840         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6841         for (i = 0; i < dm->num_of_edps; i++) {
6842                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6843                         backlight_device_unregister(dm->backlight_dev[i]);
6844                         dm->backlight_dev[i] = NULL;
6845                 }
6846         }
6847 #endif
6848
6849         if (aconnector->dc_em_sink)
6850                 dc_sink_release(aconnector->dc_em_sink);
6851         aconnector->dc_em_sink = NULL;
6852         if (aconnector->dc_sink)
6853                 dc_sink_release(aconnector->dc_sink);
6854         aconnector->dc_sink = NULL;
6855
6856         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6857         drm_connector_unregister(connector);
6858         drm_connector_cleanup(connector);
6859         if (aconnector->i2c) {
6860                 i2c_del_adapter(&aconnector->i2c->base);
6861                 kfree(aconnector->i2c);
6862         }
6863         kfree(aconnector->dm_dp_aux.aux.name);
6864
6865         kfree(connector);
6866 }
6867
6868 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6869 {
6870         struct dm_connector_state *state =
6871                 to_dm_connector_state(connector->state);
6872
6873         if (connector->state)
6874                 __drm_atomic_helper_connector_destroy_state(connector->state);
6875
6876         kfree(state);
6877
6878         state = kzalloc(sizeof(*state), GFP_KERNEL);
6879
6880         if (state) {
6881                 state->scaling = RMX_OFF;
6882                 state->underscan_enable = false;
6883                 state->underscan_hborder = 0;
6884                 state->underscan_vborder = 0;
6885                 state->base.max_requested_bpc = 8;
6886                 state->vcpi_slots = 0;
6887                 state->pbn = 0;
6888                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6889                         state->abm_level = amdgpu_dm_abm_level;
6890
6891                 __drm_atomic_helper_connector_reset(connector, &state->base);
6892         }
6893 }
6894
6895 struct drm_connector_state *
6896 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6897 {
6898         struct dm_connector_state *state =
6899                 to_dm_connector_state(connector->state);
6900
6901         struct dm_connector_state *new_state =
6902                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6903
6904         if (!new_state)
6905                 return NULL;
6906
6907         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6908
6909         new_state->freesync_capable = state->freesync_capable;
6910         new_state->abm_level = state->abm_level;
6911         new_state->scaling = state->scaling;
6912         new_state->underscan_enable = state->underscan_enable;
6913         new_state->underscan_hborder = state->underscan_hborder;
6914         new_state->underscan_vborder = state->underscan_vborder;
6915         new_state->vcpi_slots = state->vcpi_slots;
6916         new_state->pbn = state->pbn;
6917         return &new_state->base;
6918 }
6919
6920 static int
6921 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6922 {
6923         struct amdgpu_dm_connector *amdgpu_dm_connector =
6924                 to_amdgpu_dm_connector(connector);
6925         int r;
6926
6927         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6928             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6929                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6930                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6931                 if (r)
6932                         return r;
6933         }
6934
6935 #if defined(CONFIG_DEBUG_FS)
6936         connector_debugfs_init(amdgpu_dm_connector);
6937 #endif
6938
6939         return 0;
6940 }
6941
6942 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6943         .reset = amdgpu_dm_connector_funcs_reset,
6944         .detect = amdgpu_dm_connector_detect,
6945         .fill_modes = drm_helper_probe_single_connector_modes,
6946         .destroy = amdgpu_dm_connector_destroy,
6947         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6948         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6949         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6950         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6951         .late_register = amdgpu_dm_connector_late_register,
6952         .early_unregister = amdgpu_dm_connector_unregister
6953 };
6954
6955 static int get_modes(struct drm_connector *connector)
6956 {
6957         return amdgpu_dm_connector_get_modes(connector);
6958 }
6959
6960 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6961 {
6962         struct dc_sink_init_data init_params = {
6963                         .link = aconnector->dc_link,
6964                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6965         };
6966         struct edid *edid;
6967
6968         if (!aconnector->base.edid_blob_ptr) {
6969                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6970                                 aconnector->base.name);
6971
6972                 aconnector->base.force = DRM_FORCE_OFF;
6973                 aconnector->base.override_edid = false;
6974                 return;
6975         }
6976
6977         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6978
6979         aconnector->edid = edid;
6980
6981         aconnector->dc_em_sink = dc_link_add_remote_sink(
6982                 aconnector->dc_link,
6983                 (uint8_t *)edid,
6984                 (edid->extensions + 1) * EDID_LENGTH,
6985                 &init_params);
6986
6987         if (aconnector->base.force == DRM_FORCE_ON) {
6988                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6989                         aconnector->dc_link->local_sink :
6990                         aconnector->dc_em_sink;
6991                 dc_sink_retain(aconnector->dc_sink);
6992         }
6993 }
6994
6995 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6996 {
6997         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6998
6999         /*
7000          * In case of headless boot with force on for DP managed connector,
7001          * those settings have to be != 0 to get initial modeset
7002          */
7003         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7004                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7005                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7006         }
7007
7008
7009         aconnector->base.override_edid = true;
7010         create_eml_sink(aconnector);
7011 }
7012
7013 static struct dc_stream_state *
7014 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7015                                 const struct drm_display_mode *drm_mode,
7016                                 const struct dm_connector_state *dm_state,
7017                                 const struct dc_stream_state *old_stream)
7018 {
7019         struct drm_connector *connector = &aconnector->base;
7020         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7021         struct dc_stream_state *stream;
7022         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7023         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7024         enum dc_status dc_result = DC_OK;
7025
7026         do {
7027                 stream = create_stream_for_sink(aconnector, drm_mode,
7028                                                 dm_state, old_stream,
7029                                                 requested_bpc);
7030                 if (stream == NULL) {
7031                         DRM_ERROR("Failed to create stream for sink!\n");
7032                         break;
7033                 }
7034
7035                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7036
7037                 if (dc_result != DC_OK) {
7038                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7039                                       drm_mode->hdisplay,
7040                                       drm_mode->vdisplay,
7041                                       drm_mode->clock,
7042                                       dc_result,
7043                                       dc_status_to_str(dc_result));
7044
7045                         dc_stream_release(stream);
7046                         stream = NULL;
7047                         requested_bpc -= 2; /* lower bpc to retry validation */
7048                 }
7049
7050         } while (stream == NULL && requested_bpc >= 6);
7051
7052         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7053                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7054
7055                 aconnector->force_yuv420_output = true;
7056                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7057                                                 dm_state, old_stream);
7058                 aconnector->force_yuv420_output = false;
7059         }
7060
7061         return stream;
7062 }
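
/*
 * Worked example of the ladder above: with max_requested_bpc = 10 the
 * helper tries the mode at 10 bpc, then 8, then 6, releasing each
 * rejected stream. If the last failure was DC_FAIL_ENC_VALIDATE, the
 * whole ladder is retried once with YCbCr 4:2:0 forced, which roughly
 * halves the required link bandwidth compared to RGB 4:4:4.
 */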
7063
7064 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7065                                    struct drm_display_mode *mode)
7066 {
7067         int result = MODE_ERROR;
7068         struct dc_sink *dc_sink;
7069         /* TODO: Unhardcode stream count */
7070         struct dc_stream_state *stream;
7071         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7072
7073         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7074                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7075                 return result;
7076
7077         /*
7078          * Only run this the first time mode_valid is called to initialize
7079          * EDID mgmt
7080          */
7081         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7082                 !aconnector->dc_em_sink)
7083                 handle_edid_mgmt(aconnector);
7084
7085         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7086
7087         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7088                                 aconnector->base.force != DRM_FORCE_ON) {
7089                 DRM_ERROR("dc_sink is NULL!\n");
7090                 goto fail;
7091         }
7092
7093         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7094         if (stream) {
7095                 dc_stream_release(stream);
7096                 result = MODE_OK;
7097         }
7098
7099 fail:
7100         /* TODO: error handling */
7101         return result;
7102 }
7103
7104 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7105                                 struct dc_info_packet *out)
7106 {
7107         struct hdmi_drm_infoframe frame;
7108         unsigned char buf[30]; /* 26 + 4 */
7109         ssize_t len;
7110         int ret, i;
7111
7112         memset(out, 0, sizeof(*out));
7113
7114         if (!state->hdr_output_metadata)
7115                 return 0;
7116
7117         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7118         if (ret)
7119                 return ret;
7120
7121         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7122         if (len < 0)
7123                 return (int)len;
7124
7125         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7126         if (len != 30)
7127                 return -EINVAL;
7128
7129         /* Prepare the infopacket for DC. */
7130         switch (state->connector->connector_type) {
7131         case DRM_MODE_CONNECTOR_HDMIA:
7132                 out->hb0 = 0x87; /* type */
7133                 out->hb1 = 0x01; /* version */
7134                 out->hb2 = 0x1A; /* length */
7135                 out->sb[0] = buf[3]; /* checksum */
7136                 i = 1;
7137                 break;
7138
7139         case DRM_MODE_CONNECTOR_DisplayPort:
7140         case DRM_MODE_CONNECTOR_eDP:
7141                 out->hb0 = 0x00; /* sdp id, zero */
7142                 out->hb1 = 0x87; /* type */
7143                 out->hb2 = 0x1D; /* payload len - 1 */
7144                 out->hb3 = (0x13 << 2); /* sdp version */
7145                 out->sb[0] = 0x01; /* version */
7146                 out->sb[1] = 0x1A; /* length */
7147                 i = 2;
7148                 break;
7149
7150         default:
7151                 return -EINVAL;
7152         }
7153
7154         memcpy(&out->sb[i], &buf[4], 26);
7155         out->valid = true;
7156
7157         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7158                        sizeof(out->sb), false);
7159
7160         return 0;
7161 }
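
/*
 * The hdr_output_metadata consumed above is a userspace-supplied blob.
 * Hypothetical libdrm fragment (fd, req and the property-ID lookup are
 * assumed) handing in ST 2084 (PQ) static metadata, which this function
 * then repacks into the 26 byte payload + header expected by DC:
 */
#if 0
	struct hdr_output_metadata meta = {
		.metadata_type = HDMI_STATIC_METADATA_TYPE1,
		.hdmi_metadata_type1 = {
			.eotf = HDMI_EOTF_SMPTE_ST2084,
			.metadata_type = HDMI_STATIC_METADATA_TYPE1,
			.max_display_mastering_luminance = 1000,
			.max_cll = 1000,
			.max_fall = 400,
		},
	};
	uint32_t blob_id;

	drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id);
	drmModeAtomicAddProperty(req, connector_id,
				 hdr_output_metadata_prop_id, blob_id);
#endif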
7162
7163 static int
7164 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7165                                  struct drm_atomic_state *state)
7166 {
7167         struct drm_connector_state *new_con_state =
7168                 drm_atomic_get_new_connector_state(state, conn);
7169         struct drm_connector_state *old_con_state =
7170                 drm_atomic_get_old_connector_state(state, conn);
7171         struct drm_crtc *crtc = new_con_state->crtc;
7172         struct drm_crtc_state *new_crtc_state;
7173         int ret;
7174
7175         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7176
7177         if (!crtc)
7178                 return 0;
7179
7180         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7181                 struct dc_info_packet hdr_infopacket;
7182
7183                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7184                 if (ret)
7185                         return ret;
7186
7187                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7188                 if (IS_ERR(new_crtc_state))
7189                         return PTR_ERR(new_crtc_state);
7190
7191                 /*
7192                  * DC considers the stream backends changed if the
7193                  * static metadata changes. Forcing the modeset also
7194                  * gives a simple way for userspace to switch from
7195                  * 8bpc to 10bpc when setting the metadata to enter
7196                  * or exit HDR.
7197                  *
7198                  * Changing the static metadata after it's been
7199                  * set is permissible, however. So only force a
7200                  * modeset if we're entering or exiting HDR.
7201                  */
7202                 new_crtc_state->mode_changed =
7203                         !old_con_state->hdr_output_metadata ||
7204                         !new_con_state->hdr_output_metadata;
7205         }
7206
7207         return 0;
7208 }
7209
7210 static const struct drm_connector_helper_funcs
7211 amdgpu_dm_connector_helper_funcs = {
7212         /*
7213          * If hotplugging a second, bigger display in FB console mode, the bigger
7214          * resolution modes will be filtered by drm_mode_validate_size(), and those
7215          * modes are missing after the user starts lightdm. So we need to renew the
7216          * modes list in the get_modes callback, not just return the modes count.
7217          */
7218         .get_modes = get_modes,
7219         .mode_valid = amdgpu_dm_connector_mode_valid,
7220         .atomic_check = amdgpu_dm_connector_atomic_check,
7221 };
7222
7223 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7224 {
7225 }
7226
7227 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7228 {
7229         struct drm_atomic_state *state = new_crtc_state->state;
7230         struct drm_plane *plane;
7231         int num_active = 0;
7232
7233         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7234                 struct drm_plane_state *new_plane_state;
7235
7236                 /* Cursor planes are "fake". */
7237                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7238                         continue;
7239
7240                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7241
7242                 if (!new_plane_state) {
7243                         /*
7244                          * The plane is enabled on the CRTC and hasn't changed
7245                          * state. This means that it previously passed
7246                          * validation and is therefore enabled.
7247                          */
7248                         num_active += 1;
7249                         continue;
7250                 }
7251
7252                 /* We need a framebuffer to be considered enabled. */
7253                 num_active += (new_plane_state->fb != NULL);
7254         }
7255
7256         return num_active;
7257 }
7258
7259 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7260                                          struct drm_crtc_state *new_crtc_state)
7261 {
7262         struct dm_crtc_state *dm_new_crtc_state =
7263                 to_dm_crtc_state(new_crtc_state);
7264
7265         dm_new_crtc_state->active_planes = 0;
7266
7267         if (!dm_new_crtc_state->stream)
7268                 return;
7269
7270         dm_new_crtc_state->active_planes =
7271                 count_crtc_active_planes(new_crtc_state);
7272 }
7273
7274 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7275                                        struct drm_atomic_state *state)
7276 {
7277         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7278                                                                           crtc);
7279         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7280         struct dc *dc = adev->dm.dc;
7281         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7282         int ret = -EINVAL;
7283
7284         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7285
7286         dm_update_crtc_active_planes(crtc, crtc_state);
7287
7288         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7289                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7290                 return ret;
7291         }
7292
7293         /*
7294          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7295          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7296          * planes are disabled, which is not supported by the hardware. And there is legacy
7297          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7298          */
7299         if (crtc_state->enable &&
7300             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7301                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7302                 return -EINVAL;
7303         }
7304
7305         /* In some use cases, like reset, no stream is attached */
7306         if (!dm_crtc_state->stream)
7307                 return 0;
7308
7309         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7310                 return 0;
7311
7312         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7313         return ret;
7314 }
7315
7316 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7317                                       const struct drm_display_mode *mode,
7318                                       struct drm_display_mode *adjusted_mode)
7319 {
7320         return true;
7321 }
7322
7323 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7324         .disable = dm_crtc_helper_disable,
7325         .atomic_check = dm_crtc_helper_atomic_check,
7326         .mode_fixup = dm_crtc_helper_mode_fixup,
7327         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7328 };
7329
7330 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7331 {
7332
7333 }
7334
7335 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7336 {
7337         switch (display_color_depth) {
7338         case COLOR_DEPTH_666:
7339                 return 6;
7340         case COLOR_DEPTH_888:
7341                 return 8;
7342         case COLOR_DEPTH_101010:
7343                 return 10;
7344         case COLOR_DEPTH_121212:
7345                 return 12;
7346         case COLOR_DEPTH_141414:
7347                 return 14;
7348         case COLOR_DEPTH_161616:
7349                 return 16;
7350         default:
7351                 break;
7352         }
7353         return 0;
7354 }
7355
7356 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7357                                           struct drm_crtc_state *crtc_state,
7358                                           struct drm_connector_state *conn_state)
7359 {
7360         struct drm_atomic_state *state = crtc_state->state;
7361         struct drm_connector *connector = conn_state->connector;
7362         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7363         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7364         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7365         struct drm_dp_mst_topology_mgr *mst_mgr;
7366         struct drm_dp_mst_port *mst_port;
7367         enum dc_color_depth color_depth;
7368         int clock, bpp = 0;
7369         bool is_y420 = false;
7370
7371         if (!aconnector->port || !aconnector->dc_sink)
7372                 return 0;
7373
7374         mst_port = aconnector->port;
7375         mst_mgr = &aconnector->mst_port->mst_mgr;
7376
7377         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7378                 return 0;
7379
7380         if (!state->duplicated) {
7381                 int max_bpc = conn_state->max_requested_bpc;
7382                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7383                                 aconnector->force_yuv420_output;
7384                 color_depth = convert_color_depth_from_display_info(connector,
7385                                                                     is_y420,
7386                                                                     max_bpc);
7387                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7388                 clock = adjusted_mode->clock;
7389                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7390         }
7391         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7392                                                                            mst_mgr,
7393                                                                            mst_port,
7394                                                                            dm_new_connector_state->pbn,
7395                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7396         if (dm_new_connector_state->vcpi_slots < 0) {
7397                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7398                 return dm_new_connector_state->vcpi_slots;
7399         }
7400         return 0;
7401 }
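
/*
 * Worked example for the PBN math above (exact rounding lives in
 * drm_dp_calc_pbn_mode()): a 3840x2160@60 mode has a 594000 kHz pixel
 * clock; at 8 bpc RGB, bpp = 8 * 3 = 24. With the 1.006 DP overhead
 * margin that works out to roughly 2125 PBN. An HBR2 x4 link carries
 * 40 PBN per time slot, so the stream needs about
 * DIV_ROUND_UP(2125, 40) = 54 of the link's 64 MTP time slots.
 */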
7402
7403 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7404         .disable = dm_encoder_helper_disable,
7405         .atomic_check = dm_encoder_helper_atomic_check
7406 };
7407
7408 #if defined(CONFIG_DRM_AMD_DC_DCN)
7409 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7410                                             struct dc_state *dc_state,
7411                                             struct dsc_mst_fairness_vars *vars)
7412 {
7413         struct dc_stream_state *stream = NULL;
7414         struct drm_connector *connector;
7415         struct drm_connector_state *new_con_state;
7416         struct amdgpu_dm_connector *aconnector;
7417         struct dm_connector_state *dm_conn_state;
7418         int i, j;
7419         int vcpi, pbn_div, pbn, slot_num = 0;
7420
7421         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7422
7423                 aconnector = to_amdgpu_dm_connector(connector);
7424
7425                 if (!aconnector->port)
7426                         continue;
7427
7428                 if (!new_con_state || !new_con_state->crtc)
7429                         continue;
7430
7431                 dm_conn_state = to_dm_connector_state(new_con_state);
7432
7433                 for (j = 0; j < dc_state->stream_count; j++) {
7434                         stream = dc_state->streams[j];
7435                         if (!stream)
7436                                 continue;
7437
7438                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7439                                 break;
7440
7441                         stream = NULL;
7442                 }
7443
7444                 if (!stream)
7445                         continue;
7446
7447                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7448                 /* pbn is calculated by compute_mst_dsc_configs_for_state */
7449                 for (j = 0; j < dc_state->stream_count; j++) {
7450                         if (vars[j].aconnector == aconnector) {
7451                                 pbn = vars[j].pbn;
7452                                 break;
7453                         }
7454                 }
7455
7456                 if (j == dc_state->stream_count)
7457                         continue;
7458
7459                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7460
7461                 if (stream->timing.flags.DSC != 1) {
7462                         dm_conn_state->pbn = pbn;
7463                         dm_conn_state->vcpi_slots = slot_num;
7464
7465                         drm_dp_mst_atomic_enable_dsc(state,
7466                                                      aconnector->port,
7467                                                      dm_conn_state->pbn,
7468                                                      0,
7469                                                      false);
7470                         continue;
7471                 }
7472
7473                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7474                                                     aconnector->port,
7475                                                     pbn, pbn_div,
7476                                                     true);
7477                 if (vcpi < 0)
7478                         return vcpi;
7479
7480                 dm_conn_state->pbn = pbn;
7481                 dm_conn_state->vcpi_slots = vcpi;
7482         }
7483         return 0;
7484 }
7485 #endif
7486
7487 static void dm_drm_plane_reset(struct drm_plane *plane)
7488 {
7489         struct dm_plane_state *amdgpu_state = NULL;
7490
7491         if (plane->state)
7492                 plane->funcs->atomic_destroy_state(plane, plane->state);
7493
7494         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7495         WARN_ON(amdgpu_state == NULL);
7496
7497         if (amdgpu_state)
7498                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7499 }
7500
7501 static struct drm_plane_state *
7502 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7503 {
7504         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7505
7506         old_dm_plane_state = to_dm_plane_state(plane->state);
7507         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7508         if (!dm_plane_state)
7509                 return NULL;
7510
7511         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7512
7513         if (old_dm_plane_state->dc_state) {
7514                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7515                 dc_plane_state_retain(dm_plane_state->dc_state);
7516         }
7517
7518         return &dm_plane_state->base;
7519 }
7520
7521 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7522                                 struct drm_plane_state *state)
7523 {
7524         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7525
7526         if (dm_plane_state->dc_state)
7527                 dc_plane_state_release(dm_plane_state->dc_state);
7528
7529         drm_atomic_helper_plane_destroy_state(plane, state);
7530 }
7531
7532 static const struct drm_plane_funcs dm_plane_funcs = {
7533         .update_plane   = drm_atomic_helper_update_plane,
7534         .disable_plane  = drm_atomic_helper_disable_plane,
7535         .destroy        = drm_primary_helper_destroy,
7536         .reset = dm_drm_plane_reset,
7537         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7538         .atomic_destroy_state = dm_drm_plane_destroy_state,
7539         .format_mod_supported = dm_plane_format_mod_supported,
7540 };
7541
7542 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7543                                       struct drm_plane_state *new_state)
7544 {
7545         struct amdgpu_framebuffer *afb;
7546         struct drm_gem_object *obj;
7547         struct amdgpu_device *adev;
7548         struct amdgpu_bo *rbo;
7549         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7550         struct list_head list;
7551         struct ttm_validate_buffer tv;
7552         struct ww_acquire_ctx ticket;
7553         uint32_t domain;
7554         int r;
7555
7556         if (!new_state->fb) {
7557                 DRM_DEBUG_KMS("No FB bound\n");
7558                 return 0;
7559         }
7560
7561         afb = to_amdgpu_framebuffer(new_state->fb);
7562         obj = new_state->fb->obj[0];
7563         rbo = gem_to_amdgpu_bo(obj);
7564         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7565         INIT_LIST_HEAD(&list);
7566
7567         tv.bo = &rbo->tbo;
7568         tv.num_shared = 1;
7569         list_add(&tv.head, &list);
7570
7571         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7572         if (r) {
7573                 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7574                 return r;
7575         }
7576
7577         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7578                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7579         else
7580                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7581
7582         r = amdgpu_bo_pin(rbo, domain);
7583         if (unlikely(r != 0)) {
7584                 if (r != -ERESTARTSYS)
7585                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7586                 ttm_eu_backoff_reservation(&ticket, &list);
7587                 return r;
7588         }
7589
7590         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7591         if (unlikely(r != 0)) {
7592                 amdgpu_bo_unpin(rbo);
7593                 ttm_eu_backoff_reservation(&ticket, &list);
7594                 DRM_ERROR("%p bind failed\n", rbo);
7595                 return r;
7596         }
7597
7598         ttm_eu_backoff_reservation(&ticket, &list);
7599
7600         afb->address = amdgpu_bo_gpu_offset(rbo);
7601
7602         amdgpu_bo_ref(rbo);
7603
7604         /*
7605          * We don't do surface updates on planes that have been newly created,
7606          * but we also don't have the afb->address during atomic check.
7607          *
7608          * Fill in buffer attributes depending on the address here, but only on
7609          * newly created planes since they're not being used by DC yet and this
7610          * won't modify global state.
7611          */
7612         dm_plane_state_old = to_dm_plane_state(plane->state);
7613         dm_plane_state_new = to_dm_plane_state(new_state);
7614
7615         if (dm_plane_state_new->dc_state &&
7616             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7617                 struct dc_plane_state *plane_state =
7618                         dm_plane_state_new->dc_state;
7619                 bool force_disable_dcc = !plane_state->dcc.enable;
7620
7621                 fill_plane_buffer_attributes(
7622                         adev, afb, plane_state->format, plane_state->rotation,
7623                         afb->tiling_flags,
7624                         &plane_state->tiling_info, &plane_state->plane_size,
7625                         &plane_state->dcc, &plane_state->address,
7626                         afb->tmz_surface, force_disable_dcc);
7627         }
7628
7629         return 0;
7630 }
7631
7632 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7633                                        struct drm_plane_state *old_state)
7634 {
7635         struct amdgpu_bo *rbo;
7636         int r;
7637
7638         if (!old_state->fb)
7639                 return;
7640
7641         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7642         r = amdgpu_bo_reserve(rbo, false);
7643         if (unlikely(r)) {
7644                 DRM_ERROR("failed to reserve rbo before unpin\n");
7645                 return;
7646         }
7647
7648         amdgpu_bo_unpin(rbo);
7649         amdgpu_bo_unreserve(rbo);
7650         amdgpu_bo_unref(&rbo);
7651 }
7652
7653 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7654                                        struct drm_crtc_state *new_crtc_state)
7655 {
7656         struct drm_framebuffer *fb = state->fb;
7657         int min_downscale, max_upscale;
7658         int min_scale = 0;
7659         int max_scale = INT_MAX;
7660
7661         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7662         if (fb && state->crtc) {
7663                 /* Validate viewport to cover the case when only the position changes */
7664                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7665                         int viewport_width = state->crtc_w;
7666                         int viewport_height = state->crtc_h;
7667
7668                         if (state->crtc_x < 0)
7669                                 viewport_width += state->crtc_x;
7670                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7671                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7672
7673                         if (state->crtc_y < 0)
7674                                 viewport_height += state->crtc_y;
7675                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7676                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7677
7678                         if (viewport_width < 0 || viewport_height < 0) {
7679                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7680                                 return -EINVAL;
7681                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7682                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7683                                 return -EINVAL;
7684                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7685                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7686                                 return -EINVAL;
7687                         }
7688
7689                 }
7690
7691                 /* Get min/max allowed scaling factors from plane caps. */
7692                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7693                                              &min_downscale, &max_upscale);
7694                 /*
7695                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7696                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7697                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7698                  */
7699                 min_scale = (1000 << 16) / max_upscale;
7700                 max_scale = (1000 << 16) / min_downscale;
7701         }
7702
7703         return drm_atomic_helper_check_plane_state(
7704                 state, new_crtc_state, min_scale, max_scale, true, true);
7705 }
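
/*
 * Worked example for the fixed-point conversion above, assuming a plane
 * for which DC reports min_downscale = 250 (1:4) and max_upscale = 16000
 * (16:1): min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in drm's
 * 16.16 src/dst convention, and max_scale = (1000 << 16) / 250 = 262144,
 * i.e. 4.0. The inversion is deliberate: dc's largest upscale ratio
 * bounds drm's smallest scale factor, and vice versa.
 */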
7706
7707 static int dm_plane_atomic_check(struct drm_plane *plane,
7708                                  struct drm_atomic_state *state)
7709 {
7710         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7711                                                                                  plane);
7712         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7713         struct dc *dc = adev->dm.dc;
7714         struct dm_plane_state *dm_plane_state;
7715         struct dc_scaling_info scaling_info;
7716         struct drm_crtc_state *new_crtc_state;
7717         int ret;
7718
7719         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7720
7721         dm_plane_state = to_dm_plane_state(new_plane_state);
7722
7723         if (!dm_plane_state->dc_state)
7724                 return 0;
7725
7726         new_crtc_state =
7727                 drm_atomic_get_new_crtc_state(state,
7728                                               new_plane_state->crtc);
7729         if (!new_crtc_state)
7730                 return -EINVAL;
7731
7732         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7733         if (ret)
7734                 return ret;
7735
7736         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7737         if (ret)
7738                 return ret;
7739
7740         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7741                 return 0;
7742
7743         return -EINVAL;
7744 }
7745
7746 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7747                                        struct drm_atomic_state *state)
7748 {
7749         /* Only support async updates on cursor planes. */
7750         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7751                 return -EINVAL;
7752
7753         return 0;
7754 }
7755
7756 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7757                                          struct drm_atomic_state *state)
7758 {
7759         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7760                                                                            plane);
7761         struct drm_plane_state *old_state =
7762                 drm_atomic_get_old_plane_state(state, plane);
7763
7764         trace_amdgpu_dm_atomic_update_cursor(new_state);
7765
7766         swap(plane->state->fb, new_state->fb);
7767
7768         plane->state->src_x = new_state->src_x;
7769         plane->state->src_y = new_state->src_y;
7770         plane->state->src_w = new_state->src_w;
7771         plane->state->src_h = new_state->src_h;
7772         plane->state->crtc_x = new_state->crtc_x;
7773         plane->state->crtc_y = new_state->crtc_y;
7774         plane->state->crtc_w = new_state->crtc_w;
7775         plane->state->crtc_h = new_state->crtc_h;
7776
7777         handle_cursor_update(plane, old_state);
7778 }
7779
7780 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7781         .prepare_fb = dm_plane_helper_prepare_fb,
7782         .cleanup_fb = dm_plane_helper_cleanup_fb,
7783         .atomic_check = dm_plane_atomic_check,
7784         .atomic_async_check = dm_plane_atomic_async_check,
7785         .atomic_async_update = dm_plane_atomic_async_update
7786 };
7787
7788 /*
7789  * TODO: these are currently initialized to rgb formats only.
7790  * For future use cases we should either initialize them dynamically based on
7791  * plane capabilities, or initialize this array to all formats, so internal drm
7792  * check will succeed, and let DC implement the proper checks.
7793  */
7794 static const uint32_t rgb_formats[] = {
7795         DRM_FORMAT_XRGB8888,
7796         DRM_FORMAT_ARGB8888,
7797         DRM_FORMAT_RGBA8888,
7798         DRM_FORMAT_XRGB2101010,
7799         DRM_FORMAT_XBGR2101010,
7800         DRM_FORMAT_ARGB2101010,
7801         DRM_FORMAT_ABGR2101010,
7802         DRM_FORMAT_XRGB16161616,
7803         DRM_FORMAT_XBGR16161616,
7804         DRM_FORMAT_ARGB16161616,
7805         DRM_FORMAT_ABGR16161616,
7806         DRM_FORMAT_XBGR8888,
7807         DRM_FORMAT_ABGR8888,
7808         DRM_FORMAT_RGB565,
7809 };
7810
7811 static const uint32_t overlay_formats[] = {
7812         DRM_FORMAT_XRGB8888,
7813         DRM_FORMAT_ARGB8888,
7814         DRM_FORMAT_RGBA8888,
7815         DRM_FORMAT_XBGR8888,
7816         DRM_FORMAT_ABGR8888,
7817         DRM_FORMAT_RGB565
7818 };
7819
7820 static const u32 cursor_formats[] = {
7821         DRM_FORMAT_ARGB8888
7822 };
7823
7824 static int get_plane_formats(const struct drm_plane *plane,
7825                              const struct dc_plane_cap *plane_cap,
7826                              uint32_t *formats, int max_formats)
7827 {
7828         int i, num_formats = 0;
7829
7830         /*
7831          * TODO: Query support for each group of formats directly from
7832          * DC plane caps. This will require adding more formats to the
7833          * caps list.
7834          */
7835
7836         switch (plane->type) {
7837         case DRM_PLANE_TYPE_PRIMARY:
7838                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7839                         if (num_formats >= max_formats)
7840                                 break;
7841
7842                         formats[num_formats++] = rgb_formats[i];
7843                 }
7844
7845                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7846                         formats[num_formats++] = DRM_FORMAT_NV12;
7847                 if (plane_cap && plane_cap->pixel_format_support.p010)
7848                         formats[num_formats++] = DRM_FORMAT_P010;
7849                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7850                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7851                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7852                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7853                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7854                 }
7855                 break;
7856
7857         case DRM_PLANE_TYPE_OVERLAY:
7858                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7859                         if (num_formats >= max_formats)
7860                                 break;
7861
7862                         formats[num_formats++] = overlay_formats[i];
7863                 }
7864                 break;
7865
7866         case DRM_PLANE_TYPE_CURSOR:
7867                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7868                         if (num_formats >= max_formats)
7869                                 break;
7870
7871                         formats[num_formats++] = cursor_formats[i];
7872                 }
7873                 break;
7874         }
7875
7876         return num_formats;
7877 }
7878
7879 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7880                                 struct drm_plane *plane,
7881                                 unsigned long possible_crtcs,
7882                                 const struct dc_plane_cap *plane_cap)
7883 {
7884         uint32_t formats[32];
7885         int num_formats;
7886         int res = -EPERM;
7887         unsigned int supported_rotations;
7888         uint64_t *modifiers = NULL;
7889
7890         num_formats = get_plane_formats(plane, plane_cap, formats,
7891                                         ARRAY_SIZE(formats));
7892
7893         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7894         if (res)
7895                 return res;
7896
7897         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7898                                        &dm_plane_funcs, formats, num_formats,
7899                                        modifiers, plane->type, NULL);
7900         kfree(modifiers);
7901         if (res)
7902                 return res;
7903
7904         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7905             plane_cap && plane_cap->per_pixel_alpha) {
7906                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7907                                           BIT(DRM_MODE_BLEND_PREMULTI);
7908
7909                 drm_plane_create_alpha_property(plane);
7910                 drm_plane_create_blend_mode_property(plane, blend_caps);
7911         }
7912
7913         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7914             plane_cap &&
7915             (plane_cap->pixel_format_support.nv12 ||
7916              plane_cap->pixel_format_support.p010)) {
7917                 /* This only affects YUV formats. */
7918                 drm_plane_create_color_properties(
7919                         plane,
7920                         BIT(DRM_COLOR_YCBCR_BT601) |
7921                         BIT(DRM_COLOR_YCBCR_BT709) |
7922                         BIT(DRM_COLOR_YCBCR_BT2020),
7923                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7924                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7925                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7926         }
7927
7928         supported_rotations =
7929                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7930                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7931
7932         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7933             plane->type != DRM_PLANE_TYPE_CURSOR)
7934                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7935                                                    supported_rotations);
7936
7937         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7938
7939         /* Create (reset) the plane state */
7940         if (plane->funcs->reset)
7941                 plane->funcs->reset(plane);
7942
7943         return 0;
7944 }
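
     /*
      * A minimal sketch of how a caller might bring up a plane with the
      * helper above (illustrative only; the real call sites live in the DM
      * device-init path, and crtc_index/plane_cap here are assumed to come
      * from the caller):
      *
      *	struct drm_plane *plane;
      *
      *	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
      *	if (!plane)
      *		return -ENOMEM;
      *	plane->type = DRM_PLANE_TYPE_PRIMARY;
      *	if (amdgpu_dm_plane_init(dm, plane, 1 << crtc_index, plane_cap)) {
      *		kfree(plane);
      *		return -EINVAL;
      *	}
      */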
7945
7946 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7947                                struct drm_plane *plane,
7948                                uint32_t crtc_index)
7949 {
7950         struct amdgpu_crtc *acrtc = NULL;
7951         struct drm_plane *cursor_plane;
7952
7953         int res = -ENOMEM;
7954
7955         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7956         if (!cursor_plane)
7957                 goto fail;
7958
7959         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7960         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
             if (res)
                     goto fail;
7961
7962         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7963         if (!acrtc)
7964                 goto fail;
7965
7966         res = drm_crtc_init_with_planes(
7967                         dm->ddev,
7968                         &acrtc->base,
7969                         plane,
7970                         cursor_plane,
7971                         &amdgpu_dm_crtc_funcs, NULL);
7972
7973         if (res)
7974                 goto fail;
7975
7976         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7977
7978         /* Create (reset) the CRTC state */
7979         if (acrtc->base.funcs->reset)
7980                 acrtc->base.funcs->reset(&acrtc->base);
7981
7982         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7983         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7984
7985         acrtc->crtc_id = crtc_index;
7986         acrtc->base.enabled = false;
7987         acrtc->otg_inst = -1;
7988
7989         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7990         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7991                                    true, MAX_COLOR_LUT_ENTRIES);
7992         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7993
7994         return 0;
7995
7996 fail:
7997         kfree(acrtc);
7998         kfree(cursor_plane);
7999         return res;
8000 }
8001
8002
8003 static int to_drm_connector_type(enum signal_type st)
8004 {
8005         switch (st) {
8006         case SIGNAL_TYPE_HDMI_TYPE_A:
8007                 return DRM_MODE_CONNECTOR_HDMIA;
8008         case SIGNAL_TYPE_EDP:
8009                 return DRM_MODE_CONNECTOR_eDP;
8010         case SIGNAL_TYPE_LVDS:
8011                 return DRM_MODE_CONNECTOR_LVDS;
8012         case SIGNAL_TYPE_RGB:
8013                 return DRM_MODE_CONNECTOR_VGA;
8014         case SIGNAL_TYPE_DISPLAY_PORT:
8015         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8016                 return DRM_MODE_CONNECTOR_DisplayPort;
8017         case SIGNAL_TYPE_DVI_DUAL_LINK:
8018         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8019                 return DRM_MODE_CONNECTOR_DVID;
8020         case SIGNAL_TYPE_VIRTUAL:
8021                 return DRM_MODE_CONNECTOR_VIRTUAL;
8022
8023         default:
8024                 return DRM_MODE_CONNECTOR_Unknown;
8025         }
8026 }
8027
8028 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8029 {
8030         struct drm_encoder *encoder;
8031
8032         /* There is only one encoder per connector */
8033         drm_connector_for_each_possible_encoder(connector, encoder)
8034                 return encoder;
8035
8036         return NULL;
8037 }
8038
8039 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8040 {
8041         struct drm_encoder *encoder;
8042         struct amdgpu_encoder *amdgpu_encoder;
8043
8044         encoder = amdgpu_dm_connector_to_encoder(connector);
8045
8046         if (encoder == NULL)
8047                 return;
8048
8049         amdgpu_encoder = to_amdgpu_encoder(encoder);
8050
8051         amdgpu_encoder->native_mode.clock = 0;
8052
8053         if (!list_empty(&connector->probed_modes)) {
8054                 struct drm_display_mode *preferred_mode = NULL;
8055
8056                 list_for_each_entry(preferred_mode,
8057                                     &connector->probed_modes,
8058                                     head) {
8059                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8060                                 amdgpu_encoder->native_mode = *preferred_mode;
8061
8062                         break;
8063                 }
8064
8065         }
8066 }
8067
8068 static struct drm_display_mode *
8069 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8070                              char *name,
8071                              int hdisplay, int vdisplay)
8072 {
8073         struct drm_device *dev = encoder->dev;
8074         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8075         struct drm_display_mode *mode = NULL;
8076         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8077
8078         mode = drm_mode_duplicate(dev, native_mode);
8079
8080         if (mode == NULL)
8081                 return NULL;
8082
8083         mode->hdisplay = hdisplay;
8084         mode->vdisplay = vdisplay;
8085         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8086         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8087
8088         return mode;
8089
8090 }
8091
8092 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8093                                                  struct drm_connector *connector)
8094 {
8095         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8096         struct drm_display_mode *mode = NULL;
8097         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8098         struct amdgpu_dm_connector *amdgpu_dm_connector =
8099                                 to_amdgpu_dm_connector(connector);
8100         int i;
8101         int n;
8102         struct mode_size {
8103                 char name[DRM_DISPLAY_MODE_LEN];
8104                 int w;
8105                 int h;
8106         } common_modes[] = {
8107                 {  "640x480",  640,  480},
8108                 {  "800x600",  800,  600},
8109                 { "1024x768", 1024,  768},
8110                 { "1280x720", 1280,  720},
8111                 { "1280x800", 1280,  800},
8112                 {"1280x1024", 1280, 1024},
8113                 { "1440x900", 1440,  900},
8114                 {"1680x1050", 1680, 1050},
8115                 {"1600x1200", 1600, 1200},
8116                 {"1920x1080", 1920, 1080},
8117                 {"1920x1200", 1920, 1200}
8118         };
8119
8120         n = ARRAY_SIZE(common_modes);
8121
8122         for (i = 0; i < n; i++) {
8123                 struct drm_display_mode *curmode = NULL;
8124                 bool mode_existed = false;
8125
8126                 if (common_modes[i].w > native_mode->hdisplay ||
8127                     common_modes[i].h > native_mode->vdisplay ||
8128                    (common_modes[i].w == native_mode->hdisplay &&
8129                     common_modes[i].h == native_mode->vdisplay))
8130                         continue;
8131
8132                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8133                         if (common_modes[i].w == curmode->hdisplay &&
8134                             common_modes[i].h == curmode->vdisplay) {
8135                                 mode_existed = true;
8136                                 break;
8137                         }
8138                 }
8139
8140                 if (mode_existed)
8141                         continue;
8142
8143                 mode = amdgpu_dm_create_common_mode(encoder,
8144                                 common_modes[i].name, common_modes[i].w,
8145                                 common_modes[i].h);
                     if (!mode)
                             continue;
8146                 drm_mode_probed_add(connector, mode);
8147                 amdgpu_dm_connector->num_modes++;
8148         }
8149 }
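
     /*
      * Worked example (hypothetical panel): with a native mode of 1920x1080,
      * the loop above adds every table entry strictly smaller than the
      * native size that is not already in probed_modes (640x480 through
      * 1680x1050), skips 1920x1080 itself because it equals the native
      * size, and skips 1920x1200 and 1600x1200 because their vertical size
      * (1200) exceeds the native 1080.
      */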
8150
8151 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8152 {
8153         struct drm_encoder *encoder;
8154         struct amdgpu_encoder *amdgpu_encoder;
8155         const struct drm_display_mode *native_mode;
8156
8157         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8158             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8159                 return;
8160
8161         encoder = amdgpu_dm_connector_to_encoder(connector);
8162         if (!encoder)
8163                 return;
8164
8165         amdgpu_encoder = to_amdgpu_encoder(encoder);
8166
8167         native_mode = &amdgpu_encoder->native_mode;
8168         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8169                 return;
8170
8171         drm_connector_set_panel_orientation_with_quirk(connector,
8172                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8173                                                        native_mode->hdisplay,
8174                                                        native_mode->vdisplay);
8175 }
8176
8177 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8178                                               struct edid *edid)
8179 {
8180         struct amdgpu_dm_connector *amdgpu_dm_connector =
8181                         to_amdgpu_dm_connector(connector);
8182
8183         if (edid) {
8184                 /* empty probed_modes */
8185                 INIT_LIST_HEAD(&connector->probed_modes);
8186                 amdgpu_dm_connector->num_modes =
8187                                 drm_add_edid_modes(connector, edid);
8188
8189                 /* Sort the probed modes before calling
8190                  * amdgpu_dm_get_native_mode(), since an EDID can contain
8191                  * more than one preferred mode. Modes that appear later
8192                  * in the probed list may have a higher preferred
8193                  * resolution: for example, 3840x2160 in the base EDID
8194                  * preferred timing and 4096x2160 in a DID extension
8195                  * block later on.
8196                  */
8197                 drm_mode_sort(&connector->probed_modes);
8198                 amdgpu_dm_get_native_mode(connector);
8199
8200                 /* Freesync capabilities are reset by calling
8201                  * drm_add_edid_modes() and need to be
8202                  * restored here.
8203                  */
8204                 amdgpu_dm_update_freesync_caps(connector, edid);
8205
8206                 amdgpu_set_panel_orientation(connector);
8207         } else {
8208                 amdgpu_dm_connector->num_modes = 0;
8209         }
8210 }
8211
8212 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8213                               struct drm_display_mode *mode)
8214 {
8215         struct drm_display_mode *m;
8216
8217         list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8218                 if (drm_mode_equal(m, mode))
8219                         return true;
8220         }
8221
8222         return false;
8223 }
8224
8225 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8226 {
8227         const struct drm_display_mode *m;
8228         struct drm_display_mode *new_mode;
8229         uint i;
8230         uint32_t new_modes_count = 0;
8231
8232         /* Standard FPS values
8233          *
8234          * 23.976       - TV/NTSC
8235          * 24           - Cinema
8236          * 25           - TV/PAL
8237          * 29.97        - TV/NTSC
8238          * 30           - TV/NTSC
8239          * 48           - Cinema HFR
8240          * 50           - TV/PAL
8241          * 60           - Commonly used
8242          * 48,72,96,120 - Multiples of 24
8243          */
8244         static const uint32_t common_rates[] = {
8245                 23976, 24000, 25000, 29970, 30000,
8246                 48000, 50000, 60000, 72000, 96000, 120000
8247         };
8248
8249         /*
8250          * Find the mode with the highest refresh rate at the same
8251          * resolution as the preferred mode; some monitors' preferred
8252          * mode is not the highest-refresh one they support.
8253          */
8254
8255         m = get_highest_refresh_rate_mode(aconnector, true);
8256         if (!m)
8257                 return 0;
8258
8259         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8260                 uint64_t target_vtotal, target_vtotal_diff;
8261                 uint64_t num, den;
8262
8263                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8264                         continue;
8265
8266                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8267                     common_rates[i] > aconnector->max_vfreq * 1000)
8268                         continue;
8269
8270                 num = (unsigned long long)m->clock * 1000 * 1000;
8271                 den = common_rates[i] * (unsigned long long)m->htotal;
8272                 target_vtotal = div_u64(num, den);
8273                 target_vtotal_diff = target_vtotal - m->vtotal;
8274
8275                 /* Check for illegal modes */
8276                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8277                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8278                     m->vtotal + target_vtotal_diff < m->vsync_end)
8279                         continue;
8280
8281                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8282                 if (!new_mode)
8283                         goto out;
8284
8285                 new_mode->vtotal += (u16)target_vtotal_diff;
8286                 new_mode->vsync_start += (u16)target_vtotal_diff;
8287                 new_mode->vsync_end += (u16)target_vtotal_diff;
8288                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8289                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8290
8291                 if (!is_duplicate_mode(aconnector, new_mode)) {
8292                         drm_mode_probed_add(&aconnector->base, new_mode);
8293                         new_modes_count += 1;
8294                 } else {
8295                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
8296         }
8297  out:
8298         return new_modes_count;
8299 }
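
     /*
      * Worked example for the vtotal math above (hypothetical numbers):
      * take a 1920x1080@60 base mode with clock = 148500 kHz,
      * htotal = 2200 and vtotal = 1125. For a 48 Hz target rate:
      *
      *	num           = 148500 * 1000 * 1000     = 148500000000
      *	den           = 48000 * 2200             = 105600000
      *	target_vtotal = 148500000000 / 105600000 = 1406
      *	diff          = 1406 - 1125              = 281
      *
      * The duplicated mode keeps the pixel clock and horizontal timing and
      * stretches only the vertical blank, yielding a fixed ~48 Hz mode
      * inside the panel's VRR range.
      */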
8300
8301 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8302                                                    struct edid *edid)
8303 {
8304         struct amdgpu_dm_connector *amdgpu_dm_connector =
8305                 to_amdgpu_dm_connector(connector);
8306
8307         if (!(amdgpu_freesync_vid_mode && edid))
8308                 return;
8309
8310         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8311                 amdgpu_dm_connector->num_modes +=
8312                         add_fs_modes(amdgpu_dm_connector);
8313 }
8314
8315 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8316 {
8317         struct amdgpu_dm_connector *amdgpu_dm_connector =
8318                         to_amdgpu_dm_connector(connector);
8319         struct drm_encoder *encoder;
8320         struct edid *edid = amdgpu_dm_connector->edid;
8321
8322         encoder = amdgpu_dm_connector_to_encoder(connector);
8323
8324         if (!drm_edid_is_valid(edid)) {
8325                 amdgpu_dm_connector->num_modes =
8326                                 drm_add_modes_noedid(connector, 640, 480);
8327         } else {
8328                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8329                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8330                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8331         }
8332         amdgpu_dm_fbc_init(connector);
8333
8334         return amdgpu_dm_connector->num_modes;
8335 }
8336
8337 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8338                                      struct amdgpu_dm_connector *aconnector,
8339                                      int connector_type,
8340                                      struct dc_link *link,
8341                                      int link_index)
8342 {
8343         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8344
8345         /*
8346          * Some of the properties below require access to state, like bpc.
8347          * Allocate some default initial connector state with our reset helper.
8348          */
8349         if (aconnector->base.funcs->reset)
8350                 aconnector->base.funcs->reset(&aconnector->base);
8351
8352         aconnector->connector_id = link_index;
8353         aconnector->dc_link = link;
8354         aconnector->base.interlace_allowed = false;
8355         aconnector->base.doublescan_allowed = false;
8356         aconnector->base.stereo_allowed = false;
8357         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8358         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8359         aconnector->audio_inst = -1;
8360         mutex_init(&aconnector->hpd_lock);
8361
8362         /*
8363          * Configure HPD hot-plug support. The connector->polled default
8364          * value of 0 means HPD hot plug is not supported.
8365          */
8366         switch (connector_type) {
8367         case DRM_MODE_CONNECTOR_HDMIA:
8368                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8369                 aconnector->base.ycbcr_420_allowed =
8370                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8371                 break;
8372         case DRM_MODE_CONNECTOR_DisplayPort:
8373                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8374                 link->link_enc = dp_get_link_enc(link);
8375                 ASSERT(link->link_enc);
8376                 if (link->link_enc)
8377                         aconnector->base.ycbcr_420_allowed =
8378                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
8379                 break;
8380         case DRM_MODE_CONNECTOR_DVID:
8381                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8382                 break;
8383         default:
8384                 break;
8385         }
8386
8387         drm_object_attach_property(&aconnector->base.base,
8388                                 dm->ddev->mode_config.scaling_mode_property,
8389                                 DRM_MODE_SCALE_NONE);
8390
8391         drm_object_attach_property(&aconnector->base.base,
8392                                 adev->mode_info.underscan_property,
8393                                 UNDERSCAN_OFF);
8394         drm_object_attach_property(&aconnector->base.base,
8395                                 adev->mode_info.underscan_hborder_property,
8396                                 0);
8397         drm_object_attach_property(&aconnector->base.base,
8398                                 adev->mode_info.underscan_vborder_property,
8399                                 0);
8400
8401         if (!aconnector->mst_port)
8402                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8403
8404         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8405         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8406         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8407
8408         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8409             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8410                 drm_object_attach_property(&aconnector->base.base,
8411                                 adev->mode_info.abm_level_property, 0);
8412         }
8413
8414         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8415             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8416             connector_type == DRM_MODE_CONNECTOR_eDP) {
8417                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8418
8419                 if (!aconnector->mst_port)
8420                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8421
8422 #ifdef CONFIG_DRM_AMD_DC_HDCP
8423                 if (adev->dm.hdcp_workqueue)
8424                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8425 #endif
8426         }
8427 }
8428
8429 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8430                               struct i2c_msg *msgs, int num)
8431 {
8432         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8433         struct ddc_service *ddc_service = i2c->ddc_service;
8434         struct i2c_command cmd;
8435         int i;
8436         int result = -EIO;
8437
8438         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8439
8440         if (!cmd.payloads)
8441                 return result;
8442
8443         cmd.number_of_payloads = num;
8444         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8445         cmd.speed = 100;
8446
8447         for (i = 0; i < num; i++) {
8448                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8449                 cmd.payloads[i].address = msgs[i].addr;
8450                 cmd.payloads[i].length = msgs[i].len;
8451                 cmd.payloads[i].data = msgs[i].buf;
8452         }
8453
8454         if (dc_submit_i2c(
8455                         ddc_service->ctx->dc,
8456                         ddc_service->ddc_pin->hw_info.ddc_channel,
8457                         &cmd))
8458                 result = num;
8459
8460         kfree(cmd.payloads);
8461         return result;
8462 }
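
     /*
      * Example of the translation performed above (hypothetical transfer):
      * a standard EDID read arrives as two i2c_msgs - a 1-byte write of the
      * block offset followed by a 128-byte read:
      *
      *	msgs[0] = { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &off };
      *	msgs[1] = { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid };
      *
      * These become two i2c_payloads with .write = true and .write = false
      * respectively, submitted to DC as a single i2c_command, presumably so
      * the engine can chain them as one combined transaction.
      */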
8463
8464 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8465 {
8466         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8467 }
8468
8469 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8470         .master_xfer = amdgpu_dm_i2c_xfer,
8471         .functionality = amdgpu_dm_i2c_func,
8472 };
8473
8474 static struct amdgpu_i2c_adapter *
8475 create_i2c(struct ddc_service *ddc_service,
8476            int link_index,
8477            int *res)
8478 {
8479         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8480         struct amdgpu_i2c_adapter *i2c;
8481
8482         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8483         if (!i2c)
8484                 return NULL;
8485         i2c->base.owner = THIS_MODULE;
8486         i2c->base.class = I2C_CLASS_DDC;
8487         i2c->base.dev.parent = &adev->pdev->dev;
8488         i2c->base.algo = &amdgpu_dm_i2c_algo;
8489         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8490         i2c_set_adapdata(&i2c->base, i2c);
8491         i2c->ddc_service = ddc_service;
8492         if (i2c->ddc_service->ddc_pin)
8493                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8494
8495         return i2c;
8496 }
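
     /*
      * Sketch of how the adapter created above is typically consumed (names
      * taken from the surrounding code; error handling elided): once
      * registered with i2c_add_adapter(), the DRM EDID helpers can drive it
      * directly:
      *
      *	struct edid *edid = drm_get_edid(&aconnector->base, &i2c->base);
      *
      * drm_get_edid() then issues the write/read pair shown in
      * amdgpu_dm_i2c_xfer() through dc_submit_i2c() on the link's DDC
      * channel.
      */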
8497
8498
8499 /*
8500  * Note: this function assumes that dc_link_detect() was called for the
8501  * dc_link which will be represented by this aconnector.
8502  */
8503 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8504                                     struct amdgpu_dm_connector *aconnector,
8505                                     uint32_t link_index,
8506                                     struct amdgpu_encoder *aencoder)
8507 {
8508         int res = 0;
8509         int connector_type;
8510         struct dc *dc = dm->dc;
8511         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8512         struct amdgpu_i2c_adapter *i2c;
8513
8514         link->priv = aconnector;
8515
8516         DRM_DEBUG_DRIVER("%s()\n", __func__);
8517
8518         i2c = create_i2c(link->ddc, link->link_index, &res);
8519         if (!i2c) {
8520                 DRM_ERROR("Failed to create i2c adapter data\n");
8521                 return -ENOMEM;
8522         }
8523
8524         aconnector->i2c = i2c;
8525         res = i2c_add_adapter(&i2c->base);
8526
8527         if (res) {
8528                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8529                 goto out_free;
8530         }
8531
8532         connector_type = to_drm_connector_type(link->connector_signal);
8533
8534         res = drm_connector_init_with_ddc(
8535                         dm->ddev,
8536                         &aconnector->base,
8537                         &amdgpu_dm_connector_funcs,
8538                         connector_type,
8539                         &i2c->base);
8540
8541         if (res) {
8542                 DRM_ERROR("connector_init failed\n");
8543                 aconnector->connector_id = -1;
8544                 goto out_free;
8545         }
8546
8547         drm_connector_helper_add(
8548                         &aconnector->base,
8549                         &amdgpu_dm_connector_helper_funcs);
8550
8551         amdgpu_dm_connector_init_helper(
8552                 dm,
8553                 aconnector,
8554                 connector_type,
8555                 link,
8556                 link_index);
8557
8558         drm_connector_attach_encoder(
8559                 &aconnector->base, &aencoder->base);
8560
8561         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8562             connector_type == DRM_MODE_CONNECTOR_eDP)
8563                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8564
8565 out_free:
8566         if (res) {
8567                 kfree(i2c);
8568                 aconnector->i2c = NULL;
8569         }
8570         return res;
8571 }
8572
8573 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8574 {
8575         switch (adev->mode_info.num_crtc) {
8576         case 1:
8577                 return 0x1;
8578         case 2:
8579                 return 0x3;
8580         case 3:
8581                 return 0x7;
8582         case 4:
8583                 return 0xf;
8584         case 5:
8585                 return 0x1f;
8586         case 6:
8587         default:
8588                 return 0x3f;
8589         }
8590 }
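
     /*
      * The switch above is just a bounded bitmask: for n CRTCs in 1..6 it
      * returns (1 << n) - 1, and anything else falls through to the 6-CRTC
      * mask. A hypothetical equivalent for the in-range case:
      *
      *	return (1u << min(adev->mode_info.num_crtc, 6)) - 1;
      *
      * The table form is kept so the clamp at 6 stays explicit.
      */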
8591
8592 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8593                                   struct amdgpu_encoder *aencoder,
8594                                   uint32_t link_index)
8595 {
8596         struct amdgpu_device *adev = drm_to_adev(dev);
8597
8598         int res = drm_encoder_init(dev,
8599                                    &aencoder->base,
8600                                    &amdgpu_dm_encoder_funcs,
8601                                    DRM_MODE_ENCODER_TMDS,
8602                                    NULL);
8603
8604         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8605
8606         if (!res)
8607                 aencoder->encoder_id = link_index;
8608         else
8609                 aencoder->encoder_id = -1;
8610
8611         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8612
8613         return res;
8614 }
8615
8616 static void manage_dm_interrupts(struct amdgpu_device *adev,
8617                                  struct amdgpu_crtc *acrtc,
8618                                  bool enable)
8619 {
8620         /*
8621          * We have no guarantee that the frontend index maps to the same
8622          * backend index - some even map to more than one.
8623          *
8624          * TODO: Use a different interrupt or check DC itself for the mapping.
8625          */
8626         int irq_type =
8627                 amdgpu_display_crtc_idx_to_irq_type(
8628                         adev,
8629                         acrtc->crtc_id);
8630
8631         if (enable) {
8632                 drm_crtc_vblank_on(&acrtc->base);
8633                 amdgpu_irq_get(
8634                         adev,
8635                         &adev->pageflip_irq,
8636                         irq_type);
8637 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8638                 amdgpu_irq_get(
8639                         adev,
8640                         &adev->vline0_irq,
8641                         irq_type);
8642 #endif
8643         } else {
8644 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8645                 amdgpu_irq_put(
8646                         adev,
8647                         &adev->vline0_irq,
8648                         irq_type);
8649 #endif
8650                 amdgpu_irq_put(
8651                         adev,
8652                         &adev->pageflip_irq,
8653                         irq_type);
8654                 drm_crtc_vblank_off(&acrtc->base);
8655         }
8656 }
8657
8658 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8659                                       struct amdgpu_crtc *acrtc)
8660 {
8661         int irq_type =
8662                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8663
8664         /*
8665          * This reads the current state for the IRQ and forcibly reapplies
8666          * the setting to hardware.
8667          */
8668         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8669 }
8670
8671 static bool
8672 is_scaling_state_different(const struct dm_connector_state *dm_state,
8673                            const struct dm_connector_state *old_dm_state)
8674 {
8675         if (dm_state->scaling != old_dm_state->scaling)
8676                 return true;
8677         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8678                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8679                         return true;
8680         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8681                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8682                         return true;
8683         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8684                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8685                 return true;
8686         return false;
8687 }
8688
8689 #ifdef CONFIG_DRM_AMD_DC_HDCP
8690 static bool is_content_protection_different(struct drm_connector_state *state,
8691                                             const struct drm_connector_state *old_state,
8692                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8693 {
8694         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8695         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8696
8697         /* Handle: Type0/1 change */
8698         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8699             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8700                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8701                 return true;
8702         }
8703
8704         /* CP is being re-enabled, ignore this
8705          *
8706          * Handles:     ENABLED -> DESIRED
8707          */
8708         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8709             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8710                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8711                 return false;
8712         }
8713
8714         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8715          *
8716          * Handles:     UNDESIRED -> ENABLED
8717          */
8718         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8719             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8720                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8721
8722         /* Stream removed and re-enabled
8723          *
8724          * Can sometimes overlap with the HPD case,
8725          * thus set update_hdcp to false to avoid
8726          * setting HDCP multiple times.
8727          *
8728          * Handles:     DESIRED -> DESIRED (Special case)
8729          */
8730         if (!(old_state->crtc && old_state->crtc->enabled) &&
8731                 state->crtc && state->crtc->enabled &&
8732                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8733                 dm_con_state->update_hdcp = false;
8734                 return true;
8735         }
8736
8737         /* Hot-plug, headless s3, dpms
8738          *
8739          * Only start HDCP if the display is connected/enabled.
8740          * update_hdcp flag will be set to false until the next
8741          * HPD comes in.
8742          *
8743          * Handles:     DESIRED -> DESIRED (Special case)
8744          */
8745         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8746             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8747                 dm_con_state->update_hdcp = false;
8748                 return true;
8749         }
8750
8751         /*
8752          * Handles:     UNDESIRED -> UNDESIRED
8753          *              DESIRED -> DESIRED
8754          *              ENABLED -> ENABLED
8755          */
8756         if (old_state->content_protection == state->content_protection)
8757                 return false;
8758
8759         /*
8760          * Handles:     UNDESIRED -> DESIRED
8761          *              DESIRED -> UNDESIRED
8762          *              ENABLED -> UNDESIRED
8763          */
8764         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8765                 return true;
8766
8767         /*
8768          * Handles:     DESIRED -> ENABLED
8769          */
8770         return false;
8771 }
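
     /*
      * Condensed view of the transitions handled above (a reading aid only;
      * the function itself is authoritative):
      *
      *	Type0/1 change (new != UNDESIRED) -> force DESIRED, enable  (true)
      *	ENABLED   -> DESIRED              -> keep ENABLED           (false)
      *	UNDESIRED -> ENABLED (S3 resume)  -> demote to DESIRED, fall through
      *	DESIRED   -> DESIRED              -> true only for the re-enable
      *	                                     and hotplug special cases
      *	old == new                        -> false
      *	new != ENABLED                    -> true
      *	DESIRED   -> ENABLED              -> false (hw raises it itself)
      */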
8772
8773 #endif
8774 static void remove_stream(struct amdgpu_device *adev,
8775                           struct amdgpu_crtc *acrtc,
8776                           struct dc_stream_state *stream)
8777 {
8778         /* this is the update mode case */
8779
8780         acrtc->otg_inst = -1;
8781         acrtc->enabled = false;
8782 }
8783
8784 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8785                                struct dc_cursor_position *position)
8786 {
8787         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8788         int x, y;
8789         int xorigin = 0, yorigin = 0;
8790
8791         if (!crtc || !plane->state->fb)
8792                 return 0;
8793
8794         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8795             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8796                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8797                           __func__,
8798                           plane->state->crtc_w,
8799                           plane->state->crtc_h);
8800                 return -EINVAL;
8801         }
8802
8803         x = plane->state->crtc_x;
8804         y = plane->state->crtc_y;
8805
8806         if (x <= -amdgpu_crtc->max_cursor_width ||
8807             y <= -amdgpu_crtc->max_cursor_height)
8808                 return 0;
8809
8810         if (x < 0) {
8811                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8812                 x = 0;
8813         }
8814         if (y < 0) {
8815                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8816                 y = 0;
8817         }
8818         position->enable = true;
8819         position->translate_by_source = true;
8820         position->x = x;
8821         position->y = y;
8822         position->x_hotspot = xorigin;
8823         position->y_hotspot = yorigin;
8824
8825         return 0;
8826 }
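
     /*
      * Example of the clamping above (hypothetical values): a 64x64 cursor
      * dragged partly off the left edge to crtc_x = -10 yields
      *
      *	xorigin = min(10, max_cursor_width - 1) = 10,  x = 0
      *
      * i.e. the plane is pinned at x = 0 and the hotspot is shifted 10
      * pixels into the cursor image, so the visible portion matches what
      * the user expects to see.
      */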
8827
8828 static void handle_cursor_update(struct drm_plane *plane,
8829                                  struct drm_plane_state *old_plane_state)
8830 {
8831         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8832         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8833         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8834         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8835         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8836         uint64_t address = afb ? afb->address : 0;
8837         struct dc_cursor_position position = {0};
8838         struct dc_cursor_attributes attributes;
8839         int ret;
8840
8841         if (!plane->state->fb && !old_plane_state->fb)
8842                 return;
8843
8844         DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
8845                       __func__,
8846                       amdgpu_crtc->crtc_id,
8847                       plane->state->crtc_w,
8848                       plane->state->crtc_h);
8849
8850         ret = get_cursor_position(plane, crtc, &position);
8851         if (ret)
8852                 return;
8853
8854         if (!position.enable) {
8855                 /* turn off cursor */
8856                 if (crtc_state && crtc_state->stream) {
8857                         mutex_lock(&adev->dm.dc_lock);
8858                         dc_stream_set_cursor_position(crtc_state->stream,
8859                                                       &position);
8860                         mutex_unlock(&adev->dm.dc_lock);
8861                 }
8862                 return;
8863         }
8864
8865         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8866         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8867
8868         memset(&attributes, 0, sizeof(attributes));
8869         attributes.address.high_part = upper_32_bits(address);
8870         attributes.address.low_part  = lower_32_bits(address);
8871         attributes.width             = plane->state->crtc_w;
8872         attributes.height            = plane->state->crtc_h;
8873         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8874         attributes.rotation_angle    = 0;
8875         attributes.attribute_flags.value = 0;
8876
8877         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8878
8879         if (crtc_state->stream) {
8880                 mutex_lock(&adev->dm.dc_lock);
8881                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8882                                                          &attributes))
8883                         DRM_ERROR("DC failed to set cursor attributes\n");
8884
8885                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8886                                                    &position))
8887                         DRM_ERROR("DC failed to set cursor position\n");
8888                 mutex_unlock(&adev->dm.dc_lock);
8889         }
8890 }
8891
8892 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8893 {
8894
8895         assert_spin_locked(&acrtc->base.dev->event_lock);
8896         WARN_ON(acrtc->event);
8897
8898         acrtc->event = acrtc->base.state->event;
8899
8900         /* Set the flip status */
8901         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8902
8903         /* Mark this event as consumed */
8904         acrtc->base.state->event = NULL;
8905
8906         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8907                      acrtc->crtc_id);
8908 }
8909
8910 static void update_freesync_state_on_stream(
8911         struct amdgpu_display_manager *dm,
8912         struct dm_crtc_state *new_crtc_state,
8913         struct dc_stream_state *new_stream,
8914         struct dc_plane_state *surface,
8915         u32 flip_timestamp_in_us)
8916 {
8917         struct mod_vrr_params vrr_params;
8918         struct dc_info_packet vrr_infopacket = {0};
8919         struct amdgpu_device *adev = dm->adev;
8920         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8921         unsigned long flags;
8922         bool pack_sdp_v1_3 = false;
8923
8924         if (!new_stream)
8925                 return;
8926
8927         /*
8928          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8929          * For now it's sufficient to just guard against these conditions.
8930          */
8931
8932         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8933                 return;
8934
8935         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8936         vrr_params = acrtc->dm_irq_params.vrr_params;
8937
8938         if (surface) {
8939                 mod_freesync_handle_preflip(
8940                         dm->freesync_module,
8941                         surface,
8942                         new_stream,
8943                         flip_timestamp_in_us,
8944                         &vrr_params);
8945
8946                 if (adev->family < AMDGPU_FAMILY_AI &&
8947                     amdgpu_dm_vrr_active(new_crtc_state)) {
8948                         mod_freesync_handle_v_update(dm->freesync_module,
8949                                                      new_stream, &vrr_params);
8950
8951                         /* Need to call this before the frame ends. */
8952                         dc_stream_adjust_vmin_vmax(dm->dc,
8953                                                    new_crtc_state->stream,
8954                                                    &vrr_params.adjust);
8955                 }
8956         }
8957
8958         mod_freesync_build_vrr_infopacket(
8959                 dm->freesync_module,
8960                 new_stream,
8961                 &vrr_params,
8962                 PACKET_TYPE_VRR,
8963                 TRANSFER_FUNC_UNKNOWN,
8964                 &vrr_infopacket,
8965                 pack_sdp_v1_3);
8966
8967         new_crtc_state->freesync_timing_changed |=
8968                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8969                         &vrr_params.adjust,
8970                         sizeof(vrr_params.adjust)) != 0);
8971
8972         new_crtc_state->freesync_vrr_info_changed |=
8973                 (memcmp(&new_crtc_state->vrr_infopacket,
8974                         &vrr_infopacket,
8975                         sizeof(vrr_infopacket)) != 0);
8976
8977         acrtc->dm_irq_params.vrr_params = vrr_params;
8978         new_crtc_state->vrr_infopacket = vrr_infopacket;
8979
8980         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8981         new_stream->vrr_infopacket = vrr_infopacket;
8982
8983         if (new_crtc_state->freesync_vrr_info_changed)
8984                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8985                               new_crtc_state->base.crtc->base.id,
8986                               (int)new_crtc_state->base.vrr_enabled,
8987                               (int)vrr_params.state);
8988
8989         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8990 }
8991
8992 static void update_stream_irq_parameters(
8993         struct amdgpu_display_manager *dm,
8994         struct dm_crtc_state *new_crtc_state)
8995 {
8996         struct dc_stream_state *new_stream = new_crtc_state->stream;
8997         struct mod_vrr_params vrr_params;
8998         struct mod_freesync_config config = new_crtc_state->freesync_config;
8999         struct amdgpu_device *adev = dm->adev;
9000         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9001         unsigned long flags;
9002
9003         if (!new_stream)
9004                 return;
9005
9006         /*
9007          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9008          * For now it's sufficient to just guard against these conditions.
9009          */
9010         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9011                 return;
9012
9013         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9014         vrr_params = acrtc->dm_irq_params.vrr_params;
9015
9016         if (new_crtc_state->vrr_supported &&
9017             config.min_refresh_in_uhz &&
9018             config.max_refresh_in_uhz) {
9019                 /*
9020                  * if freesync compatible mode was set, config.state will be set
9021                  * in atomic check
9022                  */
9023                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9024                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9025                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9026                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9027                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9028                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9029                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9030                 } else {
9031                         config.state = new_crtc_state->base.vrr_enabled ?
9032                                                      VRR_STATE_ACTIVE_VARIABLE :
9033                                                      VRR_STATE_INACTIVE;
9034                 }
9035         } else {
9036                 config.state = VRR_STATE_UNSUPPORTED;
9037         }
9038
9039         mod_freesync_build_vrr_params(dm->freesync_module,
9040                                       new_stream,
9041                                       &config, &vrr_params);
9042
9043         new_crtc_state->freesync_timing_changed |=
9044                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9045                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9046
9047         new_crtc_state->freesync_config = config;
9048         /* Copy state for access from DM IRQ handler */
9049         acrtc->dm_irq_params.freesync_config = config;
9050         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9051         acrtc->dm_irq_params.vrr_params = vrr_params;
9052         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9053 }
9054
9055 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9056                                             struct dm_crtc_state *new_state)
9057 {
9058         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9059         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9060
9061         if (!old_vrr_active && new_vrr_active) {
9062                 /* Transition VRR inactive -> active:
9063                  * While VRR is active, we must not disable vblank irq, as a
9064                  * reenable after disable would compute bogus vblank/pflip
9065                  * timestamps if it likely happened inside display front-porch.
9066                  *
9067                  * We also need vupdate irq for the actual core vblank handling
9068                  * at end of vblank.
9069                  */
9070                 dm_set_vupdate_irq(new_state->base.crtc, true);
9071                 drm_crtc_vblank_get(new_state->base.crtc);
9072                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9073                                  __func__, new_state->base.crtc->base.id);
9074         } else if (old_vrr_active && !new_vrr_active) {
9075                 /* Transition VRR active -> inactive:
9076                  * Allow vblank irq disable again for fixed refresh rate.
9077                  */
9078                 dm_set_vupdate_irq(new_state->base.crtc, false);
9079                 drm_crtc_vblank_put(new_state->base.crtc);
9080                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9081                                  __func__, new_state->base.crtc->base.id);
9082         }
9083 }
9084
9085 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9086 {
9087         struct drm_plane *plane;
9088         struct drm_plane_state *old_plane_state;
9089         int i;
9090
9091         /*
9092          * TODO: Make this per-stream so we don't issue redundant updates for
9093          * commits with multiple streams.
9094          */
9095         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9096                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9097                         handle_cursor_update(plane, old_plane_state);
9098 }
9099
9100 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9101                                     struct dc_state *dc_state,
9102                                     struct drm_device *dev,
9103                                     struct amdgpu_display_manager *dm,
9104                                     struct drm_crtc *pcrtc,
9105                                     bool wait_for_vblank)
9106 {
9107         uint32_t i;
9108         uint64_t timestamp_ns;
9109         struct drm_plane *plane;
9110         struct drm_plane_state *old_plane_state, *new_plane_state;
9111         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9112         struct drm_crtc_state *new_pcrtc_state =
9113                         drm_atomic_get_new_crtc_state(state, pcrtc);
9114         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9115         struct dm_crtc_state *dm_old_crtc_state =
9116                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9117         int planes_count = 0, vpos, hpos;
9118         long r;
9119         unsigned long flags;
9120         struct amdgpu_bo *abo;
9121         uint32_t target_vblank, last_flip_vblank;
9122         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9123         bool pflip_present = false;
9124         struct {
9125                 struct dc_surface_update surface_updates[MAX_SURFACES];
9126                 struct dc_plane_info plane_infos[MAX_SURFACES];
9127                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9128                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9129                 struct dc_stream_update stream_update;
9130         } *bundle;
9131
9132         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9133
9134         if (!bundle) {
9135                 dm_error("Failed to allocate update bundle\n");
9136                 goto cleanup;
9137         }
9138
9139         /*
9140          * Disable the cursor first if we're disabling all the planes.
9141          * It'll remain on the screen after the planes are re-enabled
9142          * if we don't.
9143          */
9144         if (acrtc_state->active_planes == 0)
9145                 amdgpu_dm_commit_cursors(state);
9146
9147         /* update planes when needed */
9148         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9149                 struct drm_crtc *crtc = new_plane_state->crtc;
9150                 struct drm_crtc_state *new_crtc_state;
9151                 struct drm_framebuffer *fb = new_plane_state->fb;
9152                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9153                 bool plane_needs_flip;
9154                 struct dc_plane_state *dc_plane;
9155                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9156
9157                 /* Cursor plane is handled after stream updates */
9158                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9159                         continue;
9160
9161                 if (!fb || !crtc || pcrtc != crtc)
9162                         continue;
9163
9164                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9165                 if (!new_crtc_state->active)
9166                         continue;
9167
9168                 dc_plane = dm_new_plane_state->dc_state;
9169
9170                 bundle->surface_updates[planes_count].surface = dc_plane;
9171                 if (new_pcrtc_state->color_mgmt_changed) {
9172                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9173                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9174                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9175                 }
9176
9177                 fill_dc_scaling_info(dm->adev, new_plane_state,
9178                                      &bundle->scaling_infos[planes_count]);
9179
9180                 bundle->surface_updates[planes_count].scaling_info =
9181                         &bundle->scaling_infos[planes_count];
9182
9183                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9184
9185                 pflip_present = pflip_present || plane_needs_flip;
9186
9187                 if (!plane_needs_flip) {
9188                         planes_count += 1;
9189                         continue;
9190                 }
9191
9192                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9193
9194                 /*
9195                  * Wait for all fences on this FB. Do limited wait to avoid
9196                  * deadlock during GPU reset when this fence will not signal
9197                  * but we hold reservation lock for the BO.
9198                  */
9199                 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9200                                           msecs_to_jiffies(5000));
9201                 if (unlikely(r <= 0))
9202                         DRM_ERROR("Waiting for fences timed out!");
9203
9204                 fill_dc_plane_info_and_addr(
9205                         dm->adev, new_plane_state,
9206                         afb->tiling_flags,
9207                         &bundle->plane_infos[planes_count],
9208                         &bundle->flip_addrs[planes_count].address,
9209                         afb->tmz_surface, false);
9210
9211                 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9212                                  new_plane_state->plane->index,
9213                                  bundle->plane_infos[planes_count].dcc.enable);
9214
9215                 bundle->surface_updates[planes_count].plane_info =
9216                         &bundle->plane_infos[planes_count];
9217
9218                 /*
9219                  * Only allow immediate flips for fast updates that don't
9220                  * change FB pitch, DCC state, rotation or mirroring.
9221                  */
9222                 bundle->flip_addrs[planes_count].flip_immediate =
9223                         crtc->state->async_flip &&
9224                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9225
9226                 timestamp_ns = ktime_get_ns();
9227                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9228                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9229                 bundle->surface_updates[planes_count].surface = dc_plane;
9230
9231                 if (!bundle->surface_updates[planes_count].surface) {
9232                         DRM_ERROR("No surface for CRTC: id=%d\n",
9233                                         acrtc_attach->crtc_id);
9234                         continue;
9235                 }
9236
9237                 if (plane == pcrtc->primary)
9238                         update_freesync_state_on_stream(
9239                                 dm,
9240                                 acrtc_state,
9241                                 acrtc_state->stream,
9242                                 dc_plane,
9243                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9244
9245                 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9246                                  __func__,
9247                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9248                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9249
9250                 planes_count += 1;
9251
9252         }
9253
9254         if (pflip_present) {
9255                 if (!vrr_active) {
9256                         /* Use old throttling in non-vrr fixed refresh rate mode
9257                          * to keep flip scheduling based on target vblank counts
9258                          * working in a backwards compatible way, e.g., for
9259                          * clients using the GLX_OML_sync_control extension or
9260                          * DRI3/Present extension with defined target_msc.
9261                          */
9262                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9263                 } else {
9265                         /* For variable refresh rate mode only:
9266                          * Get vblank of last completed flip to avoid > 1 vrr
9267                          * flips per video frame by use of throttling, but allow
9268                          * flip programming anywhere in the possibly large
9269                          * variable vrr vblank interval for fine-grained flip
9270                          * timing control and more opportunity to avoid stutter
9271                          * on late submission of flips.
9272                          */
9273                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9274                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9275                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9276                 }
9277
9278                 target_vblank = last_flip_vblank + wait_for_vblank;
9279
9280                 /*
9281                  * Wait until we're out of the vertical blank period before the one
9282                  * targeted by the flip
9283                  */
9284                 while ((acrtc_attach->enabled &&
9285                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9286                                                             0, &vpos, &hpos, NULL,
9287                                                             NULL, &pcrtc->hwmode)
9288                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9289                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9290                         (int)(target_vblank -
9291                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9292                         usleep_range(1000, 1100);
9293                 }
9294
9295                 /**
9296                  * Prepare the flip event for the pageflip interrupt to handle.
9297                  *
9298                  * This only works in the case where we've already turned on the
9299                  * appropriate hardware blocks (eg. HUBP) so in the transition case
9300                  * from 0 -> n planes we have to skip a hardware generated event
9301                  * and rely on sending it from software.
9302                  */
9303                 if (acrtc_attach->base.state->event &&
9304                     acrtc_state->active_planes > 0 &&
9305                     !acrtc_state->force_dpms_off) {
9306                         drm_crtc_vblank_get(pcrtc);
9307
9308                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9309
9310                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9311                         prepare_flip_isr(acrtc_attach);
9312
9313                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9314                 }
9315
9316                 if (acrtc_state->stream) {
9317                         if (acrtc_state->freesync_vrr_info_changed)
9318                                 bundle->stream_update.vrr_infopacket =
9319                                         &acrtc_state->stream->vrr_infopacket;
9320                 }
9321         }
9322
9323         /* Update the planes if changed or disable if we don't have any. */
9324         if ((planes_count || acrtc_state->active_planes == 0) &&
9325                 acrtc_state->stream) {
9326 #if defined(CONFIG_DRM_AMD_DC_DCN)
9327                 /*
9328                  * If PSR or idle optimizations are enabled then flush out
9329                  * any pending work before hardware programming.
9330                  */
9331                 if (dm->vblank_control_workqueue)
9332                         flush_workqueue(dm->vblank_control_workqueue);
9333 #endif
9334
9335                 bundle->stream_update.stream = acrtc_state->stream;
9336                 if (new_pcrtc_state->mode_changed) {
9337                         bundle->stream_update.src = acrtc_state->stream->src;
9338                         bundle->stream_update.dst = acrtc_state->stream->dst;
9339                 }
9340
9341                 if (new_pcrtc_state->color_mgmt_changed) {
9342                         /*
9343                          * TODO: This isn't fully correct since we've actually
9344                          * already modified the stream in place.
9345                          */
9346                         bundle->stream_update.gamut_remap =
9347                                 &acrtc_state->stream->gamut_remap_matrix;
9348                         bundle->stream_update.output_csc_transform =
9349                                 &acrtc_state->stream->csc_color_matrix;
9350                         bundle->stream_update.out_transfer_func =
9351                                 acrtc_state->stream->out_transfer_func;
9352                 }
9353
9354                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9355                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9356                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9357
9358                 /*
9359                  * If FreeSync state on the stream has changed then we need to
9360                  * re-adjust the min/max bounds now that DC doesn't handle this
9361                  * as part of commit.
9362                  */
9363                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9364                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9365                         dc_stream_adjust_vmin_vmax(
9366                                 dm->dc, acrtc_state->stream,
9367                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9368                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9369                 }
9370                 mutex_lock(&dm->dc_lock);
9371                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9372                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9373                         amdgpu_dm_psr_disable(acrtc_state->stream);
9374
9375                 dc_commit_updates_for_stream(dm->dc,
9376                                                      bundle->surface_updates,
9377                                                      planes_count,
9378                                                      acrtc_state->stream,
9379                                                      &bundle->stream_update,
9380                                                      dc_state);
9381
9382                 /**
9383                  * Enable or disable the interrupts on the backend.
9384                  *
9385                  * Most pipes are put into power gating when unused.
9386                  *
9387                  * When power gating is enabled on a pipe we lose the
9388                  * interrupt enablement state when power gating is disabled.
9389                  *
9390                  * So we need to update the IRQ control state in hardware
9391                  * whenever the pipe turns on (since it could be previously
9392                  * power gated) or off (since some pipes can't be power gated
9393                  * on some ASICs).
9394                  */
9395                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9396                         dm_update_pflip_irq_state(drm_to_adev(dev),
9397                                                   acrtc_attach);
9398
9399                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9400                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9401                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9402                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9403
9404                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9405                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9406                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9407                         struct amdgpu_dm_connector *aconn =
9408                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9409
9410                         if (aconn->psr_skip_count > 0)
9411                                 aconn->psr_skip_count--;
9412
9413                         /* Allow PSR when skip count is 0. */
9414                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9415                 } else {
9416                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9417                 }
9418
9419                 mutex_unlock(&dm->dc_lock);
9420         }
9421
9422         /*
9423          * Update cursor state *after* programming all the planes.
9424          * This avoids redundant programming in the case where we're going
9425          * to be disabling a single plane - those pipes are being disabled.
9426          */
9427         if (acrtc_state->active_planes)
9428                 amdgpu_dm_commit_cursors(state);
9429
9430 cleanup:
9431         kfree(bundle);
9432 }
9433
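/*
 * amdgpu_dm_commit_audio - update audio routing after an atomic commit
 * @dev: DRM device
 * @state: the atomic state being committed
 *
 * For connectors that lost or changed their CRTC, clear the cached audio
 * instance and notify the audio component so it drops the stale ELD. For
 * connectors that gained an active stream through a modeset, cache the new
 * audio instance from the stream status and notify the audio component so
 * it can pick up the new ELD.
 */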
9434 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9435                                    struct drm_atomic_state *state)
9436 {
9437         struct amdgpu_device *adev = drm_to_adev(dev);
9438         struct amdgpu_dm_connector *aconnector;
9439         struct drm_connector *connector;
9440         struct drm_connector_state *old_con_state, *new_con_state;
9441         struct drm_crtc_state *new_crtc_state;
9442         struct dm_crtc_state *new_dm_crtc_state;
9443         const struct dc_stream_status *status;
9444         int i, inst;
9445
9446         /* Notify audio device removals. */
9447         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9448                 if (old_con_state->crtc != new_con_state->crtc) {
9449                         /* CRTC changes require notification. */
9450                         goto notify;
9451                 }
9452
9453                 if (!new_con_state->crtc)
9454                         continue;
9455
9456                 new_crtc_state = drm_atomic_get_new_crtc_state(
9457                         state, new_con_state->crtc);
9458
9459                 if (!new_crtc_state)
9460                         continue;
9461
9462                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9463                         continue;
9464
9465         notify:
9466                 aconnector = to_amdgpu_dm_connector(connector);
9467
9468                 mutex_lock(&adev->dm.audio_lock);
9469                 inst = aconnector->audio_inst;
9470                 aconnector->audio_inst = -1;
9471                 mutex_unlock(&adev->dm.audio_lock);
9472
9473                 amdgpu_dm_audio_eld_notify(adev, inst);
9474         }
9475
9476         /* Notify audio device additions. */
9477         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9478                 if (!new_con_state->crtc)
9479                         continue;
9480
9481                 new_crtc_state = drm_atomic_get_new_crtc_state(
9482                         state, new_con_state->crtc);
9483
9484                 if (!new_crtc_state)
9485                         continue;
9486
9487                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9488                         continue;
9489
9490                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9491                 if (!new_dm_crtc_state->stream)
9492                         continue;
9493
9494                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9495                 if (!status)
9496                         continue;
9497
9498                 aconnector = to_amdgpu_dm_connector(connector);
9499
9500                 mutex_lock(&adev->dm.audio_lock);
9501                 inst = status->audio_inst;
9502                 aconnector->audio_inst = inst;
9503                 mutex_unlock(&adev->dm.audio_lock);
9504
9505                 amdgpu_dm_audio_eld_notify(adev, inst);
9506         }
9507 }
9508
9509 /*
9510  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9511  * @crtc_state: the DRM CRTC state
9512  * @stream_state: the DC stream state.
9513  *
9514  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9515  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9516  */
9517 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9518                                                 struct dc_stream_state *stream_state)
9519 {
9520         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9521 }
9522
9523 /**
9524  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9525  * @state: The atomic state to commit
9526  *
9527  * This will tell DC to commit the constructed DC state from atomic_check,
9528  * programming the hardware. Any failure here implies a hardware failure, since
9529  * atomic check should have filtered anything non-kosher.
9530  */
9531 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9532 {
9533         struct drm_device *dev = state->dev;
9534         struct amdgpu_device *adev = drm_to_adev(dev);
9535         struct amdgpu_display_manager *dm = &adev->dm;
9536         struct dm_atomic_state *dm_state;
9537         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9538         uint32_t i, j;
9539         struct drm_crtc *crtc;
9540         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9541         unsigned long flags;
9542         bool wait_for_vblank = true;
9543         struct drm_connector *connector;
9544         struct drm_connector_state *old_con_state, *new_con_state;
9545         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9546         int crtc_disable_count = 0;
9547         bool mode_set_reset_required = false;
9548
9549         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9550
9551         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9552
9553         dm_state = dm_atomic_get_new_state(state);
9554         if (dm_state && dm_state->context) {
9555                 dc_state = dm_state->context;
9556         } else {
9557                 /* No state changes, retain current state. */
9558                 dc_state_temp = dc_create_state(dm->dc);
9559                 ASSERT(dc_state_temp);
9560                 dc_state = dc_state_temp;
9561                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9562         }
9563
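        /*
         * Tear down first: disable interrupts and drop the stream reference
         * for every CRTC that is being turned off or is about to undergo a
         * full modeset, before any hardware programming below.
         */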
9564         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9565                                        new_crtc_state, i) {
9566                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9567
9568                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9569
9570                 if (old_crtc_state->active &&
9571                     (!new_crtc_state->active ||
9572                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9573                         manage_dm_interrupts(adev, acrtc, false);
9574                         dc_stream_release(dm_old_crtc_state->stream);
9575                 }
9576         }
9577
9578         drm_atomic_helper_calc_timestamping_constants(state);
9579
9580         /* update changed items */
9581         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9582                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9583
9584                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9585                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9586
9587                 DRM_DEBUG_ATOMIC(
9588                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9589                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9590                         "connectors_changed:%d\n",
9591                         acrtc->crtc_id,
9592                         new_crtc_state->enable,
9593                         new_crtc_state->active,
9594                         new_crtc_state->planes_changed,
9595                         new_crtc_state->mode_changed,
9596                         new_crtc_state->active_changed,
9597                         new_crtc_state->connectors_changed);
9598
9599                 /* Disable cursor if disabling crtc */
9600                 if (old_crtc_state->active && !new_crtc_state->active) {
9601                         struct dc_cursor_position position;
9602
9603                         memset(&position, 0, sizeof(position));
9604                         mutex_lock(&dm->dc_lock);
9605                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9606                         mutex_unlock(&dm->dc_lock);
9607                 }
9608
9609                 /* Copy all transient state flags into dc state */
9610                 if (dm_new_crtc_state->stream) {
9611                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9612                                                             dm_new_crtc_state->stream);
9613                 }
9614
9615                 /* Handle the headless hotplug case, updating new_state and
9616                  * aconnector as needed.
9617                  */
9618
9619                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9620
9621                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9622
9623                         if (!dm_new_crtc_state->stream) {
9624                                 /*
9625                                  * This can happen because of issues with the
9626                                  * delivery of userspace notifications. In this
9627                                  * case userspace tries to set a mode on a
9628                                  * display which is in fact disconnected, so
9629                                  * dc_sink is NULL on the aconnector. We expect
9630                                  * a mode reset to come soon.
9631                                  *
9632                                  * This can also happen when an unplug occurs
9633                                  * while the resume sequence is still completing.
9634                                  *
9635                                  * In this case, we want to pretend we still
9636                                  * have a sink to keep the pipe running so that
9637                                  * hw state is consistent with the sw state.
9638                                  */
9639                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9640                                                 __func__, acrtc->base.base.id);
9641                                 continue;
9642                         }
9643
9644                         if (dm_old_crtc_state->stream)
9645                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9646
9647                         pm_runtime_get_noresume(dev->dev);
9648
9649                         acrtc->enabled = true;
9650                         acrtc->hw_mode = new_crtc_state->mode;
9651                         crtc->hwmode = new_crtc_state->mode;
9652                         mode_set_reset_required = true;
9653                 } else if (modereset_required(new_crtc_state)) {
9654                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9655                         /* i.e. reset mode */
9656                         if (dm_old_crtc_state->stream)
9657                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9658
9659                         mode_set_reset_required = true;
9660                 }
9661         } /* for_each_crtc_in_state() */
9662
9663         if (dc_state) {
9664                 /* If there was a mode set or reset, disable eDP PSR. */
9665                 if (mode_set_reset_required) {
9666 #if defined(CONFIG_DRM_AMD_DC_DCN)
9667                         if (dm->vblank_control_workqueue)
9668                                 flush_workqueue(dm->vblank_control_workqueue);
9669 #endif
9670                         amdgpu_dm_psr_disable_all(dm);
9671                 }
9672
9673                 dm_enable_per_frame_crtc_master_sync(dc_state);
9674                 mutex_lock(&dm->dc_lock);
9675                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9676 #if defined(CONFIG_DRM_AMD_DC_DCN)
9677                 /* Allow idle optimizations when vblank count is 0 for display off */
9678                 if (dm->active_vblank_irq_count == 0)
9679                         dc_allow_idle_optimizations(dm->dc, true);
9680 #endif
9681                 mutex_unlock(&dm->dc_lock);
9682         }
9683
9684         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9685                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9686
9687                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9688
9689                 if (dm_new_crtc_state->stream != NULL) {
9690                         const struct dc_stream_status *status =
9691                                         dc_stream_get_status(dm_new_crtc_state->stream);
9692
9693                         if (!status)
9694                                 status = dc_stream_get_status_from_state(dc_state,
9695                                                                          dm_new_crtc_state->stream);
9696                         if (!status)
9697                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
9698                         else
9699                                 acrtc->otg_inst = status->primary_otg_inst;
9700                 }
9701         }
9702 #ifdef CONFIG_DRM_AMD_DC_HDCP
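        /*
         * Update HDCP state. If the stream is going away while content
         * protection is enabled, reset the display and fall back to
         * DESIRED so protection is re-requested later; otherwise
         * reprogram HDCP whenever the desired content protection state
         * differs from the old one.
         */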
9703         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9704                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9705                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9706                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9707
9708                 new_crtc_state = NULL;
9709
9710                 if (acrtc)
9711                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9712
9713                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9714
9715                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9716                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9717                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9718                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9719                         dm_new_con_state->update_hdcp = true;
9720                         continue;
9721                 }
9722
9723                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9724                         hdcp_update_display(
9725                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9726                                 new_con_state->hdcp_content_type,
9727                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9728         }
9729 #endif
9730
9731         /* Handle connector state changes */
9732         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9733                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9734                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9735                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9736                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9737                 struct dc_stream_update stream_update;
9738                 struct dc_info_packet hdr_packet;
9739                 struct dc_stream_status *status = NULL;
9740                 bool abm_changed, hdr_changed, scaling_changed;
9741
9742                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9743                 memset(&stream_update, 0, sizeof(stream_update));
9744
9745                 if (acrtc) {
9746                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9747                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9748                 }
9749
9750                 /* Skip any modesets/resets */
9751                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9752                         continue;
9753
9754                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9755                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9756
9757                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9758                                                              dm_old_con_state);
9759
9760                 abm_changed = dm_new_crtc_state->abm_level !=
9761                               dm_old_crtc_state->abm_level;
9762
9763                 hdr_changed =
9764                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9765
9766                 if (!scaling_changed && !abm_changed && !hdr_changed)
9767                         continue;
9768
9769                 stream_update.stream = dm_new_crtc_state->stream;
9770                 if (scaling_changed) {
9771                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9772                                         dm_new_con_state, dm_new_crtc_state->stream);
9773
9774                         stream_update.src = dm_new_crtc_state->stream->src;
9775                         stream_update.dst = dm_new_crtc_state->stream->dst;
9776                 }
9777
9778                 if (abm_changed) {
9779                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9780
9781                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9782                 }
9783
9784                 if (hdr_changed) {
9785                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9786                         stream_update.hdr_static_metadata = &hdr_packet;
9787                 }
9788
9789                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9790
9791                 if (WARN_ON(!status))
9792                         continue;
9793
9794                 WARN_ON(!status->plane_count);
9795
9796                 /*
9797                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9798                  * Here we create an empty update on each plane.
9799                  * To fix this, DC should permit updating only stream properties.
9800                  */
9801                 for (j = 0; j < status->plane_count; j++)
9802                         dummy_updates[j].surface = status->plane_states[0];
9803
9804
9805                 mutex_lock(&dm->dc_lock);
9806                 dc_commit_updates_for_stream(dm->dc,
9807                                                      dummy_updates,
9808                                                      status->plane_count,
9809                                                      dm_new_crtc_state->stream,
9810                                                      &stream_update,
9811                                                      dc_state);
9812                 mutex_unlock(&dm->dc_lock);
9813         }
9814
9815         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9816         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9817                                       new_crtc_state, i) {
9818                 if (old_crtc_state->active && !new_crtc_state->active)
9819                         crtc_disable_count++;
9820
9821                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9822                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9823
9824                 /* For freesync config update on crtc state and params for irq */
9825                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9826
9827                 /* Handle vrr on->off / off->on transitions */
9828                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9829                                                 dm_new_crtc_state);
9830         }
9831
9832         /**
9833          * Enable interrupts for CRTCs that are newly enabled or went through
9834          * a modeset. This is intentionally deferred until after the front end
9835          * state has been modified, so that the OTG is already on and the IRQ
9836          * handlers do not access stale or invalid state.
9837          */
9838         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9839                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9840 #ifdef CONFIG_DEBUG_FS
9841                 bool configure_crc = false;
9842                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9843 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9844                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9845 #endif
9846                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9847                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9848                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9849 #endif
9850                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9851
9852                 if (new_crtc_state->active &&
9853                     (!old_crtc_state->active ||
9854                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9855                         dc_stream_retain(dm_new_crtc_state->stream);
9856                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9857                         manage_dm_interrupts(adev, acrtc, true);
9858
9859 #ifdef CONFIG_DEBUG_FS
9860                         /**
9861                          * Frontend may have changed so reapply the CRC capture
9862                          * settings for the stream.
9863                          */
9864                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9865
9866                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9867                                 configure_crc = true;
9868 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9869                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9870                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9871                                         acrtc->dm_irq_params.crc_window.update_win = true;
9872                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9873                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9874                                         crc_rd_wrk->crtc = crtc;
9875                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9876                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9877                                 }
9878 #endif
9879                         }
9880
9881                         if (configure_crc)
9882                                 if (amdgpu_dm_crtc_configure_crc_source(
9883                                         crtc, dm_new_crtc_state, cur_crc_src))
9884                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9885 #endif
9886                 }
9887         }
9888
9889         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9890                 if (new_crtc_state->async_flip)
9891                         wait_for_vblank = false;
9892
9893         /* update planes when needed per crtc*/
9894         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9895                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9896
9897                 if (dm_new_crtc_state->stream)
9898                         amdgpu_dm_commit_planes(state, dc_state, dev,
9899                                                 dm, crtc, wait_for_vblank);
9900         }
9901
9902         /* Update audio instances for each connector. */
9903         amdgpu_dm_commit_audio(dev, state);
9904
9905 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9906         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9907         /* restore the backlight level */
9908         for (i = 0; i < dm->num_of_edps; i++) {
9909                 if (dm->backlight_dev[i] &&
9910                     (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9911                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9912         }
9913 #endif
9914         /*
9915          * Send the vblank event for all events not handled in flip and
9916          * mark the event as consumed for drm_atomic_helper_commit_hw_done.
9917          */
9918         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9919         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9920
9921                 if (new_crtc_state->event)
9922                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9923
9924                 new_crtc_state->event = NULL;
9925         }
9926         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9927
9928         /* Signal HW programming completion */
9929         drm_atomic_helper_commit_hw_done(state);
9930
9931         if (wait_for_vblank)
9932                 drm_atomic_helper_wait_for_flip_done(dev, state);
9933
9934         drm_atomic_helper_cleanup_planes(dev, state);
9935
9936         /* return the stolen vga memory back to VRAM */
9937         if (!adev->mman.keep_stolen_vga_memory)
9938                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9939         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9940
9941         /*
9942          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9943          * so we can put the GPU into runtime suspend if we're not driving any
9944          * displays anymore
9945          */
9946         for (i = 0; i < crtc_disable_count; i++)
9947                 pm_runtime_put_autosuspend(dev->dev);
9948         pm_runtime_mark_last_busy(dev->dev);
9949
9950         if (dc_state_temp)
9951                 dc_release_state(dc_state_temp);
9952 }
9953
9954
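/*
 * dm_force_atomic_commit - force a full modeset on a single connector
 *
 * Builds a minimal atomic state containing the connector, the CRTC its
 * encoder is attached to, and that CRTC's primary plane, marks the CRTC
 * state as mode_changed to force a restore, and commits it.
 */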
9955 static int dm_force_atomic_commit(struct drm_connector *connector)
9956 {
9957         int ret = 0;
9958         struct drm_device *ddev = connector->dev;
9959         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9960         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9961         struct drm_plane *plane = disconnected_acrtc->base.primary;
9962         struct drm_connector_state *conn_state;
9963         struct drm_crtc_state *crtc_state;
9964         struct drm_plane_state *plane_state;
9965
9966         if (!state)
9967                 return -ENOMEM;
9968
9969         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9970
9971         /* Construct an atomic state to restore previous display setting */
9972
9973         /*
9974          * Attach connectors to drm_atomic_state
9975          */
9976         conn_state = drm_atomic_get_connector_state(state, connector);
9977
9978         ret = PTR_ERR_OR_ZERO(conn_state);
9979         if (ret)
9980                 goto out;
9981
9982         /* Attach crtc to drm_atomic_state*/
9983         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9984
9985         ret = PTR_ERR_OR_ZERO(crtc_state);
9986         if (ret)
9987                 goto out;
9988
9989         /* force a restore */
9990         crtc_state->mode_changed = true;
9991
9992         /* Attach plane to drm_atomic_state */
9993         plane_state = drm_atomic_get_plane_state(state, plane);
9994
9995         ret = PTR_ERR_OR_ZERO(plane_state);
9996         if (ret)
9997                 goto out;
9998
9999         /* Call commit internally with the state we just constructed */
10000         ret = drm_atomic_commit(state);
10001
10002 out:
10003         drm_atomic_state_put(state);
10004         if (ret)
10005                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10006
10007         return ret;
10008 }
10009
10010 /*
10011  * This function handles all cases when set mode does not come upon hotplug.
10012  * This includes when a display is unplugged and then plugged back into the
10013  * same port, and when running without usermode desktop manager support.
10014  */
10015 void dm_restore_drm_connector_state(struct drm_device *dev,
10016                                     struct drm_connector *connector)
10017 {
10018         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10019         struct amdgpu_crtc *disconnected_acrtc;
10020         struct dm_crtc_state *acrtc_state;
10021
10022         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10023                 return;
10024
10025         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10026         if (!disconnected_acrtc)
10027                 return;
10028
10029         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10030         if (!acrtc_state->stream)
10031                 return;
10032
10033         /*
10034          * If the previous sink is not released and is different from the
10035          * current one, we deduce we are in a state where we cannot rely on a
10036          * usermode call to turn on the display, so we do it here.
10037          */
10038         if (acrtc_state->stream->sink != aconnector->dc_sink)
10039                 dm_force_atomic_commit(&aconnector->base);
10040 }
10041
10042 /*
10043  * Grabs all modesetting locks to serialize against any blocking commits,
10044  * and waits for completion of all non-blocking commits.
10045  */
10046 static int do_aquire_global_lock(struct drm_device *dev,
10047                                  struct drm_atomic_state *state)
10048 {
10049         struct drm_crtc *crtc;
10050         struct drm_crtc_commit *commit;
10051         long ret;
10052
10053         /*
10054          * Adding all modeset locks to acquire_ctx ensures
10055          * that when the framework releases it, the extra
10056          * locks we take here are released as well.
10057          */
10058         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10059         if (ret)
10060                 return ret;
10061
10062         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10063                 spin_lock(&crtc->commit_lock);
10064                 commit = list_first_entry_or_null(&crtc->commit_list,
10065                                 struct drm_crtc_commit, commit_entry);
10066                 if (commit)
10067                         drm_crtc_commit_get(commit);
10068                 spin_unlock(&crtc->commit_lock);
10069
10070                 if (!commit)
10071                         continue;
10072
10073                 /*
10074                  * Make sure all pending HW programming completed and
10075                  * page flips done
10076                  */
10077                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10078
10079                 if (ret > 0)
10080                         ret = wait_for_completion_interruptible_timeout(
10081                                         &commit->flip_done, 10*HZ);
10082
10083                 if (ret == 0)
10084                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10085                                   "timed out\n", crtc->base.id, crtc->name);
10086
10087                 drm_crtc_commit_put(commit);
10088         }
10089
10090         return ret < 0 ? ret : 0;
10091 }
10092
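/*
 * Derive the FreeSync configuration for a CRTC from the connector state:
 * VRR is supported when the sink is freesync capable and the mode's
 * refresh rate falls within the sink's [min_vfreq, max_vfreq] range.
 * The resulting VRR state is fixed, variable or inactive depending on
 * whether a fixed freesync video mode is in use and whether the DRM
 * vrr_enabled property is set.
 */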
10093 static void get_freesync_config_for_crtc(
10094         struct dm_crtc_state *new_crtc_state,
10095         struct dm_connector_state *new_con_state)
10096 {
10097         struct mod_freesync_config config = {0};
10098         struct amdgpu_dm_connector *aconnector =
10099                         to_amdgpu_dm_connector(new_con_state->base.connector);
10100         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10101         int vrefresh = drm_mode_vrefresh(mode);
10102         bool fs_vid_mode = false;
10103
10104         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10105                                         vrefresh >= aconnector->min_vfreq &&
10106                                         vrefresh <= aconnector->max_vfreq;
10107
10108         if (new_crtc_state->vrr_supported) {
10109                 new_crtc_state->stream->ignore_msa_timing_param = true;
10110                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10111
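                /* DC's freesync module expects refresh rates in micro-Hz. */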
10112                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10113                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10114                 config.vsif_supported = true;
10115                 config.btr = true;
10116
10117                 if (fs_vid_mode) {
10118                         config.state = VRR_STATE_ACTIVE_FIXED;
10119                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10120                         goto out;
10121                 } else if (new_crtc_state->base.vrr_enabled) {
10122                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10123                 } else {
10124                         config.state = VRR_STATE_INACTIVE;
10125                 }
10126         }
10127 out:
10128         new_crtc_state->freesync_config = config;
10129 }
10130
10131 static void reset_freesync_config_for_crtc(
10132         struct dm_crtc_state *new_crtc_state)
10133 {
10134         new_crtc_state->vrr_supported = false;
10135
10136         memset(&new_crtc_state->vrr_infopacket, 0,
10137                sizeof(new_crtc_state->vrr_infopacket));
10138 }
10139
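/*
 * A FreeSync video mode switch changes only the vertical front porch:
 * the pixel clock, all horizontal timings and the vsync pulse width stay
 * identical while vtotal, vsync_start and vsync_end shift together.
 * Returns true when the old and new modes differ only in that way, so
 * the refresh rate change can be applied without a full modeset.
 */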
10140 static bool
10141 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10142                                  struct drm_crtc_state *new_crtc_state)
10143 {
10144         struct drm_display_mode old_mode, new_mode;
10145
10146         if (!old_crtc_state || !new_crtc_state)
10147                 return false;
10148
10149         old_mode = old_crtc_state->mode;
10150         new_mode = new_crtc_state->mode;
10151
10152         if (old_mode.clock       == new_mode.clock &&
10153             old_mode.hdisplay    == new_mode.hdisplay &&
10154             old_mode.vdisplay    == new_mode.vdisplay &&
10155             old_mode.htotal      == new_mode.htotal &&
10156             old_mode.vtotal      != new_mode.vtotal &&
10157             old_mode.hsync_start == new_mode.hsync_start &&
10158             old_mode.vsync_start != new_mode.vsync_start &&
10159             old_mode.hsync_end   == new_mode.hsync_end &&
10160             old_mode.vsync_end   != new_mode.vsync_end &&
10161             old_mode.hskew       == new_mode.hskew &&
10162             old_mode.vscan       == new_mode.vscan &&
10163             (old_mode.vsync_end - old_mode.vsync_start) ==
10164             (new_mode.vsync_end - new_mode.vsync_start))
10165                 return true;
10166
10167         return false;
10168 }
10169
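/*
 * Compute the fixed refresh rate for VRR_STATE_ACTIVE_FIXED in micro-Hz:
 * mode.clock is in kHz, so multiplying by 1000 gives the pixel clock in
 * Hz, a further factor of 1000000 scales to uHz, and dividing by
 * htotal * vtotal (pixels per frame) yields the refresh rate in uHz.
 */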
10170 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10171         uint64_t num, den, res;
10172         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10173
10174         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10175
10176         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10177         den = (unsigned long long)new_crtc_state->mode.htotal *
10178               (unsigned long long)new_crtc_state->mode.vtotal;
10179
10180         res = div_u64(num, den);
10181         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10182 }
10183
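/*
 * dm_update_crtc_state - keep the DC context's stream list in sync with
 * the DRM CRTC state.
 *
 * On disable, the old stream is removed from the DC context and the
 * freesync config is reset; on enable, a new stream is validated,
 * created for the attached sink and added to the context. Either path
 * sets *lock_and_validation_needed so the commit takes the fully
 * validated path. Non-modeset updates fall through to refresh scaling,
 * ABM, color management and freesync settings on the existing stream.
 */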
10184 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10185                                 struct drm_atomic_state *state,
10186                                 struct drm_crtc *crtc,
10187                                 struct drm_crtc_state *old_crtc_state,
10188                                 struct drm_crtc_state *new_crtc_state,
10189                                 bool enable,
10190                                 bool *lock_and_validation_needed)
10191 {
10192         struct dm_atomic_state *dm_state = NULL;
10193         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10194         struct dc_stream_state *new_stream;
10195         int ret = 0;
10196
10197         /*
10198          * TODO: Move this code, which updates changed items, into
10199          * dm_crtc_atomic_check once we get rid of dc_validation_set.
10200          */
10201         struct amdgpu_crtc *acrtc = NULL;
10202         struct amdgpu_dm_connector *aconnector = NULL;
10203         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10204         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10205
10206         new_stream = NULL;
10207
10208         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10209         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10210         acrtc = to_amdgpu_crtc(crtc);
10211         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10212
10213         /* TODO This hack should go away */
10214         if (aconnector && enable) {
10215                 /* Make sure fake sink is created in plug-in scenario */
10216                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10217                                                             &aconnector->base);
10218                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10219                                                             &aconnector->base);
10220
10221                 if (IS_ERR(drm_new_conn_state)) {
10222                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10223                         goto fail;
10224                 }
10225
10226                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10227                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10228
10229                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10230                         goto skip_modeset;
10231
10232                 new_stream = create_validate_stream_for_sink(aconnector,
10233                                                              &new_crtc_state->mode,
10234                                                              dm_new_conn_state,
10235                                                              dm_old_crtc_state->stream);
10236
10237                 /*
10238                  * We can have no stream on ACTION_SET if a display
10239                  * was disconnected during S3. In this case it is not an
10240                  * error; the OS will be updated after detection and
10241                  * will do the right thing on the next atomic commit.
10242                  */
10243
10244                 if (!new_stream) {
10245                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10246                                         __func__, acrtc->base.base.id);
10247                         ret = -ENOMEM;
10248                         goto fail;
10249                 }
10250
10251                 /*
10252                  * TODO: Check VSDB bits to decide whether this should
10253                  * be enabled or not.
10254                  */
10255                 new_stream->triggered_crtc_reset.enabled =
10256                         dm->force_timing_sync;
10257
10258                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10259
10260                 ret = fill_hdr_info_packet(drm_new_conn_state,
10261                                            &new_stream->hdr_static_metadata);
10262                 if (ret)
10263                         goto fail;
10264
10265                 /*
10266                  * If we already removed the old stream from the context
10267                  * (and set the new stream to NULL) then we can't reuse
10268                  * the old stream even if the stream and scaling are unchanged.
10269                  * We'll hit the BUG_ON and black screen.
10270                  *
10271                  * TODO: Refactor this function to allow this check to work
10272                  * in all conditions.
10273                  */
10274                 if (amdgpu_freesync_vid_mode &&
10275                     dm_new_crtc_state->stream &&
10276                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10277                         goto skip_modeset;
10278
10279                 if (dm_new_crtc_state->stream &&
10280                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10281                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10282                         new_crtc_state->mode_changed = false;
10283                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10284                                          new_crtc_state->mode_changed);
10285                 }
10286         }
10287
10288         /* mode_changed flag may get updated above, need to check again */
10289         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10290                 goto skip_modeset;
10291
10292         DRM_DEBUG_ATOMIC(
10293                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10294                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10295                 "connectors_changed:%d\n",
10296                 acrtc->crtc_id,
10297                 new_crtc_state->enable,
10298                 new_crtc_state->active,
10299                 new_crtc_state->planes_changed,
10300                 new_crtc_state->mode_changed,
10301                 new_crtc_state->active_changed,
10302                 new_crtc_state->connectors_changed);
10303
10304         /* Remove stream for any changed/disabled CRTC */
10305         if (!enable) {
10306
10307                 if (!dm_old_crtc_state->stream)
10308                         goto skip_modeset;
10309
10310                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10311                     is_timing_unchanged_for_freesync(new_crtc_state,
10312                                                      old_crtc_state)) {
10313                         new_crtc_state->mode_changed = false;
10314                         DRM_DEBUG_DRIVER(
10315                                 "Mode change not required for front porch change, "
10316                                 "setting mode_changed to %d",
10317                                 new_crtc_state->mode_changed);
10318
10319                         set_freesync_fixed_config(dm_new_crtc_state);
10320
10321                         goto skip_modeset;
10322                 } else if (amdgpu_freesync_vid_mode && aconnector &&
10323                            is_freesync_video_mode(&new_crtc_state->mode,
10324                                                   aconnector)) {
10325                         struct drm_display_mode *high_mode;
10326
10327                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10328                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10329                                 set_freesync_fixed_config(dm_new_crtc_state);
10330                         }
10331                 }
10332
10333                 ret = dm_atomic_get_state(state, &dm_state);
10334                 if (ret)
10335                         goto fail;
10336
10337                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10338                                 crtc->base.id);
10339
10340                 /* i.e. reset mode */
10341                 if (dc_remove_stream_from_ctx(
10342                                 dm->dc,
10343                                 dm_state->context,
10344                                 dm_old_crtc_state->stream) != DC_OK) {
10345                         ret = -EINVAL;
10346                         goto fail;
10347                 }
10348
10349                 dc_stream_release(dm_old_crtc_state->stream);
10350                 dm_new_crtc_state->stream = NULL;
10351
10352                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10353
10354                 *lock_and_validation_needed = true;
10355
10356         } else {/* Add stream for any updated/enabled CRTC */
10357                 /*
10358                  * Quick fix to prevent a NULL pointer dereference on new_stream
10359                  * when newly added MST connectors are not found in the existing
10360                  * crtc_state in chained mode. TODO: dig out the root cause of that.
10361                  */
10362                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10363                         goto skip_modeset;
10364
10365                 if (modereset_required(new_crtc_state))
10366                         goto skip_modeset;
10367
10368                 if (modeset_required(new_crtc_state, new_stream,
10369                                      dm_old_crtc_state->stream)) {
10370
10371                         WARN_ON(dm_new_crtc_state->stream);
10372
10373                         ret = dm_atomic_get_state(state, &dm_state);
10374                         if (ret)
10375                                 goto fail;
10376
10377                         dm_new_crtc_state->stream = new_stream;
10378
10379                         dc_stream_retain(new_stream);
10380
10381                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10382                                          crtc->base.id);
10383
10384                         if (dc_add_stream_to_ctx(
10385                                         dm->dc,
10386                                         dm_state->context,
10387                                         dm_new_crtc_state->stream) != DC_OK) {
10388                                 ret = -EINVAL;
10389                                 goto fail;
10390                         }
10391
10392                         *lock_and_validation_needed = true;
10393                 }
10394         }
10395
10396 skip_modeset:
10397         /* Release extra reference */
10398         if (new_stream)
10399                 dc_stream_release(new_stream);
10400
10401         /*
10402          * We want to do dc stream updates that do not require a
10403          * full modeset below.
10404          */
10405         if (!(enable && aconnector && new_crtc_state->active))
10406                 return 0;
10407         /*
10408          * Given above conditions, the dc state cannot be NULL because:
10409          * 1. We're in the process of enabling CRTCs (just been added
10410          *    to the dc context, or already is on the context)
10411          * 2. Has a valid connector attached, and
10412          * 3. Is currently active and enabled.
10413          * => The dc stream state currently exists.
10414          */
10415         BUG_ON(dm_new_crtc_state->stream == NULL);
10416
10417         /* Scaling or underscan settings */
10418         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10419                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10420                 update_stream_scaling_settings(
10421                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10422
10423         /* ABM settings */
10424         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10425
10426         /*
10427          * Color management settings. We also update color properties
10428          * when a modeset is needed, to ensure it gets reprogrammed.
10429          */
10430         if (dm_new_crtc_state->base.color_mgmt_changed ||
10431             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10432                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10433                 if (ret)
10434                         goto fail;
10435         }
10436
10437         /* Update Freesync settings. */
10438         get_freesync_config_for_crtc(dm_new_crtc_state,
10439                                      dm_new_conn_state);
10440
10441         return ret;
10442
10443 fail:
10444         if (new_stream)
10445                 dc_stream_release(new_stream);
10446         return ret;
10447 }
10448
10449 static bool should_reset_plane(struct drm_atomic_state *state,
10450                                struct drm_plane *plane,
10451                                struct drm_plane_state *old_plane_state,
10452                                struct drm_plane_state *new_plane_state)
10453 {
10454         struct drm_plane *other;
10455         struct drm_plane_state *old_other_state, *new_other_state;
10456         struct drm_crtc_state *new_crtc_state;
10457         int i;
10458
10459         /*
10460          * TODO: Remove this hack once the checks below are sufficient
10461          * to determine when we need to reset all the planes on
10462          * the stream.
10463          */
10464         if (state->allow_modeset)
10465                 return true;
10466
10467         /* Exit early if we know that we're adding or removing the plane. */
10468         if (old_plane_state->crtc != new_plane_state->crtc)
10469                 return true;
10470
10471         /* old crtc == new_crtc == NULL, plane not in context. */
10472         if (!new_plane_state->crtc)
10473                 return false;
10474
10475         new_crtc_state =
10476                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10477
10478         if (!new_crtc_state)
10479                 return true;
10480
10481         /* CRTC Degamma changes currently require us to recreate planes. */
10482         if (new_crtc_state->color_mgmt_changed)
10483                 return true;
10484
10485         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10486                 return true;
10487
10488         /*
10489          * If there are any new primary or overlay planes being added or
10490          * removed then the z-order can potentially change. To ensure
10491          * correct z-order and pipe acquisition the current DC architecture
10492          * requires us to remove and recreate all existing planes.
10493          *
10494          * TODO: Come up with a more elegant solution for this.
10495          */
10496         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10497                 struct amdgpu_framebuffer *old_afb, *new_afb;
10498                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10499                         continue;
10500
10501                 if (old_other_state->crtc != new_plane_state->crtc &&
10502                     new_other_state->crtc != new_plane_state->crtc)
10503                         continue;
10504
10505                 if (old_other_state->crtc != new_other_state->crtc)
10506                         return true;
10507
10508                 /* Src/dst size and scaling updates. */
10509                 if (old_other_state->src_w != new_other_state->src_w ||
10510                     old_other_state->src_h != new_other_state->src_h ||
10511                     old_other_state->crtc_w != new_other_state->crtc_w ||
10512                     old_other_state->crtc_h != new_other_state->crtc_h)
10513                         return true;
10514
10515                 /* Rotation / mirroring updates. */
10516                 if (old_other_state->rotation != new_other_state->rotation)
10517                         return true;
10518
10519                 /* Blending updates. */
10520                 if (old_other_state->pixel_blend_mode !=
10521                     new_other_state->pixel_blend_mode)
10522                         return true;
10523
10524                 /* Alpha updates. */
10525                 if (old_other_state->alpha != new_other_state->alpha)
10526                         return true;
10527
10528                 /* Colorspace changes. */
10529                 if (old_other_state->color_range != new_other_state->color_range ||
10530                     old_other_state->color_encoding != new_other_state->color_encoding)
10531                         return true;
10532
10533                 /* Framebuffer checks fall at the end. */
10534                 if (!old_other_state->fb || !new_other_state->fb)
10535                         continue;
10536
10537                 /* Pixel format changes can require bandwidth updates. */
10538                 if (old_other_state->fb->format != new_other_state->fb->format)
10539                         return true;
10540
10541                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10542                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10543
10544                 /* Tiling and DCC changes also require bandwidth updates. */
10545                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10546                     old_afb->base.modifier != new_afb->base.modifier)
10547                         return true;
10548         }
10549
10550         return false;
10551 }
10552
10553 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10554                               struct drm_plane_state *new_plane_state,
10555                               struct drm_framebuffer *fb)
10556 {
10557         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10558         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10559         unsigned int pitch;
10560         bool linear;
10561
10562         if (fb->width > new_acrtc->max_cursor_width ||
10563             fb->height > new_acrtc->max_cursor_height) {
10564                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10565                                  fb->width,
10566                                  fb->height);
10567                 return -EINVAL;
10568         }
10569         if (new_plane_state->src_w != fb->width << 16 ||
10570             new_plane_state->src_h != fb->height << 16) {
10571                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10572                 return -EINVAL;
10573         }
10574
10575         /* Pitch in pixels */
10576         pitch = fb->pitches[0] / fb->format->cpp[0];
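              /* Worked example: a 64x64 ARGB8888 cursor FB has cpp[0] == 4 and
               * pitches[0] == 256 bytes, giving a pitch of 64 pixels, which also
               * satisfies the width == pitch check below.
               */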
10577
10578         if (fb->width != pitch) {
10579                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10580                                  fb->width, pitch);
10581                 return -EINVAL;
10582         }
10583
10584         switch (pitch) {
10585         case 64:
10586         case 128:
10587         case 256:
10588                 /* FB pitch is supported by cursor plane */
10589                 break;
10590         default:
10591                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10592                 return -EINVAL;
10593         }
10594
10595         /* Core DRM takes care of checking FB modifiers, so we only need to
10596          * check tiling flags when the FB doesn't have a modifier. */
10597         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10598                 if (adev->family < AMDGPU_FAMILY_AI) {
10599                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10600                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10601                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10602                 } else {
10603                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10604                 }
10605                 if (!linear) {
10606                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10607                         return -EINVAL;
10608                 }
10609         }
10610
10611         return 0;
10612 }
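      /*
       * A cursor FB that passes every check above, as a concrete example: a
       * linear 64x64 ARGB8888 buffer (pitches[0] == 256 bytes -> 64 px pitch)
       * scanned out uncropped (src_w == 64 << 16, src_h == 64 << 16).
       */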
10613
10614 static int dm_update_plane_state(struct dc *dc,
10615                                  struct drm_atomic_state *state,
10616                                  struct drm_plane *plane,
10617                                  struct drm_plane_state *old_plane_state,
10618                                  struct drm_plane_state *new_plane_state,
10619                                  bool enable,
10620                                  bool *lock_and_validation_needed)
10621 {
10622
10623         struct dm_atomic_state *dm_state = NULL;
10624         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10625         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10626         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10627         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10628         struct amdgpu_crtc *new_acrtc;
10629         bool needs_reset;
10630         int ret = 0;
10631
10633         new_plane_crtc = new_plane_state->crtc;
10634         old_plane_crtc = old_plane_state->crtc;
10635         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10636         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10637
10638         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10639                 if (!enable || !new_plane_crtc ||
10640                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10641                         return 0;
10642
10643                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10644
10645                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10646                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10647                         return -EINVAL;
10648                 }
10649
10650                 if (new_plane_state->fb) {
10651                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10652                                                  new_plane_state->fb);
10653                         if (ret)
10654                                 return ret;
10655                 }
10656
10657                 return 0;
10658         }
10659
10660         needs_reset = should_reset_plane(state, plane, old_plane_state,
10661                                          new_plane_state);
10662
10663         /* Remove any changed/removed planes */
10664         if (!enable) {
10665                 if (!needs_reset)
10666                         return 0;
10667
10668                 if (!old_plane_crtc)
10669                         return 0;
10670
10671                 old_crtc_state = drm_atomic_get_old_crtc_state(
10672                                 state, old_plane_crtc);
10673                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10674
10675                 if (!dm_old_crtc_state->stream)
10676                         return 0;
10677
10678                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10679                                 plane->base.id, old_plane_crtc->base.id);
10680
10681                 ret = dm_atomic_get_state(state, &dm_state);
10682                 if (ret)
10683                         return ret;
10684
10685                 if (!dc_remove_plane_from_context(
10686                                 dc,
10687                                 dm_old_crtc_state->stream,
10688                                 dm_old_plane_state->dc_state,
10689                                 dm_state->context)) {
10691                         return -EINVAL;
10692                 }
10693
10695                 dc_plane_state_release(dm_old_plane_state->dc_state);
10696                 dm_new_plane_state->dc_state = NULL;
10697
10698                 *lock_and_validation_needed = true;
10699
10700         } else { /* Add new planes */
10701                 struct dc_plane_state *dc_new_plane_state;
10702
10703                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10704                         return 0;
10705
10706                 if (!new_plane_crtc)
10707                         return 0;
10708
10709                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10710                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10711
10712                 if (!dm_new_crtc_state->stream)
10713                         return 0;
10714
10715                 if (!needs_reset)
10716                         return 0;
10717
10718                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10719                 if (ret)
10720                         return ret;
10721
10722                 WARN_ON(dm_new_plane_state->dc_state);
10723
10724                 dc_new_plane_state = dc_create_plane_state(dc);
10725                 if (!dc_new_plane_state)
10726                         return -ENOMEM;
10727
10728                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10729                                  plane->base.id, new_plane_crtc->base.id);
10730
10731                 ret = fill_dc_plane_attributes(
10732                         drm_to_adev(new_plane_crtc->dev),
10733                         dc_new_plane_state,
10734                         new_plane_state,
10735                         new_crtc_state);
10736                 if (ret) {
10737                         dc_plane_state_release(dc_new_plane_state);
10738                         return ret;
10739                 }
10740
10741                 ret = dm_atomic_get_state(state, &dm_state);
10742                 if (ret) {
10743                         dc_plane_state_release(dc_new_plane_state);
10744                         return ret;
10745                 }
10746
10747                 /*
10748                  * Any atomic check errors that occur after this will
10749                  * not need a release. The plane state will be attached
10750                  * to the stream, and therefore part of the atomic
10751                  * state. It'll be released when the atomic state is
10752                  * cleaned.
10753                  */
10754                 if (!dc_add_plane_to_context(
10755                                 dc,
10756                                 dm_new_crtc_state->stream,
10757                                 dc_new_plane_state,
10758                                 dm_state->context)) {
10760                         dc_plane_state_release(dc_new_plane_state);
10761                         return -EINVAL;
10762                 }
10763
10764                 dm_new_plane_state->dc_state = dc_new_plane_state;
10765
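                      /* Any overlay plane that reaches this point means multi-plane
                       * overlay (MPO) is in use on this CRTC.
                       */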
10766                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10767
10768                 /* Tell DC to do a full surface update every time there
10769                  * is a plane change. Inefficient, but works for now.
10770                  */
10771                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10772
10773                 *lock_and_validation_needed = true;
10774         }
10775
10777         return ret;
10778 }
10779
10780 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10781                                        int *src_w, int *src_h)
10782 {
10783         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10784         case DRM_MODE_ROTATE_90:
10785         case DRM_MODE_ROTATE_270:
10786                 *src_w = plane_state->src_h >> 16;
10787                 *src_h = plane_state->src_w >> 16;
10788                 break;
10789         case DRM_MODE_ROTATE_0:
10790         case DRM_MODE_ROTATE_180:
10791         default:
10792                 *src_w = plane_state->src_w >> 16;
10793                 *src_h = plane_state->src_h >> 16;
10794                 break;
10795         }
10796 }
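      /*
       * Note that src_w/src_h are 16.16 fixed point. For example, a 1920x1080
       * source with DRM_MODE_ROTATE_90 is reported as *src_w == 1080 and
       * *src_h == 1920, so the scale checks below compare post-rotation sizes.
       */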
10797
10798 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10799                                 struct drm_crtc *crtc,
10800                                 struct drm_crtc_state *new_crtc_state)
10801 {
10802         struct drm_plane *cursor = crtc->cursor, *underlying;
10803         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10804         int i;
10805         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10806         int cursor_src_w, cursor_src_h;
10807         int underlying_src_w, underlying_src_h;
10808
10809         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10810          * cursor per pipe but it's going to inherit the scaling and
10811          * positioning from the underlying pipe. Check the cursor plane's
10812          * blending properties match the underlying planes'. */
10813
10814         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10815         if (!new_cursor_state || !new_cursor_state->fb)
10816                 return 0;
10818
10819         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10820         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10821         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
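              /* Scales are in 0.1% units: a 64 px cursor shown at 64 px yields
               * 1000 (1.0x), while a 1920 px plane stretched to a 3840 px CRTC
               * yields 2000 (2.0x); such a mismatch is rejected below.
               */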
10822
10823         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10824                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10825                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10826                         continue;
10827
10828                 /* Ignore disabled planes */
10829                 if (!new_underlying_state->fb)
10830                         continue;
10831
10832                 dm_get_oriented_plane_size(new_underlying_state,
10833                                            &underlying_src_w, &underlying_src_h);
10834                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10835                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10836
10837                 if (cursor_scale_w != underlying_scale_w ||
10838                     cursor_scale_h != underlying_scale_h) {
10839                         drm_dbg_atomic(crtc->dev,
10840                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10841                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10842                         return -EINVAL;
10843                 }
10844
10845                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10846                 if (new_underlying_state->crtc_x <= 0 &&
10847                     new_underlying_state->crtc_y <= 0 &&
10848                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10849                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10850                         break;
10851         }
10852
10853         return 0;
10854 }
10855
10856 #if defined(CONFIG_DRM_AMD_DC_DCN)
10857 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10858 {
10859         struct drm_connector *connector;
10860         struct drm_connector_state *conn_state;
10861         struct amdgpu_dm_connector *aconnector = NULL;
10862         int i;
10863         for_each_new_connector_in_state(state, connector, conn_state, i) {
10864                 if (conn_state->crtc != crtc)
10865                         continue;
10866
10867                 aconnector = to_amdgpu_dm_connector(connector);
10868                 if (!aconnector->port || !aconnector->mst_port)
10869                         aconnector = NULL;
10870                 else
10871                         break;
10872         }
10873
10874         if (!aconnector)
10875                 return 0;
10876
10877         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10878 }
10879 #endif
10880
10881 /**
10882  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10883  * @dev: The DRM device
10884  * @state: The atomic state to commit
10885  *
10886  * Validate that the given atomic state is programmable by DC into hardware.
10887  * This involves constructing a &struct dc_state reflecting the new hardware
10888  * state we wish to commit, then querying DC to see if it is programmable. It's
10889  * important not to modify the existing DC state. Otherwise, atomic_check
10890  * may unexpectedly commit hardware changes.
10891  *
10892  * When validating the DC state, it's important that the right locks are
10893  * acquired. For the full-update case, which removes/adds/updates streams on
10894  * one CRTC while flipping on another CRTC, acquiring the global lock
10895  * guarantees that any such full-update commit will wait for completion of any
10896  * outstanding flip using DRM's synchronization events.
10897  *
10898  * Note that DM adds the affected connectors for all CRTCs in state, when that
10899  * might not seem necessary. This is because DC stream creation requires the
10900  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10901  * be possible but non-trivial - a possible TODO item.
10902  *
10903  * Return: 0 on success, negative error code if validation failed.
10904  */
10905 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10906                                   struct drm_atomic_state *state)
10907 {
10908         struct amdgpu_device *adev = drm_to_adev(dev);
10909         struct dm_atomic_state *dm_state = NULL;
10910         struct dc *dc = adev->dm.dc;
10911         struct drm_connector *connector;
10912         struct drm_connector_state *old_con_state, *new_con_state;
10913         struct drm_crtc *crtc;
10914         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10915         struct drm_plane *plane;
10916         struct drm_plane_state *old_plane_state, *new_plane_state;
10917         enum dc_status status;
10918         int ret, i;
10919         bool lock_and_validation_needed = false;
10920         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10921 #if defined(CONFIG_DRM_AMD_DC_DCN)
10922         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10923         struct drm_dp_mst_topology_state *mst_state;
10924         struct drm_dp_mst_topology_mgr *mgr;
10925 #endif
10926
10927         trace_amdgpu_dm_atomic_check_begin(state);
10928
10929         ret = drm_atomic_helper_check_modeset(dev, state);
10930         if (ret) {
10931                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10932                 goto fail;
10933         }
10934
10935         /* Check connector changes */
10936         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10937                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10938                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10939
10940                 /* Skip connectors that are disabled or part of modeset already. */
10941                 if (!old_con_state->crtc && !new_con_state->crtc)
10942                         continue;
10943
10944                 if (!new_con_state->crtc)
10945                         continue;
10946
10947                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10948                 if (IS_ERR(new_crtc_state)) {
10949                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10950                         ret = PTR_ERR(new_crtc_state);
10951                         goto fail;
10952                 }
10953
10954                 if (dm_old_con_state->abm_level !=
10955                     dm_new_con_state->abm_level)
10956                         new_crtc_state->connectors_changed = true;
10957         }
10958
10959 #if defined(CONFIG_DRM_AMD_DC_DCN)
10960         if (dc_resource_is_dsc_encoding_supported(dc)) {
10961                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10962                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10963                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10964                                 if (ret) {
10965                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10966                                         goto fail;
10967                                 }
10968                         }
10969                 }
10970         }
10971 #endif
10972         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10973                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10974
10975                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10976                     !new_crtc_state->color_mgmt_changed &&
10977                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10978                     !dm_old_crtc_state->dsc_force_changed)
10979                         continue;
10980
10981                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10982                 if (ret) {
10983                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10984                         goto fail;
10985                 }
10986
10987                 if (!new_crtc_state->enable)
10988                         continue;
10989
10990                 ret = drm_atomic_add_affected_connectors(state, crtc);
10991                 if (ret) {
10992                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10993                         goto fail;
10994                 }
10995
10996                 ret = drm_atomic_add_affected_planes(state, crtc);
10997                 if (ret) {
10998                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10999                         goto fail;
11000                 }
11001
11002                 if (dm_old_crtc_state->dsc_force_changed)
11003                         new_crtc_state->mode_changed = true;
11004         }
11005
11006         /*
11007          * Add all primary and overlay planes on the CRTC to the state
11008          * whenever a plane is enabled to maintain correct z-ordering
11009          * and to enable fast surface updates.
11010          */
11011         drm_for_each_crtc(crtc, dev) {
11012                 bool modified = false;
11013
11014                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11015                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11016                                 continue;
11017
11018                         if (new_plane_state->crtc == crtc ||
11019                             old_plane_state->crtc == crtc) {
11020                                 modified = true;
11021                                 break;
11022                         }
11023                 }
11024
11025                 if (!modified)
11026                         continue;
11027
11028                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11029                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11030                                 continue;
11031
11032                         new_plane_state =
11033                                 drm_atomic_get_plane_state(state, plane);
11034
11035                         if (IS_ERR(new_plane_state)) {
11036                                 ret = PTR_ERR(new_plane_state);
11037                                 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
11038                                 goto fail;
11039                         }
11040                 }
11041         }
11042
11043         /* Remove existing planes if they are modified */
11044         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11045                 ret = dm_update_plane_state(dc, state, plane,
11046                                             old_plane_state,
11047                                             new_plane_state,
11048                                             false,
11049                                             &lock_and_validation_needed);
11050                 if (ret) {
11051                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11052                         goto fail;
11053                 }
11054         }
11055
11056         /* Disable all crtcs which require disable */
11057         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11058                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11059                                            old_crtc_state,
11060                                            new_crtc_state,
11061                                            false,
11062                                            &lock_and_validation_needed);
11063                 if (ret) {
11064                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11065                         goto fail;
11066                 }
11067         }
11068
11069         /* Enable all crtcs which require enable */
11070         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11071                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11072                                            old_crtc_state,
11073                                            new_crtc_state,
11074                                            true,
11075                                            &lock_and_validation_needed);
11076                 if (ret) {
11077                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11078                         goto fail;
11079                 }
11080         }
11081
11082         /* Add new/modified planes */
11083         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11084                 ret = dm_update_plane_state(dc, state, plane,
11085                                             old_plane_state,
11086                                             new_plane_state,
11087                                             true,
11088                                             &lock_and_validation_needed);
11089                 if (ret) {
11090                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11091                         goto fail;
11092                 }
11093         }
11094
11095         /* Run this here since we want to validate the streams we created */
11096         ret = drm_atomic_helper_check_planes(dev, state);
11097         if (ret) {
11098                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11099                 goto fail;
11100         }
11101
11102         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11103                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11104                 if (dm_new_crtc_state->mpo_requested)
11105                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11106         }
11107
11108         /* Check cursor planes scaling */
11109         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11110                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11111                 if (ret) {
11112                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11113                         goto fail;
11114                 }
11115         }
11116
11117         if (state->legacy_cursor_update) {
11118                 /*
11119                  * This is a fast cursor update coming from the plane update
11120                  * helper, check if it can be done asynchronously for better
11121                  * performance.
11122                  */
11123                 state->async_update =
11124                         !drm_atomic_helper_async_check(dev, state);
11125
11126                 /*
11127                  * Skip the remaining global validation if this is an async
11128                  * update. Cursor updates can be done without affecting
11129                  * state or bandwidth calcs and this avoids the performance
11130                  * penalty of locking the private state object and
11131                  * allocating a new dc_state.
11132                  */
11133                 if (state->async_update)
11134                         return 0;
11135         }
11136
11137         /* Check scaling and underscan changes */
11138         /* TODO Scaling-change validation was removed because committing a
11139          * new stream into the context cannot be done w/o causing a full
11140          * reset. Need to decide how to handle this.
11141          */
11142         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11143                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11144                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11145                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11146
11147                 /* Skip any modesets/resets */
11148                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11149                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11150                         continue;
11151
11152                 /* Skip anything that is not a scaling or underscan change */
11153                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11154                         continue;
11155
11156                 lock_and_validation_needed = true;
11157         }
11158
11159 #if defined(CONFIG_DRM_AMD_DC_DCN)
11160         /* set the slot info for each mst_state based on the link encoding format */
11161         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11162                 struct amdgpu_dm_connector *aconnector;
11163                 struct drm_connector *connector;
11164                 struct drm_connector_list_iter iter;
11165                 u8 link_coding_cap;
11166
11167                 if (!mgr->mst_state)
11168                         continue;
11169
11170                 drm_connector_list_iter_begin(dev, &iter);
11171                 drm_for_each_connector_iter(connector, &iter) {
11172                         int id = connector->index;
11173
11174                         if (id == mst_state->mgr->conn_base_id) {
11175                                 aconnector = to_amdgpu_dm_connector(connector);
11176                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11177                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11178
11179                                 break;
11180                         }
11181                 }
11182                 drm_connector_list_iter_end(&iter);
11184         }
11185 #endif
11186         /*
11187          * Streams and planes are reset when there are changes that affect
11188          * bandwidth. Anything that affects bandwidth needs to go through
11189          * DC global validation to ensure that the configuration can be applied
11190          * to hardware.
11191          *
11192          * We currently have to stall in atomic_check for outstanding
11193          * commits to finish in this case because our IRQ handlers reference
11194          * DRM state directly - we can end up disabling interrupts too early
11195          * if we don't.
11196          *
11197          * TODO: Remove this stall and drop DM state private objects.
11198          */
11199         if (lock_and_validation_needed) {
11200                 ret = dm_atomic_get_state(state, &dm_state);
11201                 if (ret) {
11202                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11203                         goto fail;
11204                 }
11205
11206                 ret = do_aquire_global_lock(dev, state);
11207                 if (ret) {
11208                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11209                         goto fail;
11210                 }
11211
11212 #if defined(CONFIG_DRM_AMD_DC_DCN)
11213                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11214                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                        ret = -EINVAL;
11215                         goto fail;
11216                 }
11217
11218                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11219                 if (ret) {
11220                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11221                         goto fail;
11222                 }
11223 #endif
11224
11225                 /*
11226                  * Perform validation of MST topology in the state:
11227                  * We need to perform MST atomic check before calling
11228                  * dc_validate_global_state(), or we risk getting stuck in
11229                  * an infinite loop and eventually hanging.
11230                  */
11231                 ret = drm_dp_mst_atomic_check(state);
11232                 if (ret) {
11233                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11234                         goto fail;
11235                 }
11236                 status = dc_validate_global_state(dc, dm_state->context, true);
11237                 if (status != DC_OK) {
11238                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11239                                        dc_status_to_str(status), status);
11240                         ret = -EINVAL;
11241                         goto fail;
11242                 }
11243         } else {
11244                 /*
11245                  * The commit is a fast update. Fast updates shouldn't change
11246                  * the DC context, affect global validation, and can have their
11247                  * commit work done in parallel with other commits not touching
11248                  * the same resource. If we have a new DC context as part of
11249                  * the DM atomic state from validation we need to free it and
11250                  * retain the existing one instead.
11251                  *
11252                  * Furthermore, since the DM atomic state only contains the DC
11253                  * context and can safely be annulled, we can free the state
11254                  * and clear the associated private object now to free
11255                  * some memory and avoid a possible use-after-free later.
11256                  */
11257
11258                 for (i = 0; i < state->num_private_objs; i++) {
11259                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11260
11261                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11262                                 int j = state->num_private_objs-1;
11263
11264                                 dm_atomic_destroy_state(obj,
11265                                                 state->private_objs[i].state);
11266
11267                                 /* If i is not at the end of the array then the
11268                                  * last element needs to be moved to where i was
11269                                  * before the array can safely be truncated.
11270                                  */
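                                      /* e.g. with objs [A, B, C] and i == 0,
                                       * C is copied into slot 0 and
                                       * num_private_objs drops from 3 to 2.
                                       */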
11271                                 if (i != j)
11272                                         state->private_objs[i] =
11273                                                 state->private_objs[j];
11274
11275                                 state->private_objs[j].ptr = NULL;
11276                                 state->private_objs[j].state = NULL;
11277                                 state->private_objs[j].old_state = NULL;
11278                                 state->private_objs[j].new_state = NULL;
11279
11280                                 state->num_private_objs = j;
11281                                 break;
11282                         }
11283                 }
11284         }
11285
11286         /* Store the overall update type for use later in atomic check. */
11287         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11288                 struct dm_crtc_state *dm_new_crtc_state =
11289                         to_dm_crtc_state(new_crtc_state);
11290
11291                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11292                                                          UPDATE_TYPE_FULL :
11293                                                          UPDATE_TYPE_FAST;
11294         }
11295
11296         /* Must have succeeded by this point */
11297         WARN_ON(ret);
11298
11299         trace_amdgpu_dm_atomic_check_finish(state, ret);
11300
11301         return ret;
11302
11303 fail:
11304         if (ret == -EDEADLK)
11305                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11306         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11307                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11308         else
11309                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11310
11311         trace_amdgpu_dm_atomic_check_finish(state, ret);
11312
11313         return ret;
11314 }
11315
11316 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11317                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11318 {
11319         uint8_t dpcd_data;
11320         bool capable = false;
11321
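              /* DPCD address 0x007 (DP_DOWN_STREAM_PORT_COUNT) bit 6 is
               * DP_MSA_TIMING_PAR_IGNORED: the sink can ignore the MSA timing
               * parameters, which Adaptive-Sync/FreeSync relies on.
               */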
11322         if (amdgpu_dm_connector->dc_link &&
11323                 dm_helpers_dp_read_dpcd(
11324                                 NULL,
11325                                 amdgpu_dm_connector->dc_link,
11326                                 DP_DOWN_STREAM_PORT_COUNT,
11327                                 &dpcd_data,
11328                                 sizeof(dpcd_data))) {
11329                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11330         }
11331
11332         return capable;
11333 }
11334
11335 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11336                 unsigned int offset,
11337                 unsigned int total_length,
11338                 uint8_t *data,
11339                 unsigned int length,
11340                 struct amdgpu_hdmi_vsdb_info *vsdb)
11341 {
11342         bool res;
11343         union dmub_rb_cmd cmd;
11344         struct dmub_cmd_send_edid_cea *input;
11345         struct dmub_cmd_edid_cea_output *output;
11346
11347         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11348                 return false;
11349
11350         memset(&cmd, 0, sizeof(cmd));
11351
11352         input = &cmd.edid_cea.data.input;
11353
11354         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11355         cmd.edid_cea.header.sub_type = 0;
11356         cmd.edid_cea.header.payload_bytes =
11357                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11358         input->offset = offset;
11359         input->length = length;
11360         input->cea_total_length = total_length;
11361         memcpy(input->payload, data, length);
11362
11363         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11364         if (!res) {
11365                 DRM_ERROR("EDID CEA parser failed\n");
11366                 return false;
11367         }
11368
11369         output = &cmd.edid_cea.data.output;
11370
11371         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11372                 if (!output->ack.success) {
11373                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11374                                         output->ack.offset);
11375                 }
11376         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11377                 if (!output->amd_vsdb.vsdb_found)
11378                         return false;
11379
11380                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11381                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11382                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11383                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11384         } else {
11385                 DRM_WARN("Unknown EDID CEA parser results\n");
11386                 return false;
11387         }
11388
11389         return true;
11390 }
11391
11392 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11393                 uint8_t *edid_ext, int len,
11394                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11395 {
11396         int i;
11397
11398         /* send extension block to DMCU for parsing */
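              /* A CEA extension block is EDID_LENGTH (128) bytes, so this loop
               * sends 16 chunks of 8 bytes; the chunk starting at offset 120 is
               * the last one, after which the parse result is read back.
               */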
11399         for (i = 0; i < len; i += 8) {
11400                 bool res;
11401                 int offset;
11402
11403                 /* send 8 bytes at a time */
11404                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11405                         return false;
11406
11407                 if (i + 8 == len) {
11408                         /* EDID block fully sent; expect the parse result */
11409                         int version, min_rate, max_rate;
11410
11411                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11412                         if (res) {
11413                                 /* amd vsdb found */
11414                                 vsdb_info->freesync_supported = 1;
11415                                 vsdb_info->amd_vsdb_version = version;
11416                                 vsdb_info->min_refresh_rate_hz = min_rate;
11417                                 vsdb_info->max_refresh_rate_hz = max_rate;
11418                                 return true;
11419                         }
11420                         /* not amd vsdb */
11421                         return false;
11422                 }
11423
11424                 /* check for ack */
11425                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11426                 if (!res)
11427                         return false;
11428         }
11429
11430         return false;
11431 }
11432
11433 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11434                 uint8_t *edid_ext, int len,
11435                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11436 {
11437         int i;
11438
11439         /* send extension block to DMUB for parsing */
11440         for (i = 0; i < len; i += 8) {
11441                 /* send 8 bytes at a time */
11442                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11443                         return false;
11444         }
11445
11446         return vsdb_info->freesync_supported;
11447 }
11448
11449 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11450                 uint8_t *edid_ext, int len,
11451                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11452 {
11453         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11454
11455         if (adev->dm.dmub_srv)
11456                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11457         else
11458                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11459 }
11460
11461 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11462                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11463 {
11464         uint8_t *edid_ext = NULL;
11465         int i;
11466         bool valid_vsdb_found = false;
11467
11468         /*----- drm_find_cea_extension() -----*/
11469         /* No EDID or EDID extensions */
11470         if (edid == NULL || edid->extensions == 0)
11471                 return -ENODEV;
11472
11473         /* Find CEA extension */
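              /* The base EDID occupies bytes 0..127, so extension block i
               * starts at byte offset EDID_LENGTH * (i + 1).
               */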
11474         for (i = 0; i < edid->extensions; i++) {
11475                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11476                 if (edid_ext[0] == CEA_EXT)
11477                         break;
11478         }
11479
11480         if (i == edid->extensions)
11481                 return -ENODEV;
11482
11483         /*----- cea_db_offsets() -----*/
11484         if (edid_ext[0] != CEA_EXT)
11485                 return -ENODEV;
11486
11487         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11488
11489         return valid_vsdb_found ? i : -ENODEV;
11490 }
11491
11492 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11493                                         struct edid *edid)
11494 {
11495         int i = 0;
11496         struct detailed_timing *timing;
11497         struct detailed_non_pixel *data;
11498         struct detailed_data_monitor_range *range;
11499         struct amdgpu_dm_connector *amdgpu_dm_connector =
11500                         to_amdgpu_dm_connector(connector);
11501         struct dm_connector_state *dm_con_state = NULL;
11502         struct dc_sink *sink;
11503
11504         struct drm_device *dev = connector->dev;
11505         struct amdgpu_device *adev = drm_to_adev(dev);
11506         bool freesync_capable = false;
11507         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11508
11509         if (!connector->state) {
11510                 DRM_ERROR("%s - Connector has no state\n", __func__);
11511                 goto update;
11512         }
11513
11514         sink = amdgpu_dm_connector->dc_sink ?
11515                 amdgpu_dm_connector->dc_sink :
11516                 amdgpu_dm_connector->dc_em_sink;
11517
11518         if (!edid || !sink) {
11519                 dm_con_state = to_dm_connector_state(connector->state);
11520
11521                 amdgpu_dm_connector->min_vfreq = 0;
11522                 amdgpu_dm_connector->max_vfreq = 0;
11523                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11524                 connector->display_info.monitor_range.min_vfreq = 0;
11525                 connector->display_info.monitor_range.max_vfreq = 0;
11526                 freesync_capable = false;
11527
11528                 goto update;
11529         }
11530
11531         dm_con_state = to_dm_connector_state(connector->state);
11532
11533         if (!adev->dm.freesync_module)
11534                 goto update;
11535
11537         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11538                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11539                 bool edid_check_required = false;
11540
11541                 if (edid) {
11542                         edid_check_required = is_dp_capable_without_timing_msa(
11543                                                 adev->dm.dc,
11544                                                 amdgpu_dm_connector);
11545                 }
11546
11547                 if (edid_check_required && (edid->version > 1 ||
11548                    (edid->version == 1 && edid->revision > 1))) {
11549                         for (i = 0; i < 4; i++) {
11551                                 timing  = &edid->detailed_timings[i];
11552                                 data    = &timing->data.other_data;
11553                                 range   = &data->data.range;
11554                                 /*
11555                                  * Check if monitor has continuous frequency mode
11556                                  */
11557                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11558                                         continue;
11559                                 /*
11560                                  * Check for range-limits flag only. If flags == 1 then
11561                                  * no additional timing information is provided.
11562                                  * Default GTF, GTF secondary curve and CVT are not
11563                                  * supported.
11564                                  */
11565                                 if (range->flags != 1)
11566                                         continue;
11567
11568                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11569                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11570                                 amdgpu_dm_connector->pixel_clock_mhz =
11571                                         range->pixel_clock_mhz * 10;
11572
11573                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11574                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11575
11576                                 break;
11577                         }
11578
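                              /* Require a usable VRR window: e.g. a 48-144 Hz
                               * range (96 Hz span) qualifies, while a fixed
                               * 60 Hz panel (span 0) does not.
                               */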
11579                         if (amdgpu_dm_connector->max_vfreq -
11580                             amdgpu_dm_connector->min_vfreq > 10)
11581                                 freesync_capable = true;
11584                 }
11585         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11586                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11587                 if (i >= 0 && vsdb_info.freesync_supported) {
11588                         timing  = &edid->detailed_timings[i];
11589                         data    = &timing->data.other_data;
11590
11591                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11592                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11593                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11594                                 freesync_capable = true;
11595
11596                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11597                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11598                 }
11599         }
11600
11601 update:
11602         if (dm_con_state)
11603                 dm_con_state->freesync_capable = freesync_capable;
11604
11605         if (connector->vrr_capable_property)
11606                 drm_connector_set_vrr_capable_property(connector,
11607                                                        freesync_capable);
11608 }
11609
11610 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11611 {
11612         struct amdgpu_device *adev = drm_to_adev(dev);
11613         struct dc *dc = adev->dm.dc;
11614         int i;
11615
11616         mutex_lock(&adev->dm.dc_lock);
11617         if (dc->current_state) {
11618                 for (i = 0; i < dc->current_state->stream_count; ++i)
11619                         dc->current_state->streams[i]
11620                                 ->triggered_crtc_reset.enabled =
11621                                 adev->dm.force_timing_sync;
11622
11623                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11624                 dc_trigger_sync(dc, dc->current_state);
11625         }
11626         mutex_unlock(&adev->dm.dc_lock);
11627 }
11628
11629 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11630                        uint32_t value, const char *func_name)
11631 {
11632 #ifdef DM_CHECK_ADDR_0
11633         if (address == 0) {
11634                 DC_ERR("invalid register write. address = 0");
11635                 return;
11636         }
11637 #endif
11638         cgs_write_register(ctx->cgs_device, address, value);
11639         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11640 }

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
                          const char *func_name)
{
        uint32_t value;
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register read; address = 0\n");
                return 0;
        }
#endif

        /*
         * Reads are not allowed while a DMUB register-offload sequence is
         * being gathered, unless it is a burst-write sequence.
         */
        if (ctx->dmub_srv &&
            ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
            !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
                ASSERT(false);
                return 0;
        }

        value = cgs_read_register(ctx->cgs_device, address);

        trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

        return value;
}
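
/*
 * A minimal usage sketch, assuming a valid dc_context "ctx" and a
 * hypothetical register offset REG_EXAMPLE; routing accesses through the
 * helpers above keeps them visible in the amdgpu_dc register trace:
 *
 *      uint32_t v = dm_read_reg_func(ctx, REG_EXAMPLE, __func__);
 *      dm_write_reg_func(ctx, REG_EXAMPLE, v | 0x1, __func__);
 */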

/*
 * Translate a DMUB async-to-sync completion status into the caller-visible
 * return value and operation_result code for AUX and SET_CONFIG requests.
 */
static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
                                                struct dc_context *ctx,
                                                uint8_t status_type,
                                                uint32_t *operation_result)
{
        struct amdgpu_device *adev = ctx->driver_context;
        int return_status = -1;
        struct dmub_notification *p_notify = adev->dm.dmub_notify;

        if (is_cmd_aux) {
                if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
                        return_status = p_notify->aux_reply.length;
                        *operation_result = p_notify->result;
                } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
                        *operation_result = AUX_RET_ERROR_TIMEOUT;
                } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
                        *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
                } else {
                        *operation_result = AUX_RET_ERROR_UNKNOWN;
                }
        } else {
                if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
                        return_status = 0;
                        *operation_result = p_notify->sc_status;
                } else {
                        *operation_result = SET_CONFIG_UNKNOWN_ERROR;
                }
        }

        return return_status;
}
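
/*
 * Result mapping of the function above, summarized for reference (derived
 * directly from the code, not new behavior):
 *
 *      AUX + SUCCESS:        reply length; *operation_result = notify result
 *      AUX + TIMEOUT:        -1; AUX_RET_ERROR_TIMEOUT
 *      AUX + FAIL:           -1; AUX_RET_ERROR_ENGINE_ACQUIRE
 *      AUX + anything else:  -1; AUX_RET_ERROR_UNKNOWN
 *      SET_CONFIG + SUCCESS:  0; *operation_result = sc_status
 *      SET_CONFIG + other:   -1; SET_CONFIG_UNKNOWN_ERROR
 */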

int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
        unsigned int link_index, void *cmd_payload, void *operation_result)
{
        struct amdgpu_device *adev = ctx->driver_context;
        int ret = 0;

        if (is_cmd_aux) {
                dc_process_dmub_aux_transfer_async(ctx->dc,
                        link_index, (struct aux_payload *)cmd_payload);
        } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
                                        (struct set_config_cmd_payload *)cmd_payload,
                                        adev->dm.dmub_notify)) {
                /*
                 * The SET_CONFIG request completed (or failed) immediately;
                 * dmub_notify->sc_status already holds the outcome, so skip
                 * the wait below.
                 */
                return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
                                        ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
                                        (uint32_t *)operation_result);
        }

        ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
        if (ret == 0) {
                DRM_ERROR("timed out waiting for DMUB to signal transfer completion!");
                return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
                                ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
                                (uint32_t *)operation_result);
        }

        if (is_cmd_aux) {
                if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
                        struct aux_payload *payload = (struct aux_payload *)cmd_payload;

                        payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
                        if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
                            payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
                                memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
                                       adev->dm.dmub_notify->aux_reply.length);
                        }
                }
        }

        return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
                        ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
                        (uint32_t *)operation_result);
}
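
/*
 * A hedged caller sketch for the synchronous AUX path (the payload setup
 * is illustrative; the address shown is a hypothetical DPCD offset):
 *
 *      struct aux_payload payload = {
 *              .address = 0x0000,      // hypothetical DPCD address
 *              .length = 1,
 *              .data = buf,
 *              .write = false,
 *      };
 *      uint32_t op_result;
 *      int len = amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
 *                      link_index, &payload, &op_result);
 *      if (len < 0)
 *              // op_result holds an AUX_RET_ERROR_* code
 */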

/*
 * Check whether seamless boot is supported.
 *
 * For now, seamless boot is only supported on CHIP_VANGOGH.
 * If it proves reliable there, we may extend support to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VANGOGH:
                if (!adev->mman.keep_stolen_vga_memory)
                        return true;
                break;
        default:
                break;
        }

        return false;
}
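
/*
 * A hedged usage sketch: during DM initialization this capability check
 * can feed the DC seamless-boot flag, along the lines of (treat the exact
 * field name as illustrative):
 *
 *      init_data.flags.allow_seamless_boot_optimization =
 *              check_seamless_boot_capability(adev);
 */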