/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    u32 link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        struct amdgpu_crtc *acrtc;

        if (crtc >= adev->mode_info.num_crtc)
                return 0;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
                return 0;
        }

        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        u32 v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        acrtc = adev->mode_info.crtcs[crtc];

        if (acrtc->dm_irq_params.stream == NULL) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
                return 0;
        }

        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
         */
        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                 &v_blank_start, &v_blank_end,
                                 &h_position, &v_position);

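        /*
         * Pack the values into the 16-bit-pair register layout referenced
         * by the TODO above: h_position / v_blank_end in the upper 16
         * bits, v_position / v_blank_start in the lower 16 bits.
         */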
        *position = v_position | (h_position << 16);
        *vbl = v_blank_start | (v_blank_end << 16);

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, used to derive the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        u32 vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of the
         * vblank in which the pageflip completed, so last_flip_vblank is the
         * forbidden count for queueing new pageflips if vsync + VRR is
         * enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after the end of the
                 * front-porch in VRR mode, as vblank timestamping will only
                 * give valid results after the front-porch. This also
                 * delivers page-flip completion events that were queued to
                 * us if a pageflip happened inside the front-porch.
                 */
                if (vrr_active) {
                        dm_crtc_handle_vblank(acrtc);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-VRR mode, as only then will vblank timestamping give
         * valid results while done in the front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after the end of the front-porch.
         */
        if (!vrr_active)
                dm_crtc_handle_vblank(acrtc);

        /*
         * The following must happen at the start of vblank, for crc
         * computation and below-the-range btr support in VRR mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index
 * through the link index and calls a helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        u8 link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index >= adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else {
                return false;
        }

        return true;
}
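
/*
 * Typical usage, as a sketch: during DM initialization the driver would
 * register the HPD handlers defined above and request that they run from
 * the offloaded DMUB interrupt thread, e.g.:
 *
 *      if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                         dmub_hpd_callback, true))
 *              DRM_ERROR("fail to register dmub hpd callback");
 */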

static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                                                                                      dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching DMUB notifications to the
 * registered callbacks and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        u32 count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
            irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type]) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else {
                        break;
                }

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }
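
        /*
         * Size the buffer for the worst case implied by the math below:
         * 4 bytes per pixel of the largest mode the connector reports.
         */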
        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                                                AMDGPU_GEM_DOMAIN_GTT,
                                                &compressor->bo_ptr,
                                                &compressor->gpu_addr,
                                                &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        u32 i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

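        /*
         * A sketch of the ucode array layout implied by the offsets above:
         *
         *   [ PSP header | inst_const payload | PSP footer ][ bss_data ]
         *      0x100 B                           0x100 B
         *
         * hdr->inst_const_bytes spans header + payload + footer, which is
         * why the size copied to the inst/const window below subtracts
         * both PSP_HEADER_BYTES and PSP_FOOTER_BYTES.
         */
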
        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->vm_manager.vram_base_offset;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3):
        case IP_VERSION(3, 1, 4):
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}
static void mmhub_read_system_context(struct amdgpu_device *adev,
                                      struct dc_phy_addr_space_config *pa_config)
{
        u64 pt_base;
        u32 logical_addr_low;
        u32 logical_addr_high;
        u32 agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

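        /*
         * Note on units, as implied by the shifts used below: the system
         * aperture addresses are programmed in 256 KiB units (>> 18), the
         * AGP aperture in 16 MiB units (>> 24), and the GART page table
         * addresses in 4 KiB pages (>> 12).
         */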
        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        /* AGP aperture is disabled */
        if (agp_bot == agp_top) {
                logical_addr_low = adev->gmc.fb_start >> 18;
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        /*
                         * Raven2 has a HW issue that prevents it from using
                         * vram that is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
                         * The workaround is to increase the system aperture
                         * high address (add 1) to get rid of the VM fault and
                         * hardware hang.
                         */
                        logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
                else
                        logical_addr_high = adev->gmc.fb_end >> 18;
        } else {
                logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        /*
                         * Raven2 has a HW issue that prevents it from using
                         * vram that is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
                         * The workaround is to increase the system aperture
                         * high address (add 1) to get rid of the VM fault and
                         * hardware hang.
                         */
                        logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
                else
                        logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
        }

        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
}

static void force_connector_state(struct amdgpu_dm_connector *aconnector,
                                  enum drm_connector_force force_state)
{
        struct drm_connector *connector = &aconnector->base;

        mutex_lock(&connector->dev->mode_config.mutex);
        aconnector->base.force = force_state;
        mutex_unlock(&connector->dev->mode_config.mutex);

        mutex_lock(&aconnector->hpd_lock);
        drm_kms_helper_connector_hotplug_event(connector);
        mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work;
        struct amdgpu_dm_connector *aconnector;
        struct dc_link *dc_link;
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;
        union test_response test_response;

        memset(&test_response, 0, sizeof(test_response));

        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;

        if (!aconnector) {
                DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
                goto skip;
        }

        adev = drm_to_adev(aconnector->base.dev);
        dc_link = aconnector->dc_link;

        mutex_lock(&aconnector->hpd_lock);
        if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);

        if (new_connection_type == dc_connection_none)
                goto skip;

        if (amdgpu_in_reset(adev))
                goto skip;

1298         mutex_lock(&adev->dm.dc_lock);
1299         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
1300                 dc_link_dp_handle_automated_test(dc_link);
1301
1302                 if (aconnector->timing_changed) {
1303                         /* force connector disconnect and reconnect */
1304                         force_connector_state(aconnector, DRM_FORCE_OFF);
1305                         msleep(100);
1306                         force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
1307                 }
1308
1309                 test_response.bits.ACK = 1;
1310
1311                 core_link_write_dpcd(
1312                 dc_link,
1313                 DP_TEST_RESPONSE,
1314                 &test_response.raw,
1315                 sizeof(test_response));
1316         }
1317         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1318                         dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
1319                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1320                 /* offload_work->data is from handle_hpd_rx_irq->
1321                  * schedule_hpd_rx_offload_work.this is defer handle
1322                  * for hpd short pulse. upon here, link status may be
1323                  * changed, need get latest link status from dpcd
1324                  * registers. if link status is good, skip run link
1325                  * training again.
1326                  */
1327                 union hpd_irq_data irq_data;
1328
1329                 memset(&irq_data, 0, sizeof(irq_data));
1330
1331                 /* before dc_link_dp_handle_link_loss, allow new link lost handle
1332                  * request be added to work queue if link lost at end of dc_link_
1333                  * dp_handle_link_loss
1334                  */
1335                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1336                 offload_work->offload_wq->is_handling_link_loss = false;
1337                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1338
1339                 if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
1340                         dc_link_check_link_loss_status(dc_link, &irq_data))
1341                         dc_link_dp_handle_link_loss(dc_link);
1342         }
1343         mutex_unlock(&adev->dm.dc_lock);
1344
1345 skip:
1346         kfree(offload_work);
1348 }
1349
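/* Allocate one single-threaded offload workqueue (and its spinlock) per dc
 * link, so that deferred HPD RX IRQ work for different links can be queued
 * and flushed independently.
 */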
1350 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1351 {
1352         int max_caps = dc->caps.max_links;
1353         int i = 0;
1354         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1355
1356         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1357
1358         if (!hpd_rx_offload_wq)
1359                 return NULL;
1360
1362         for (i = 0; i < max_caps; i++) {
1363                 hpd_rx_offload_wq[i].wq =
1364                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1365
1366                 if (hpd_rx_offload_wq[i].wq == NULL) {
1367                         DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!\n");
1368                         goto out_err;
1369                 }
1370
1371                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1372         }
1373
1374         return hpd_rx_offload_wq;
1375
1376 out_err:
1377         for (i = 0; i < max_caps; i++) {
1378                 if (hpd_rx_offload_wq[i].wq)
1379                         destroy_workqueue(hpd_rx_offload_wq[i].wq);
1380         }
1381         kfree(hpd_rx_offload_wq);
1382         return NULL;
1383 }
1384
1385 struct amdgpu_stutter_quirk {
1386         u16 chip_vendor;
1387         u16 chip_device;
1388         u16 subsys_vendor;
1389         u16 subsys_device;
1390         u8 revision;
1391 };
1392
1393 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1394         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1395         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1396         { 0, 0, 0, 0, 0 },
1397 };
1398
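/* Return true if the PCI device matches an entry in the stutter quirk list;
 * all of vendor, device, subsystem IDs and revision must match.
 */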
1399 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1400 {
1401         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1402
1403         while (p && p->chip_device != 0) {
1404                 if (pdev->vendor == p->chip_vendor &&
1405                     pdev->device == p->chip_device &&
1406                     pdev->subsystem_vendor == p->subsys_vendor &&
1407                     pdev->subsystem_device == p->subsys_device &&
1408                     pdev->revision == p->revision) {
1409                         return true;
1410                 }
1411                 ++p;
1412         }
1413         return false;
1414 }
1415
1416 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1417         {
1418                 .matches = {
1419                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1420                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1421                 },
1422         },
1423         {
1424                 .matches = {
1425                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1426                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1427                 },
1428         },
1429         {
1430                 .matches = {
1431                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1432                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1433                 },
1434         },
1435         {
1436                 .matches = {
1437                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1438                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
1439                 },
1440         },
1441         {
1442                 .matches = {
1443                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1444                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
1445                 },
1446         },
1447         {
1448                 .matches = {
1449                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1450                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
1451                 },
1452         },
1453         {
1454                 .matches = {
1455                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1456                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
1457                 },
1458         },
1459         {
1460                 .matches = {
1461                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1462                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
1463                 },
1464         },
1465         {
1466                 .matches = {
1467                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1468                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
1469                 },
1470         },
1471         {}
1472         /* TODO: refactor this from a fixed table to a dynamic option */
1473 };
1474
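/* Check the DMI table for systems that need the AUX HPD disconnect quirk. */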
1475 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1476 {
1477         const struct dmi_system_id *dmi_id;
1478
1479         dm->aux_hpd_discon_quirk = false;
1480
1481         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1482         if (dmi_id) {
1483                 dm->aux_hpd_discon_quirk = true;
1484                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1485         }
1486 }
1487
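/* Top-level DM initialization: brings up DC, DMUB, the IRQ/HPD machinery and
 * the DRM-side software state. Called from dm_hw_init().
 */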
1488 static int amdgpu_dm_init(struct amdgpu_device *adev)
1489 {
1490         struct dc_init_data init_data;
1491 #ifdef CONFIG_DRM_AMD_DC_HDCP
1492         struct dc_callback_init init_params;
1493 #endif
1494         int r;
1495
1496         adev->dm.ddev = adev_to_drm(adev);
1497         adev->dm.adev = adev;
1498
1499         /* Zero all the fields */
1500         memset(&init_data, 0, sizeof(init_data));
1501 #ifdef CONFIG_DRM_AMD_DC_HDCP
1502         memset(&init_params, 0, sizeof(init_params));
1503 #endif
1504
1505         mutex_init(&adev->dm.dpia_aux_lock);
1506         mutex_init(&adev->dm.dc_lock);
1507         mutex_init(&adev->dm.audio_lock);
1508
1509         if (amdgpu_dm_irq_init(adev)) {
1510                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1511                 goto error;
1512         }
1513
1514         init_data.asic_id.chip_family = adev->family;
1515
1516         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1517         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1518         init_data.asic_id.chip_id = adev->pdev->device;
1519
1520         init_data.asic_id.vram_width = adev->gmc.vram_width;
1521         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1522         init_data.asic_id.atombios_base_address =
1523                 adev->mode_info.atom_context->bios;
1524
1525         init_data.driver = adev;
1526
1527         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1528
1529         if (!adev->dm.cgs_device) {
1530                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1531                 goto error;
1532         }
1533
1534         init_data.cgs_device = adev->dm.cgs_device;
1535
1536         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1537
1538         switch (adev->ip_versions[DCE_HWIP][0]) {
1539         case IP_VERSION(2, 1, 0):
1540                 switch (adev->dm.dmcub_fw_version) {
1541                 case 0: /* development */
1542                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1543                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1544                         init_data.flags.disable_dmcu = false;
1545                         break;
1546                 default:
1547                         init_data.flags.disable_dmcu = true;
1548                 }
1549                 break;
1550         case IP_VERSION(2, 0, 3):
1551                 init_data.flags.disable_dmcu = true;
1552                 break;
1553         default:
1554                 break;
1555         }
1556
1557         switch (adev->asic_type) {
1558         case CHIP_CARRIZO:
1559         case CHIP_STONEY:
1560                 init_data.flags.gpu_vm_support = true;
1561                 break;
1562         default:
1563                 switch (adev->ip_versions[DCE_HWIP][0]) {
1564                 case IP_VERSION(1, 0, 0):
1565                 case IP_VERSION(1, 0, 1):
1566                         /* enable S/G on PCO and RV2 */
1567                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1568                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1569                                 init_data.flags.gpu_vm_support = true;
1570                         break;
1571                 case IP_VERSION(2, 1, 0):
1572                 case IP_VERSION(3, 0, 1):
1573                 case IP_VERSION(3, 1, 2):
1574                 case IP_VERSION(3, 1, 3):
1575                 case IP_VERSION(3, 1, 4):
1576                 case IP_VERSION(3, 1, 5):
1577                 case IP_VERSION(3, 1, 6):
1578                         init_data.flags.gpu_vm_support = true;
1579                         break;
1580                 default:
1581                         break;
1582                 }
1583                 break;
1584         }
1585         if (init_data.flags.gpu_vm_support &&
1586             (amdgpu_sg_display == 0))
1587                 init_data.flags.gpu_vm_support = false;
1588
1589         if (init_data.flags.gpu_vm_support)
1590                 adev->mode_info.gpu_vm_support = true;
1591
1592         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1593                 init_data.flags.fbc_support = true;
1594
1595         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1596                 init_data.flags.multi_mon_pp_mclk_switch = true;
1597
1598         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1599                 init_data.flags.disable_fractional_pwm = true;
1600
1601         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1602                 init_data.flags.edp_no_power_sequencing = true;
1603
1604         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1605                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1606         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1607                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1608
1609         /* Disable SubVP + DRR config by default */
1610         init_data.flags.disable_subvp_drr = true;
1611         if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
1612                 init_data.flags.disable_subvp_drr = false;
1613
1614         init_data.flags.seamless_boot_edp_requested = false;
1615
1616         if (check_seamless_boot_capability(adev)) {
1617                 init_data.flags.seamless_boot_edp_requested = true;
1618                 init_data.flags.allow_seamless_boot_optimization = true;
1619                 DRM_INFO("Seamless boot condition check passed\n");
1620         }
1621
1622         init_data.flags.enable_mipi_converter_optimization = true;
1623
1624         init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1625         init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1626
1627         INIT_LIST_HEAD(&adev->dm.da_list);
1628
1629         retrieve_dmi_info(&adev->dm);
1630
1631         /* Display Core create. */
1632         adev->dm.dc = dc_create(&init_data);
1633
1634         if (adev->dm.dc) {
1635                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1636         } else {
1637                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1638                 goto error;
1639         }
1640
1641         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1642                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1643                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1644         }
1645
1646         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1647                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1648         if (dm_should_disable_stutter(adev->pdev))
1649                 adev->dm.dc->debug.disable_stutter = true;
1650
1651         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1652                 adev->dm.dc->debug.disable_stutter = true;
1653
1654         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1655                 adev->dm.dc->debug.disable_dsc = true;
1657
1658         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1659                 adev->dm.dc->debug.disable_clock_gate = true;
1660
1661         if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1662                 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1663
1664         adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1665
1666         /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
1667         adev->dm.dc->debug.ignore_cable_id = true;
1668
1669         /* TODO: A recent drm MST change moved responsibility for updating
1670          * vc_next_start_slot from the driver into drm. This forces us to
1671          * make sure vc_next_start_slot gets updated in the drm helpers every
1672          * time, regardless of whether mst_state is active, otherwise the
1673          * next hotplug computes the wrong start_slot. As a temporary
1674          * solution we also notify drm of MST deallocation whenever a link
1675          * stops being MST type while uncommitting the stream, which buys
1676          * time to work out a proper fix. Ideally, when the
1677          * dm_helpers_dp_mst_stop_top_mgr message is triggered, we should
1678          * notify drm to do a complete reset of its MST state and stop
1679          * calling further drm MST functions once the link is no longer MST,
1680          * e.g. when an MST hub/display is unplugged. When the uncommit of
1681          * the stream then arrives after the unplug, we should only reset
1682          * the hardware state.
1683          */
1684         adev->dm.dc->debug.temp_mst_deallocation_sequence = true;
1685
1686         if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
1687                 DRM_INFO("DP-HDMI FRL PCON supported\n");
1688
1689         r = dm_dmub_hw_init(adev);
1690         if (r) {
1691                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1692                 goto error;
1693         }
1694
1695         dc_hardware_init(adev->dm.dc);
1696
1697         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1698         if (!adev->dm.hpd_rx_offload_wq) {
1699                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1700                 goto error;
1701         }
1702
1703         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1704                 struct dc_phy_addr_space_config pa_config;
1705
1706                 mmhub_read_system_context(adev, &pa_config);
1707
1708                 /* Call the DC init_memory func */
1709                 dc_setup_system_context(adev->dm.dc, &pa_config);
1710         }
1711
1712         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1713         if (!adev->dm.freesync_module)
1714                 DRM_ERROR(
1715                         "amdgpu: failed to initialize freesync_module.\n");
1716         else
1717                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1718                                 adev->dm.freesync_module);
1719
1720         amdgpu_dm_init_color_mod();
1721
1722         if (adev->dm.dc->caps.max_links > 0) {
1723                 adev->dm.vblank_control_workqueue =
1724                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1725                 if (!adev->dm.vblank_control_workqueue)
1726                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1727         }
1728
1729 #ifdef CONFIG_DRM_AMD_DC_HDCP
1730         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1731                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1732
1733                 if (!adev->dm.hdcp_workqueue)
1734                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1735                 else
1736                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1737
1738                 dc_init_callbacks(adev->dm.dc, &init_params);
1739         }
1740 #endif
1741 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1742         adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
1743         if (!adev->dm.secure_display_ctxs)
1744                 DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
1746 #endif
1747         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1748                 init_completion(&adev->dm.dmub_aux_transfer_done);
1749                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1750                 if (!adev->dm.dmub_notify) {
1751                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1752                         goto error;
1753                 }
1754
1755                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1756                 if (!adev->dm.delayed_hpd_wq) {
1757                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1758                         goto error;
1759                 }
1760
1761                 amdgpu_dm_outbox_init(adev);
1762                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1763                         dmub_aux_setconfig_callback, false)) {
1764                         DRM_ERROR("amdgpu: failed to register DMUB AUX callback\n");
1765                         goto error;
1766                 }
1767                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1768                         DRM_ERROR("amdgpu: failed to register DMUB HPD callback\n");
1769                         goto error;
1770                 }
1771                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1772                         DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback\n");
1773                         goto error;
1774                 }
1775         }
1776
1777         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1778          * It is expected that DMUB will resend any pending notifications at this point, for
1779          * example HPD from DPIA.
1780          */
1781         if (dc_is_dmub_outbox_supported(adev->dm.dc))
1782                 dc_enable_dmub_outbox(adev->dm.dc);
1783
1784         if (amdgpu_dm_initialize_drm_device(adev)) {
1785                 DRM_ERROR(
1786                         "amdgpu: failed to initialize sw for display support.\n");
1787                 goto error;
1788         }
1789
1790         /* create fake encoders for MST */
1791         dm_dp_create_fake_mst_encoders(adev);
1792
1793         /* TODO: Add_display_info? */
1794
1795         /* TODO use dynamic cursor width */
1796         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1797         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1798
1799         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1800                 DRM_ERROR(
1801                         "amdgpu: failed to initialize vblank for display support.\n");
1802                 goto error;
1803         }
1804
1806         DRM_DEBUG_DRIVER("KMS initialized.\n");
1807
1808         return 0;
1809 error:
1810         amdgpu_dm_fini(adev);
1811
1812         return -EINVAL;
1813 }
1814
1815 static int amdgpu_dm_early_fini(void *handle)
1816 {
1817         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1818
1819         amdgpu_dm_audio_fini(adev);
1820
1821         return 0;
1822 }
1823
1824 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1825 {
1826         int i;
1827
1828         if (adev->dm.vblank_control_workqueue) {
1829                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1830                 adev->dm.vblank_control_workqueue = NULL;
1831         }
1832
1833         amdgpu_dm_destroy_drm_device(&adev->dm);
1834
1835 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1836         if (adev->dm.secure_display_ctxs) {
1837                 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1838                         if (adev->dm.secure_display_ctxs[i].crtc) {
1839                                 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
1840                                 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
1841                         }
1842                 }
1843                 kfree(adev->dm.secure_display_ctxs);
1844                 adev->dm.secure_display_ctxs = NULL;
1845         }
1846 #endif
1847 #ifdef CONFIG_DRM_AMD_DC_HDCP
1848         if (adev->dm.hdcp_workqueue) {
1849                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1850                 adev->dm.hdcp_workqueue = NULL;
1851         }
1852
1853         if (adev->dm.dc)
1854                 dc_deinit_callbacks(adev->dm.dc);
1855 #endif
1856
1857         if (adev->dm.dc)
1858                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1858
1859         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1860                 kfree(adev->dm.dmub_notify);
1861                 adev->dm.dmub_notify = NULL;
1862                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1863                 adev->dm.delayed_hpd_wq = NULL;
1864         }
1865
1866         if (adev->dm.dmub_bo)
1867                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1868                                       &adev->dm.dmub_bo_gpu_addr,
1869                                       &adev->dm.dmub_bo_cpu_addr);
1870
1871         if (adev->dm.hpd_rx_offload_wq) {
1872                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1873                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1874                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1875                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1876                         }
1877                 }
1878
1879                 kfree(adev->dm.hpd_rx_offload_wq);
1880                 adev->dm.hpd_rx_offload_wq = NULL;
1881         }
1882
1883         /* DC Destroy TODO: Replace destroy DAL */
1884         if (adev->dm.dc)
1885                 dc_destroy(&adev->dm.dc);
1886         /*
1887          * TODO: pageflip, vblank interrupt
1888          *
1889          * amdgpu_dm_irq_fini(adev);
1890          */
1891
1892         if (adev->dm.cgs_device) {
1893                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1894                 adev->dm.cgs_device = NULL;
1895         }
1896         if (adev->dm.freesync_module) {
1897                 mod_freesync_destroy(adev->dm.freesync_module);
1898                 adev->dm.freesync_module = NULL;
1899         }
1900
1901         mutex_destroy(&adev->dm.audio_lock);
1902         mutex_destroy(&adev->dm.dc_lock);
1903         mutex_destroy(&adev->dm.dpia_aux_lock);
1906 }
1907
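/* Request DMCU firmware for ASICs that need it and register it for PSP
 * loading. ASICs without a separate DMCU return 0 without loading anything.
 */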
1908 static int load_dmcu_fw(struct amdgpu_device *adev)
1909 {
1910         const char *fw_name_dmcu = NULL;
1911         int r;
1912         const struct dmcu_firmware_header_v1_0 *hdr;
1913
1914         switch (adev->asic_type) {
1915 #if defined(CONFIG_DRM_AMD_DC_SI)
1916         case CHIP_TAHITI:
1917         case CHIP_PITCAIRN:
1918         case CHIP_VERDE:
1919         case CHIP_OLAND:
1920 #endif
1921         case CHIP_BONAIRE:
1922         case CHIP_HAWAII:
1923         case CHIP_KAVERI:
1924         case CHIP_KABINI:
1925         case CHIP_MULLINS:
1926         case CHIP_TONGA:
1927         case CHIP_FIJI:
1928         case CHIP_CARRIZO:
1929         case CHIP_STONEY:
1930         case CHIP_POLARIS11:
1931         case CHIP_POLARIS10:
1932         case CHIP_POLARIS12:
1933         case CHIP_VEGAM:
1934         case CHIP_VEGA10:
1935         case CHIP_VEGA12:
1936         case CHIP_VEGA20:
1937                 return 0;
1938         case CHIP_NAVI12:
1939                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1940                 break;
1941         case CHIP_RAVEN:
1942                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1943                     ASICREV_IS_RAVEN2(adev->external_rev_id))
1944                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1945                 else
1946                         return 0;
1948                 break;
1949         default:
1950                 switch (adev->ip_versions[DCE_HWIP][0]) {
1951                 case IP_VERSION(2, 0, 2):
1952                 case IP_VERSION(2, 0, 3):
1953                 case IP_VERSION(2, 0, 0):
1954                 case IP_VERSION(2, 1, 0):
1955                 case IP_VERSION(3, 0, 0):
1956                 case IP_VERSION(3, 0, 2):
1957                 case IP_VERSION(3, 0, 3):
1958                 case IP_VERSION(3, 0, 1):
1959                 case IP_VERSION(3, 1, 2):
1960                 case IP_VERSION(3, 1, 3):
1961                 case IP_VERSION(3, 1, 4):
1962                 case IP_VERSION(3, 1, 5):
1963                 case IP_VERSION(3, 1, 6):
1964                 case IP_VERSION(3, 2, 0):
1965                 case IP_VERSION(3, 2, 1):
1966                         return 0;
1967                 default:
1968                         break;
1969                 }
1970                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1971                 return -EINVAL;
1972         }
1973
1974         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1975                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1976                 return 0;
1977         }
1978
1979         r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
1980         if (r == -ENODEV) {
1981                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1982                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1983                 adev->dm.fw_dmcu = NULL;
1984                 return 0;
1985         }
1986         if (r) {
1987                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1988                         fw_name_dmcu);
1989                 amdgpu_ucode_release(&adev->dm.fw_dmcu);
1990                 return r;
1991         }
1992
1993         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1994         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1995         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1996         adev->firmware.fw_size +=
1997                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1998
1999         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
2000         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
2001         adev->firmware.fw_size +=
2002                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2003
2004         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
2005
2006         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
2007
2008         return 0;
2009 }
2010
2011 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
2012 {
2013         struct amdgpu_device *adev = ctx;
2014
2015         return dm_read_reg(adev->dm.dc->ctx, address);
2016 }
2017
2018 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
2019                                      uint32_t value)
2020 {
2021         struct amdgpu_device *adev = ctx;
2022
2023         return dm_write_reg(adev->dm.dc->ctx, address, value);
2024 }
2025
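/* Software-side DMUB setup: map the DCE IP version to a DMUB ASIC id, create
 * the DMUB service, size its regions from the firmware header and back them
 * with a kernel buffer object.
 */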
2026 static int dm_dmub_sw_init(struct amdgpu_device *adev)
2027 {
2028         struct dmub_srv_create_params create_params;
2029         struct dmub_srv_region_params region_params;
2030         struct dmub_srv_region_info region_info;
2031         struct dmub_srv_fb_params fb_params;
2032         struct dmub_srv_fb_info *fb_info;
2033         struct dmub_srv *dmub_srv;
2034         const struct dmcub_firmware_header_v1_0 *hdr;
2035         enum dmub_asic dmub_asic;
2036         enum dmub_status status;
2037         int r;
2038
2039         switch (adev->ip_versions[DCE_HWIP][0]) {
2040         case IP_VERSION(2, 1, 0):
2041                 dmub_asic = DMUB_ASIC_DCN21;
2042                 break;
2043         case IP_VERSION(3, 0, 0):
2044                 dmub_asic = DMUB_ASIC_DCN30;
2045                 break;
2046         case IP_VERSION(3, 0, 1):
2047                 dmub_asic = DMUB_ASIC_DCN301;
2048                 break;
2049         case IP_VERSION(3, 0, 2):
2050                 dmub_asic = DMUB_ASIC_DCN302;
2051                 break;
2052         case IP_VERSION(3, 0, 3):
2053                 dmub_asic = DMUB_ASIC_DCN303;
2054                 break;
2055         case IP_VERSION(3, 1, 2):
2056         case IP_VERSION(3, 1, 3):
2057                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2058                 break;
2059         case IP_VERSION(3, 1, 4):
2060                 dmub_asic = DMUB_ASIC_DCN314;
2061                 break;
2062         case IP_VERSION(3, 1, 5):
2063                 dmub_asic = DMUB_ASIC_DCN315;
2064                 break;
2065         case IP_VERSION(3, 1, 6):
2066                 dmub_asic = DMUB_ASIC_DCN316;
2067                 break;
2068         case IP_VERSION(3, 2, 0):
2069                 dmub_asic = DMUB_ASIC_DCN32;
2070                 break;
2071         case IP_VERSION(3, 2, 1):
2072                 dmub_asic = DMUB_ASIC_DCN321;
2073                 break;
2074         default:
2075                 /* ASIC doesn't support DMUB. */
2076                 return 0;
2077         }
2078
2079         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2080         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2081
2082         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2083                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2084                         AMDGPU_UCODE_ID_DMCUB;
2085                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2086                         adev->dm.dmub_fw;
2087                 adev->firmware.fw_size +=
2088                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2089
2090                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2091                          adev->dm.dmcub_fw_version);
2092         }
2093
2095         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2096         dmub_srv = adev->dm.dmub_srv;
2097
2098         if (!dmub_srv) {
2099                 DRM_ERROR("Failed to allocate DMUB service!\n");
2100                 return -ENOMEM;
2101         }
2102
2103         memset(&create_params, 0, sizeof(create_params));
2104         create_params.user_ctx = adev;
2105         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2106         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2107         create_params.asic = dmub_asic;
2108
2109         /* Create the DMUB service. */
2110         status = dmub_srv_create(dmub_srv, &create_params);
2111         if (status != DMUB_STATUS_OK) {
2112                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2113                 return -EINVAL;
2114         }
2115
2116         /* Calculate the size of all the regions for the DMUB service. */
2117         memset(&region_params, 0, sizeof(region_params));
2118
2119         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2120                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2121         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2122         region_params.vbios_size = adev->bios_size;
2123         region_params.fw_bss_data = region_params.bss_data_size ?
2124                 adev->dm.dmub_fw->data +
2125                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2126                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2127         region_params.fw_inst_const =
2128                 adev->dm.dmub_fw->data +
2129                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2130                 PSP_HEADER_BYTES;
2131
2132         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2133                                            &region_info);
2134
2135         if (status != DMUB_STATUS_OK) {
2136                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2137                 return -EINVAL;
2138         }
2139
2140         /*
2141          * Allocate a framebuffer based on the total size of all the regions.
2142          * TODO: Move this into GART.
2143          */
2144         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2145                                     AMDGPU_GEM_DOMAIN_VRAM |
2146                                     AMDGPU_GEM_DOMAIN_GTT,
2147                                     &adev->dm.dmub_bo,
2148                                     &adev->dm.dmub_bo_gpu_addr,
2149                                     &adev->dm.dmub_bo_cpu_addr);
2150         if (r)
2151                 return r;
2152
2153         /* Rebase the regions on the framebuffer address. */
2154         memset(&fb_params, 0, sizeof(fb_params));
2155         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2156         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2157         fb_params.region_info = &region_info;
2158
2159         adev->dm.dmub_fb_info =
2160                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2161         fb_info = adev->dm.dmub_fb_info;
2162
2163         if (!fb_info) {
2164                 DRM_ERROR(
2165                         "Failed to allocate framebuffer info for DMUB service!\n");
2166                 return -ENOMEM;
2167         }
2168
2169         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2170         if (status != DMUB_STATUS_OK) {
2171                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2172                 return -EINVAL;
2173         }
2174
2175         return 0;
2176 }
2177
2178 static int dm_sw_init(void *handle)
2179 {
2180         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2181         int r;
2182
2183         r = dm_dmub_sw_init(adev);
2184         if (r)
2185                 return r;
2186
2187         return load_dmcu_fw(adev);
2188 }
2189
2190 static int dm_sw_fini(void *handle)
2191 {
2192         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2193
2194         kfree(adev->dm.dmub_fb_info);
2195         adev->dm.dmub_fb_info = NULL;
2196
2197         if (adev->dm.dmub_srv) {
2198                 dmub_srv_destroy(adev->dm.dmub_srv);
2199                 adev->dm.dmub_srv = NULL;
2200         }
2201
2202         amdgpu_ucode_release(&adev->dm.dmub_fw);
2203         amdgpu_ucode_release(&adev->dm.fw_dmcu);
2204
2205         return 0;
2206 }
2207
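/* Start MST topology management on every connector whose dc link detected an
 * MST branch device; on failure, fall back to a single (SST) connection.
 */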
2208 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2209 {
2210         struct amdgpu_dm_connector *aconnector;
2211         struct drm_connector *connector;
2212         struct drm_connector_list_iter iter;
2213         int ret = 0;
2214
2215         drm_connector_list_iter_begin(dev, &iter);
2216         drm_for_each_connector_iter(connector, &iter) {
2217                 aconnector = to_amdgpu_dm_connector(connector);
2218                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2219                     aconnector->mst_mgr.aux) {
2220                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2221                                          aconnector,
2222                                          aconnector->base.base.id);
2223
2224                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2225                         if (ret < 0) {
2226                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2227                                 aconnector->dc_link->type =
2228                                         dc_connection_single;
2229                                 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2230                                                                      aconnector->dc_link);
2231                                 break;
2232                         }
2233                 }
2234         }
2235         drm_connector_list_iter_end(&iter);
2236
2237         return ret;
2238 }
2239
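/* Program the DMCU (or, for DMCUB-based ABM, each eDP instance) with the
 * backlight ramping/ABM parameters, then kick off MST link detection for all
 * connectors.
 */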
2240 static int dm_late_init(void *handle)
2241 {
2242         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2244         struct dmcu_iram_parameters params;
2245         unsigned int linear_lut[16];
2246         int i;
2247         struct dmcu *dmcu = NULL;
2248
2249         dmcu = adev->dm.dc->res_pool->dmcu;
2250
2251         for (i = 0; i < 16; i++)
2252                 linear_lut[i] = 0xFFFF * i / 15;
2253
2254         params.set = 0;
2255         params.backlight_ramping_override = false;
2256         params.backlight_ramping_start = 0xCCCC;
2257         params.backlight_ramping_reduction = 0xCCCCCCCC;
2258         params.backlight_lut_array_size = 16;
2259         params.backlight_lut_array = linear_lut;
2260
2261         /* Min backlight level after ABM reduction. Don't allow below 1%:
2262          * 0xFFFF * 0.01 = 0x28F
2263          */
2264         params.min_abm_backlight = 0x28F;
2265         /* In the case where ABM is implemented on DMCUB,
2266          * the dmcu object will be NULL.
2267          * ABM 2.4 and up are implemented on DMCUB.
2268          */
2269         if (dmcu) {
2270                 if (!dmcu_load_iram(dmcu, params))
2271                         return -EINVAL;
2272         } else if (adev->dm.dc->ctx->dmub_srv) {
2273                 struct dc_link *edp_links[MAX_NUM_EDP];
2274                 int edp_num;
2275
2276                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2277                 for (i = 0; i < edp_num; i++) {
2278                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2279                                 return -EINVAL;
2280                 }
2281         }
2282
2283         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2284 }
2285
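/* Suspend or resume all MST topology managers around S3. A manager that
 * fails to resume triggers a hotplug event so userspace can re-probe.
 */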
2286 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2287 {
2288         struct amdgpu_dm_connector *aconnector;
2289         struct drm_connector *connector;
2290         struct drm_connector_list_iter iter;
2291         struct drm_dp_mst_topology_mgr *mgr;
2292         int ret;
2293         bool need_hotplug = false;
2294
2295         drm_connector_list_iter_begin(dev, &iter);
2296         drm_for_each_connector_iter(connector, &iter) {
2297                 aconnector = to_amdgpu_dm_connector(connector);
2298                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2299                     aconnector->mst_root)
2300                         continue;
2301
2302                 mgr = &aconnector->mst_mgr;
2303
2304                 if (suspend) {
2305                         drm_dp_mst_topology_mgr_suspend(mgr);
2306                 } else {
2307                         /* If extended timeout is supported in hardware,
2308                          * default to the LTTPR timeout (3.2 ms) first as a W/A for the DP
2309                          * link-layer CTS 4.2.1.1 regression introduced by the CTS spec update.
2310                          */
2311                         try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
2312                         if (!dp_is_lttpr_present(aconnector->dc_link))
2313                                 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2314
2315                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2316                         if (ret < 0) {
2317                                 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2318                                         aconnector->dc_link);
2319                                 need_hotplug = true;
2320                         }
2321                 }
2322         }
2323         drm_connector_list_iter_end(&iter);
2324
2325         if (need_hotplug)
2326                 drm_kms_helper_hotplug_event(dev);
2327 }
2328
2329 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2330 {
2331         int ret = 0;
2332
2333         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2334          * depends on the Windows driver's dc implementation.
2335          * For Navi1x, the clock settings of the dcn watermarks are fixed and
2336          * should be passed to smu during boot up and on resume from S3.
2337          * Boot up: dc calculates the dcn watermark clock settings within
2338          * dc_create and dcn20_resource_construct,
2339          * then calls the pplib functions below to pass the settings to smu:
2340          * smu_set_watermarks_for_clock_ranges
2341          * smu_set_watermarks_table
2342          * navi10_set_watermarks_table
2343          * smu_write_watermarks_table
2344          *
2345          * For Renoir, the clock settings of the dcn watermarks are also fixed
2346          * values. dc has implemented a different flow for the Windows driver:
2347          * dc_hardware_init / dc_set_power_state
2348          * dcn10_init_hw
2349          * notify_wm_ranges
2350          * set_wm_ranges
2351          * -- Linux
2352          * smu_set_watermarks_for_clock_ranges
2353          * renoir_set_watermarks_table
2354          * smu_write_watermarks_table
2355          *
2356          * For Linux,
2357          * dc_hardware_init -> amdgpu_dm_init
2358          * dc_set_power_state --> dm_resume
2359          *
2360          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
2361          */
2363         switch (adev->ip_versions[DCE_HWIP][0]) {
2364         case IP_VERSION(2, 0, 2):
2365         case IP_VERSION(2, 0, 0):
2366                 break;
2367         default:
2368                 return 0;
2369         }
2370
2371         ret = amdgpu_dpm_write_watermarks_table(adev);
2372         if (ret) {
2373                 DRM_ERROR("Failed to update WMTABLE!\n");
2374                 return ret;
2375         }
2376
2377         return 0;
2378 }
2379
2380 /**
2381  * dm_hw_init() - Initialize DC device
2382  * @handle: The base driver device containing the amdgpu_dm device.
2383  *
2384  * Initialize the &struct amdgpu_display_manager device. This involves calling
2385  * the initializers of each DM component, then populating the struct with them.
2386  *
2387  * Although the function implies hardware initialization, both hardware and
2388  * software are initialized here. Splitting them out to their relevant init
2389  * hooks is a future TODO item.
2390  *
2391  * Some notable things that are initialized here:
2392  *
2393  * - Display Core, both software and hardware
2394  * - DC modules that we need (freesync and color management)
2395  * - DRM software states
2396  * - Interrupt sources and handlers
2397  * - Vblank support
2398  * - Debug FS entries, if enabled
2399  */
2400 static int dm_hw_init(void *handle)
2401 {
2402         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2403         /* Create DAL display manager */
2404         if (amdgpu_dm_init(adev))
2404                 return -EINVAL;
2405         amdgpu_dm_hpd_init(adev);
2406
2407         return 0;
2408 }
2409
2410 /**
2411  * dm_hw_fini() - Teardown DC device
2412  * @handle: The base driver device containing the amdgpu_dm device.
2413  *
2414  * Teardown components within &struct amdgpu_display_manager that require
2415  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2416  * were loaded. Also flush IRQ workqueues and disable them.
2417  */
2418 static int dm_hw_fini(void *handle)
2419 {
2420         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2421
2422         amdgpu_dm_hpd_fini(adev);
2423
2424         amdgpu_dm_irq_fini(adev);
2425         amdgpu_dm_fini(adev);
2426         return 0;
2427 }
2429
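/* Enable or disable pflip and vblank interrupts for every stream in @state
 * that has active planes; used to quiesce display interrupts across GPU
 * reset.
 */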
2430 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2431                                  struct dc_state *state, bool enable)
2432 {
2433         enum dc_irq_source irq_source;
2434         struct amdgpu_crtc *acrtc;
2435         int rc = -EBUSY;
2436         int i = 0;
2437
2438         for (i = 0; i < state->stream_count; i++) {
2439                 acrtc = get_crtc_by_otg_inst(
2440                                 adev, state->stream_status[i].primary_otg_inst);
2441
2442                 if (acrtc && state->stream_status[i].plane_count != 0) {
2443                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2444                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2445                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2446                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2447                         if (rc)
2448                                 DRM_WARN("Failed to %s pflip interrupts\n",
2449                                          enable ? "enable" : "disable");
2450
2451                         if (enable) {
2452                                 rc = dm_enable_vblank(&acrtc->base);
2453                                 if (rc)
2454                                         DRM_WARN("Failed to enable vblank interrupts\n");
2455                         } else {
2456                                 dm_disable_vblank(&acrtc->base);
2457                         }
2458                 }
2459         }
2460 }
2463
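/* Commit a copy of the current DC state with all streams and their planes
 * removed, effectively blanking every display; used on suspend and GPU
 * reset.
 */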
2464 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2465 {
2466         struct dc_state *context = NULL;
2467         enum dc_status res = DC_ERROR_UNEXPECTED;
2468         int i;
2469         struct dc_stream_state *del_streams[MAX_PIPES];
2470         int del_streams_count = 0;
2471
2472         memset(del_streams, 0, sizeof(del_streams));
2473
2474         context = dc_create_state(dc);
2475         if (context == NULL)
2476                 goto context_alloc_fail;
2477
2478         dc_resource_state_copy_construct_current(dc, context);
2479
2480         /* First remove from context all streams */
2481         for (i = 0; i < context->stream_count; i++) {
2482                 struct dc_stream_state *stream = context->streams[i];
2483
2484                 del_streams[del_streams_count++] = stream;
2485         }
2486
2487         /* Remove all planes for removed streams and then remove the streams */
2488         for (i = 0; i < del_streams_count; i++) {
2489                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2490                         res = DC_FAIL_DETACH_SURFACES;
2491                         goto fail;
2492                 }
2493
2494                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2495                 if (res != DC_OK)
2496                         goto fail;
2497         }
2498
2499         res = dc_commit_state(dc, context);
2500
2501 fail:
2502         dc_release_state(context);
2503
2504 context_alloc_fail:
2505         return res;
2506 }
2507
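/* Flush any pending HPD RX IRQ offload work before suspending. */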
2508 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2509 {
2510         int i;
2511
2512         if (dm->hpd_rx_offload_wq) {
2513                 for (i = 0; i < dm->dc->caps.max_links; i++)
2514                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2515         }
2516 }
2517
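/* On a normal suspend, cache the atomic state and put DC into D3. On the GPU
 * reset path, cache the DC state and commit zero streams instead; note that
 * dm->dc_lock is taken here and released later in dm_resume().
 */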
2518 static int dm_suspend(void *handle)
2519 {
2520         struct amdgpu_device *adev = handle;
2521         struct amdgpu_display_manager *dm = &adev->dm;
2522         int ret = 0;
2523
2524         if (amdgpu_in_reset(adev)) {
2525                 mutex_lock(&dm->dc_lock);
2526
2527                 dc_allow_idle_optimizations(adev->dm.dc, false);
2528
2529                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2530
2531                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2532
2533                 amdgpu_dm_commit_zero_streams(dm->dc);
2534
2535                 amdgpu_dm_irq_suspend(adev);
2536
2537                 hpd_rx_irq_work_suspend(dm);
2538
2539                 return ret;
2540         }
2541
2542         WARN_ON(adev->dm.cached_state);
2543         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2544
2545         s3_handle_mst(adev_to_drm(adev), true);
2546
2547         amdgpu_dm_irq_suspend(adev);
2548
2549         hpd_rx_irq_work_suspend(dm);
2550
2551         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2552
2553         return 0;
2554 }
2555
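/* Return the first connector in @state whose new state points at @crtc, or
 * NULL if there is none.
 */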
2556 struct amdgpu_dm_connector *
2557 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2558                                              struct drm_crtc *crtc)
2559 {
2560         u32 i;
2561         struct drm_connector_state *new_con_state;
2562         struct drm_connector *connector;
2563         struct drm_crtc *crtc_from_state;
2564
2565         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2566                 crtc_from_state = new_con_state->crtc;
2567
2568                 if (crtc_from_state == crtc)
2569                         return to_amdgpu_dm_connector(connector);
2570         }
2571
2572         return NULL;
2573 }
2574
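/* Emulate link detection for a connector with a forced state: drop the old
 * sink, create a new one matching the connector's signal type and attempt to
 * read a local EDID.
 */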
2575 static void emulated_link_detect(struct dc_link *link)
2576 {
2577         struct dc_sink_init_data sink_init_data = { 0 };
2578         struct display_sink_capability sink_caps = { 0 };
2579         enum dc_edid_status edid_status;
2580         struct dc_context *dc_ctx = link->ctx;
2581         struct dc_sink *sink = NULL;
2582         struct dc_sink *prev_sink = NULL;
2583
2584         link->type = dc_connection_none;
2585         prev_sink = link->local_sink;
2586
2587         if (prev_sink)
2588                 dc_sink_release(prev_sink);
2589
2590         switch (link->connector_signal) {
2591         case SIGNAL_TYPE_HDMI_TYPE_A: {
2592                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2593                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2594                 break;
2595         }
2596
2597         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2598                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2599                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2600                 break;
2601         }
2602
2603         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2604                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2605                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2606                 break;
2607         }
2608
2609         case SIGNAL_TYPE_LVDS: {
2610                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2611                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2612                 break;
2613         }
2614
2615         case SIGNAL_TYPE_EDP: {
2616                 sink_caps.transaction_type =
2617                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2618                 sink_caps.signal = SIGNAL_TYPE_EDP;
2619                 break;
2620         }
2621
2622         case SIGNAL_TYPE_DISPLAY_PORT: {
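                /* Report an emulated DP sink as virtual, presumably so that
                 * DC does not attempt real link training on a link with
                 * nothing attached.
                 */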
2623                 sink_caps.transaction_type =
2624                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2625                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2626                 break;
2627         }
2628
2629         default:
2630                 DC_ERROR("Invalid connector type! signal:%d\n",
2631                         link->connector_signal);
2632                 return;
2633         }
2634
2635         sink_init_data.link = link;
2636         sink_init_data.sink_signal = sink_caps.signal;
2637
2638         sink = dc_sink_create(&sink_init_data);
2639         if (!sink) {
2640                 DC_ERROR("Failed to create sink!\n");
2641                 return;
2642         }
2643
2644         /* dc_sink_create returns a new reference */
2645         link->local_sink = sink;
2646
2647         edid_status = dm_helpers_read_local_edid(
2648                         link->ctx,
2649                         link,
2650                         sink);
2651
2652         if (edid_status != EDID_OK)
2653                 DC_ERROR("Failed to read EDID\n");
2655 }
2656
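/* Re-commit every stream from the cached pre-reset state with full surface
 * updates forced, so the hardware gets fully reprogrammed after GPU reset.
 */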
2657 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2658                                      struct amdgpu_display_manager *dm)
2659 {
2660         struct {
2661                 struct dc_surface_update surface_updates[MAX_SURFACES];
2662                 struct dc_plane_info plane_infos[MAX_SURFACES];
2663                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2664                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2665                 struct dc_stream_update stream_update;
2666         } *bundle;
2667         int k, m;
2668
2669         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2670
2671         if (!bundle) {
2672                 dm_error("Failed to allocate update bundle\n");
2673                 goto cleanup;
2674         }
2675
2676         for (k = 0; k < dc_state->stream_count; k++) {
2677                 bundle->stream_update.stream = dc_state->streams[k];
2678
2679                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2680                         bundle->surface_updates[m].surface =
2681                                 dc_state->stream_status[k].plane_states[m];
2682                         bundle->surface_updates[m].surface->force_full_update =
2683                                 true;
2684                 }
2685                 dc_commit_updates_for_stream(
2686                         dm->dc, bundle->surface_updates,
2687                         dc_state->stream_status[k].plane_count,
2688                         dc_state->streams[k], &bundle->stream_update, dc_state);
2689         }
2690
2691 cleanup:
2692         kfree(bundle);
2695 }
2696
2697 static int dm_resume(void *handle)
2698 {
2699         struct amdgpu_device *adev = handle;
2700         struct drm_device *ddev = adev_to_drm(adev);
2701         struct amdgpu_display_manager *dm = &adev->dm;
2702         struct amdgpu_dm_connector *aconnector;
2703         struct drm_connector *connector;
2704         struct drm_connector_list_iter iter;
2705         struct drm_crtc *crtc;
2706         struct drm_crtc_state *new_crtc_state;
2707         struct dm_crtc_state *dm_new_crtc_state;
2708         struct drm_plane *plane;
2709         struct drm_plane_state *new_plane_state;
2710         struct dm_plane_state *dm_new_plane_state;
2711         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2712         enum dc_connection_type new_connection_type = dc_connection_none;
2713         struct dc_state *dc_state;
2714         int i, r, j;
2715
2716         if (amdgpu_in_reset(adev)) {
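                /* dm->dc_lock is still held here: dm_suspend() takes it on
                 * the GPU reset path and this branch releases it at the end.
                 */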
2717                 dc_state = dm->cached_dc_state;
2718
2719                 /*
2720                  * The dc->current_state is backed up into dm->cached_dc_state
2721                  * before we commit 0 streams.
2722                  *
2723                  * DC will clear link encoder assignments on the real state
2724                  * but the changes won't propagate over to the copy we made
2725                  * before the 0 streams commit.
2726                  *
2727                  * DC expects that link encoder assignments are *not* valid
2728                  * when committing a state, so as a workaround we can copy
2729                  * off of the current state.
2730                  *
2731                  * We lose the previous assignments, but we had already
2732                  * committed 0 streams anyway.
2733                  */
2734                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2735
2736                 r = dm_dmub_hw_init(adev);
2737                 if (r)
2738                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2739
2740                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2741                 dc_resume(dm->dc);
2742
2743                 amdgpu_dm_irq_resume_early(adev);
2744
2745                 for (i = 0; i < dc_state->stream_count; i++) {
2746                         dc_state->streams[i]->mode_changed = true;
2747                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2748                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2749                                         = 0xffffffff;
2750                         }
2751                 }
2752
2753                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2754                         amdgpu_dm_outbox_init(adev);
2755                         dc_enable_dmub_outbox(adev->dm.dc);
2756                 }
2757
2758                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2759
2760                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2761
2762                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2763
2764                 dc_release_state(dm->cached_dc_state);
2765                 dm->cached_dc_state = NULL;
2766
2767                 amdgpu_dm_irq_resume_late(adev);
2768
2769                 mutex_unlock(&dm->dc_lock);
2770
2771                 return 0;
2772         }
2773         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2774         dc_release_state(dm_state->context);
2775         dm_state->context = dc_create_state(dm->dc);
2776         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2777         dc_resource_state_construct(dm->dc, dm_state->context);
2778
2779         /* Before powering on DC we need to re-initialize DMUB. */
2780         dm_dmub_hw_resume(adev);
2781
2782         /* Re-enable outbox interrupts for DPIA. */
2783         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2784                 amdgpu_dm_outbox_init(adev);
2785                 dc_enable_dmub_outbox(adev->dm.dc);
2786         }
2787
2788         /* power on hardware */
2789         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2790
2791         /* program HPD filter */
2792         dc_resume(dm->dc);
2793
2794         /*
2795          * early enable HPD Rx IRQ, should be done before set mode as short
2796          * pulse interrupts are used for MST
2797          */
2798         amdgpu_dm_irq_resume_early(adev);
2799
2800         /* On resume we need to rewrite the MSTM control bits to enable MST */
2801         s3_handle_mst(ddev, false);
2802
2803         /* Do detection */
2804         drm_connector_list_iter_begin(ddev, &iter);
2805         drm_for_each_connector_iter(connector, &iter) {
2806                 aconnector = to_amdgpu_dm_connector(connector);
2807
2808                 if (!aconnector->dc_link)
2809                         continue;
2810
2811                 /*
2812                  * This is the case when traversing through already created
2813                  * MST connectors; they should be skipped.
2814                  */
2815                 if (aconnector->dc_link->type == dc_connection_mst_branch)
2816                         continue;
2817
2818                 mutex_lock(&aconnector->hpd_lock);
2819                 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
2820                         DRM_ERROR("KMS: Failed to detect connector\n");
2821
2822                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2823                         emulated_link_detect(aconnector->dc_link);
2824                 } else {
2825                         mutex_lock(&dm->dc_lock);
2826                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2827                         mutex_unlock(&dm->dc_lock);
2828                 }
2829
2830                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2831                         aconnector->fake_enable = false;
2832
2833                 if (aconnector->dc_sink)
2834                         dc_sink_release(aconnector->dc_sink);
2835                 aconnector->dc_sink = NULL;
2836                 amdgpu_dm_update_connector_after_detect(aconnector);
2837                 mutex_unlock(&aconnector->hpd_lock);
2838         }
2839         drm_connector_list_iter_end(&iter);
2840
2841         /* Force mode set in atomic commit */
2842         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2843                 new_crtc_state->active_changed = true;
2844
2845         /*
2846          * atomic_check is expected to create the dc states. We need to release
2847          * them here, since they were duplicated as part of the suspend
2848          * procedure.
2849          */
2850         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2851                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2852                 if (dm_new_crtc_state->stream) {
2853                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2854                         dc_stream_release(dm_new_crtc_state->stream);
2855                         dm_new_crtc_state->stream = NULL;
2856                 }
2857         }
2858
2859         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2860                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2861                 if (dm_new_plane_state->dc_state) {
2862                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2863                         dc_plane_state_release(dm_new_plane_state->dc_state);
2864                         dm_new_plane_state->dc_state = NULL;
2865                 }
2866         }
2867
2868         drm_atomic_helper_resume(ddev, dm->cached_state);
2869
2870         dm->cached_state = NULL;
2871
2872         amdgpu_dm_irq_resume_late(adev);
2873
2874         amdgpu_dm_smu_write_watermarks_table(adev);
2875
2876         return 0;
2877 }
2878
2879 /**
2880  * DOC: DM Lifecycle
2881  *
2882  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2883  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2884  * the base driver's device list to be initialized and torn down accordingly.
2885  *
2886  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2887  */
2888
2889 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2890         .name = "dm",
2891         .early_init = dm_early_init,
2892         .late_init = dm_late_init,
2893         .sw_init = dm_sw_init,
2894         .sw_fini = dm_sw_fini,
2895         .early_fini = amdgpu_dm_early_fini,
2896         .hw_init = dm_hw_init,
2897         .hw_fini = dm_hw_fini,
2898         .suspend = dm_suspend,
2899         .resume = dm_resume,
2900         .is_idle = dm_is_idle,
2901         .wait_for_idle = dm_wait_for_idle,
2902         .check_soft_reset = dm_check_soft_reset,
2903         .soft_reset = dm_soft_reset,
2904         .set_clockgating_state = dm_set_clockgating_state,
2905         .set_powergating_state = dm_set_powergating_state,
2906 };
2907
2908 const struct amdgpu_ip_block_version dm_ip_block = {
2910         .type = AMD_IP_BLOCK_TYPE_DCE,
2911         .major = 1,
2912         .minor = 0,
2913         .rev = 0,
2914         .funcs = &amdgpu_dm_funcs,
2915 };
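
/*
 * A minimal sketch (not the literal call site, which lives in the base
 * driver) of how this IP block is expected to be picked up during device
 * init, assuming DC support was detected:
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * The hooks in amdgpu_dm_funcs above are then invoked through the normal
 * amd_ip_funcs lifecycle (hw_init, suspend, resume, ...).
 */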
2916
2917
2918 /**
2919  * DOC: atomic
2920  *
2921  * *WIP*
2922  */
2923
2924 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2925         .fb_create = amdgpu_display_user_framebuffer_create,
2926         .get_format_info = amd_get_format_info,
2927         .atomic_check = amdgpu_dm_atomic_check,
2928         .atomic_commit = drm_atomic_helper_commit,
2929 };
2930
2931 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2932         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
2933         .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
2934 };
2935
2936 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2937 {
2938         struct amdgpu_dm_backlight_caps *caps;
2939         struct amdgpu_display_manager *dm;
2940         struct drm_connector *conn_base;
2941         struct amdgpu_device *adev;
2942         struct dc_link *link = NULL;
2943         struct drm_luminance_range_info *luminance_range;
2944         int i;
2945
2946         if (!aconnector || !aconnector->dc_link)
2947                 return;
2948
2949         link = aconnector->dc_link;
2950         if (link->connector_signal != SIGNAL_TYPE_EDP)
2951                 return;
2952
2953         conn_base = &aconnector->base;
2954         adev = drm_to_adev(conn_base->dev);
2955         dm = &adev->dm;
2956         for (i = 0; i < dm->num_of_edps; i++) {
2957                 if (link == dm->backlight_link[i])
2958                         break;
2959         }
2960         if (i >= dm->num_of_edps)
2961                 return;
2962         caps = &dm->backlight_caps[i];
2963         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2964         caps->aux_support = false;
2965
2966         if (caps->ext_caps->bits.oled == 1 /*||
2967             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2968             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2969                 caps->aux_support = true;
2970
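        /*
         * Note: the amdgpu_backlight module parameter overrides the
         * capability-based choice above: 0 forces PWM control, 1 forces
         * AUX control, and any other value (the auto default) keeps the
         * value detected from the sink's extended caps.
         */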
2971         if (amdgpu_backlight == 0)
2972                 caps->aux_support = false;
2973         else if (amdgpu_backlight == 1)
2974                 caps->aux_support = true;
2975
2976         luminance_range = &conn_base->display_info.luminance_range;
2977         caps->aux_min_input_signal = luminance_range->min_luminance;
2978         caps->aux_max_input_signal = luminance_range->max_luminance;
2979 }
2980
2981 void amdgpu_dm_update_connector_after_detect(
2982                 struct amdgpu_dm_connector *aconnector)
2983 {
2984         struct drm_connector *connector = &aconnector->base;
2985         struct drm_device *dev = connector->dev;
2986         struct dc_sink *sink;
2987
2988         /* MST handled by drm_mst framework */
2989         if (aconnector->mst_mgr.mst_state)
2990                 return;
2991
2992         sink = aconnector->dc_link->local_sink;
2993         if (sink)
2994                 dc_sink_retain(sink);
2995
2996         /*
2997          * An EDID-managed connector gets its first update only in the mode_valid
2998          * hook; after that the connector sink is set to either the fake or the
2999          * physical sink, depending on link status. Skip if already done during boot.
3000          */
3001         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
3002                         && aconnector->dc_em_sink) {
3003
3004                 /*
3005                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
3006                  * fake a stream, because on resume connector->sink is set to NULL.
3007                  */
3008                 mutex_lock(&dev->mode_config.mutex);
3009
3010                 if (sink) {
3011                         if (aconnector->dc_sink) {
3012                                 amdgpu_dm_update_freesync_caps(connector, NULL);
3013                                 /*
3014                                  * The retain and release below bump up the sink's
3015                                  * refcount: the link no longer points to it after
3016                                  * disconnect, so the next crtc-to-connector reshuffle
3017                                  * by the UMD would otherwise release the dc_sink.
3018                                  */
3019                                 dc_sink_release(aconnector->dc_sink);
3020                         }
3021                         aconnector->dc_sink = sink;
3022                         dc_sink_retain(aconnector->dc_sink);
3023                         amdgpu_dm_update_freesync_caps(connector,
3024                                         aconnector->edid);
3025                 } else {
3026                         amdgpu_dm_update_freesync_caps(connector, NULL);
3027                         if (!aconnector->dc_sink) {
3028                                 aconnector->dc_sink = aconnector->dc_em_sink;
3029                                 dc_sink_retain(aconnector->dc_sink);
3030                         }
3031                 }
3032
3033                 mutex_unlock(&dev->mode_config.mutex);
3034
3035                 if (sink)
3036                         dc_sink_release(sink);
3037                 return;
3038         }
3039
3040         /*
3041          * TODO: temporary guard until a proper fix is found.
3042          * If this sink is an MST sink, we should not do anything.
3043          */
3044         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3045                 dc_sink_release(sink);
3046                 return;
3047         }
3048
3049         if (aconnector->dc_sink == sink) {
3050                 /*
3051                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
3052                  * Do nothing!!
3053                  */
3054                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3055                                 aconnector->connector_id);
3056                 if (sink)
3057                         dc_sink_release(sink);
3058                 return;
3059         }
3060
3061         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3062                 aconnector->connector_id, aconnector->dc_sink, sink);
3063
3064         mutex_lock(&dev->mode_config.mutex);
3065
3066         /*
3067          * 1. Update status of the drm connector
3068          * 2. Send an event and let userspace tell us what to do
3069          */
3070         if (sink) {
3071                 /*
3072                  * TODO: check if we still need the S3 mode update workaround.
3073                  * If yes, put it here.
3074                  */
3075                 if (aconnector->dc_sink) {
3076                         amdgpu_dm_update_freesync_caps(connector, NULL);
3077                         dc_sink_release(aconnector->dc_sink);
3078                 }
3079
3080                 aconnector->dc_sink = sink;
3081                 dc_sink_retain(aconnector->dc_sink);
3082                 if (sink->dc_edid.length == 0) {
3083                         aconnector->edid = NULL;
3084                         if (aconnector->dc_link->aux_mode) {
3085                                 drm_dp_cec_unset_edid(
3086                                         &aconnector->dm_dp_aux.aux);
3087                         }
3088                 } else {
3089                         aconnector->edid =
3090                                 (struct edid *)sink->dc_edid.raw_edid;
3091
3092                         if (aconnector->dc_link->aux_mode)
3093                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3094                                                     aconnector->edid);
3095                 }
3096
3097                 aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
3098                 if (!aconnector->timing_requested)
3099                         dm_error("%s: failed to allocate aconnector->timing_requested\n", __func__);
3100
3101                 drm_connector_update_edid_property(connector, aconnector->edid);
3102                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3103                 update_connector_ext_caps(aconnector);
3104         } else {
3105                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3106                 amdgpu_dm_update_freesync_caps(connector, NULL);
3107                 drm_connector_update_edid_property(connector, NULL);
3108                 aconnector->num_modes = 0;
3109                 dc_sink_release(aconnector->dc_sink);
3110                 aconnector->dc_sink = NULL;
3111                 aconnector->edid = NULL;
3112                 kfree(aconnector->timing_requested);
3113                 aconnector->timing_requested = NULL;
3114 #ifdef CONFIG_DRM_AMD_DC_HDCP
3115                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3116                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3117                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3118 #endif
3119         }
3120
3121         mutex_unlock(&dev->mode_config.mutex);
3122
3123         update_subconnector_property(aconnector);
3124
3125         if (sink)
3126                 dc_sink_release(sink);
3127 }
3128
3129 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3130 {
3131         struct drm_connector *connector = &aconnector->base;
3132         struct drm_device *dev = connector->dev;
3133         enum dc_connection_type new_connection_type = dc_connection_none;
3134         struct amdgpu_device *adev = drm_to_adev(dev);
3135 #ifdef CONFIG_DRM_AMD_DC_HDCP
3136         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3137 #endif
3138         bool ret = false;
3139
3140         if (adev->dm.disable_hpd_irq)
3141                 return;
3142
3143         /*
3144          * In case of failure, or for MST, there is no need to update the connector
3145          * status or notify the OS, since MST handles this in its own context.
3146          */
3147         mutex_lock(&aconnector->hpd_lock);
3148
3149 #ifdef CONFIG_DRM_AMD_DC_HDCP
3150         if (adev->dm.hdcp_workqueue) {
3151                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3152                 dm_con_state->update_hdcp = true;
3153         }
3154 #endif
3155         if (aconnector->fake_enable)
3156                 aconnector->fake_enable = false;
3157
3158         aconnector->timing_changed = false;
3159
3160         if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
3161                 DRM_ERROR("KMS: Failed to detect connector\n");
3162
3163         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3164                 emulated_link_detect(aconnector->dc_link);
3165
3166                 drm_modeset_lock_all(dev);
3167                 dm_restore_drm_connector_state(dev, connector);
3168                 drm_modeset_unlock_all(dev);
3169
3170                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3171                         drm_kms_helper_connector_hotplug_event(connector);
3172         } else {
3173                 mutex_lock(&adev->dm.dc_lock);
3174                 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3175                 mutex_unlock(&adev->dm.dc_lock);
3176                 if (ret) {
3177                         amdgpu_dm_update_connector_after_detect(aconnector);
3178
3179                         drm_modeset_lock_all(dev);
3180                         dm_restore_drm_connector_state(dev, connector);
3181                         drm_modeset_unlock_all(dev);
3182
3183                         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3184                                 drm_kms_helper_connector_hotplug_event(connector);
3185                 }
3186         }
3187         mutex_unlock(&aconnector->hpd_lock);
3189 }
3190
3191 static void handle_hpd_irq(void *param)
3192 {
3193         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3194
3195         handle_hpd_irq_helper(aconnector);
3197 }
3198
3199 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3200 {
3201         u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3202         u8 dret;
3203         bool new_irq_handled = false;
3204         int dpcd_addr;
3205         int dpcd_bytes_to_read;
3206
3207         const int max_process_count = 30;
3208         int process_count = 0;
3209
3210         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3211
3212         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3213                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3214                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3215                 dpcd_addr = DP_SINK_COUNT;
3216         } else {
3217                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3218                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3219                 dpcd_addr = DP_SINK_COUNT_ESI;
3220         }
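
        /*
         * For reference (DPCD addresses per the DisplayPort spec): the
         * pre-1.2 path reads 2 bytes starting at DP_SINK_COUNT (0x200),
         * while the ESI path reads 4 bytes starting at DP_SINK_COUNT_ESI
         * (0x2002), covering the sink count plus the device/link service
         * IRQ vectors in a single AUX transfer.
         */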
3221
3222         dret = drm_dp_dpcd_read(
3223                 &aconnector->dm_dp_aux.aux,
3224                 dpcd_addr,
3225                 esi,
3226                 dpcd_bytes_to_read);
3227
3228         while (dret == dpcd_bytes_to_read &&
3229                 process_count < max_process_count) {
3230                 u8 retry;
3231                 dret = 0;
3232
3233                 process_count++;
3234
3235                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3236                 /* handle HPD short pulse irq */
3237                 if (aconnector->mst_mgr.mst_state)
3238                         drm_dp_mst_hpd_irq(
3239                                 &aconnector->mst_mgr,
3240                                 esi,
3241                                 &new_irq_handled);
3242
3243                 if (new_irq_handled) {
3244                         /* ACK at DPCD to notify downstream */
3245                         const int ack_dpcd_bytes_to_write =
3246                                 dpcd_bytes_to_read - 1;
3247
3248                         for (retry = 0; retry < 3; retry++) {
3249                                 u8 wret;
3250
3251                                 wret = drm_dp_dpcd_write(
3252                                         &aconnector->dm_dp_aux.aux,
3253                                         dpcd_addr + 1,
3254                                         &esi[1],
3255                                         ack_dpcd_bytes_to_write);
3256                                 if (wret == ack_dpcd_bytes_to_write)
3257                                         break;
3258                         }
3259
3260                         /* check if there is new irq to be handled */
3261                         dret = drm_dp_dpcd_read(
3262                                 &aconnector->dm_dp_aux.aux,
3263                                 dpcd_addr,
3264                                 esi,
3265                                 dpcd_bytes_to_read);
3266
3267                         new_irq_handled = false;
3268                 } else {
3269                         break;
3270                 }
3271         }
3272
3273         if (process_count == max_process_count)
3274                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3275 }
3276
3277 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3278                                                         union hpd_irq_data hpd_irq_data)
3279 {
3280         struct hpd_rx_irq_offload_work *offload_work =
3281                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3282
3283         if (!offload_work) {
3284                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3285                 return;
3286         }
3287
3288         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3289         offload_work->data = hpd_irq_data;
3290         offload_work->offload_wq = offload_wq;
3291
3292         queue_work(offload_wq->wq, &offload_work->work);
3293         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3294 }
3295
3296 static void handle_hpd_rx_irq(void *param)
3297 {
3298         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3299         struct drm_connector *connector = &aconnector->base;
3300         struct drm_device *dev = connector->dev;
3301         struct dc_link *dc_link = aconnector->dc_link;
3302         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3303         bool result = false;
3304         enum dc_connection_type new_connection_type = dc_connection_none;
3305         struct amdgpu_device *adev = drm_to_adev(dev);
3306         union hpd_irq_data hpd_irq_data;
3307         bool link_loss = false;
3308         bool has_left_work = false;
3309         int idx = dc_link->link_index;
3310         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3311
3312         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3313
3314         if (adev->dm.disable_hpd_irq)
3315                 return;
3316
3317         /*
3318          * TODO: Temporarily add a mutex so the HPD interrupt does not
3319          * conflict over the GPIO; once an i2c helper is implemented,
3320          * this mutex should be retired.
3321          */
3322         mutex_lock(&aconnector->hpd_lock);
3323
3324         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3325                                                 &link_loss, true, &has_left_work);
3326
3327         if (!has_left_work)
3328                 goto out;
3329
3330         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3331                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3332                 goto out;
3333         }
3334
3335         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3336                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3337                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3338                         dm_handle_mst_sideband_msg(aconnector);
3339                         goto out;
3340                 }
3341
3342                 if (link_loss) {
3343                         bool skip = false;
3344
3345                         spin_lock(&offload_wq->offload_lock);
3346                         skip = offload_wq->is_handling_link_loss;
3347
3348                         if (!skip)
3349                                 offload_wq->is_handling_link_loss = true;
3350
3351                         spin_unlock(&offload_wq->offload_lock);
3352
3353                         if (!skip)
3354                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3355
3356                         goto out;
3357                 }
3358         }
3359
3360 out:
3361         if (result && !is_mst_root_connector) {
3362                 /* Downstream Port status changed. */
3363                 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
3364                         DRM_ERROR("KMS: Failed to detect connector\n");
3365
3366                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3367                         emulated_link_detect(dc_link);
3368
3369                         if (aconnector->fake_enable)
3370                                 aconnector->fake_enable = false;
3371
3372                         amdgpu_dm_update_connector_after_detect(aconnector);
3373
3375                         drm_modeset_lock_all(dev);
3376                         dm_restore_drm_connector_state(dev, connector);
3377                         drm_modeset_unlock_all(dev);
3378
3379                         drm_kms_helper_connector_hotplug_event(connector);
3380                 } else {
3381                         bool ret = false;
3382
3383                         mutex_lock(&adev->dm.dc_lock);
3384                         ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3385                         mutex_unlock(&adev->dm.dc_lock);
3386
3387                         if (ret) {
3388                                 if (aconnector->fake_enable)
3389                                         aconnector->fake_enable = false;
3390
3391                                 amdgpu_dm_update_connector_after_detect(aconnector);
3392
3393                                 drm_modeset_lock_all(dev);
3394                                 dm_restore_drm_connector_state(dev, connector);
3395                                 drm_modeset_unlock_all(dev);
3396
3397                                 drm_kms_helper_connector_hotplug_event(connector);
3398                         }
3399                 }
3400         }
3401 #ifdef CONFIG_DRM_AMD_DC_HDCP
3402         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3403                 if (adev->dm.hdcp_workqueue)
3404                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3405         }
3406 #endif
3407
3408         if (dc_link->type != dc_connection_mst_branch)
3409                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3410
3411         mutex_unlock(&aconnector->hpd_lock);
3412 }
3413
3414 static void register_hpd_handlers(struct amdgpu_device *adev)
3415 {
3416         struct drm_device *dev = adev_to_drm(adev);
3417         struct drm_connector *connector;
3418         struct amdgpu_dm_connector *aconnector;
3419         const struct dc_link *dc_link;
3420         struct dc_interrupt_params int_params = {0};
3421
3422         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3423         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3424
3425         list_for_each_entry(connector,
3426                         &dev->mode_config.connector_list, head) {
3427
3428                 aconnector = to_amdgpu_dm_connector(connector);
3429                 dc_link = aconnector->dc_link;
3430
3431                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3432                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3433                         int_params.irq_source = dc_link->irq_source_hpd;
3434
3435                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436                                         handle_hpd_irq,
3437                                         (void *) aconnector);
3438                 }
3439
3440                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3441
3442                         /* Also register for DP short pulse (hpd_rx). */
3443                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3444                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3445
3446                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3447                                         handle_hpd_rx_irq,
3448                                         (void *) aconnector);
3449
3450                         if (adev->dm.hpd_rx_offload_wq)
3451                                 adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
3452                                         aconnector;
3453                 }
3454         }
3455 }
3456
3457 #if defined(CONFIG_DRM_AMD_DC_SI)
3458 /* Register IRQ sources and initialize IRQ callbacks */
3459 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3460 {
3461         struct dc *dc = adev->dm.dc;
3462         struct common_irq_params *c_irq_params;
3463         struct dc_interrupt_params int_params = {0};
3464         int r;
3465         int i;
3466         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3467
3468         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3469         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3470
3471         /*
3472          * Actions of amdgpu_irq_add_id():
3473          * 1. Register a set() function with base driver.
3474          *    Base driver will call set() function to enable/disable an
3475          *    interrupt in DC hardware.
3476          * 2. Register amdgpu_dm_irq_handler().
3477          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3478          *    coming from DC hardware.
3479          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3480          *    for acknowledging and handling.
3481          */
3482         /* Use VBLANK interrupt */
3483         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3484                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3485                 if (r) {
3486                         DRM_ERROR("Failed to add crtc irq id!\n");
3487                         return r;
3488                 }
3489
3490                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3491                 int_params.irq_source =
3492                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3493
3494                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3495
3496                 c_irq_params->adev = adev;
3497                 c_irq_params->irq_src = int_params.irq_source;
3498
3499                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3500                                 dm_crtc_high_irq, c_irq_params);
3501         }
3502
3503         /* Use GRPH_PFLIP interrupt */
3504         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3505                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3506                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3507                 if (r) {
3508                         DRM_ERROR("Failed to add page flip irq id!\n");
3509                         return r;
3510                 }
3511
3512                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3513                 int_params.irq_source =
3514                         dc_interrupt_to_irq_source(dc, i, 0);
3515
3516                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3517
3518                 c_irq_params->adev = adev;
3519                 c_irq_params->irq_src = int_params.irq_source;
3520
3521                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3522                                 dm_pflip_high_irq, c_irq_params);
3523
3524         }
3525
3526         /* HPD */
3527         r = amdgpu_irq_add_id(adev, client_id,
3528                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3529         if (r) {
3530                 DRM_ERROR("Failed to add hpd irq id!\n");
3531                 return r;
3532         }
3533
3534         register_hpd_handlers(adev);
3535
3536         return 0;
3537 }
3538 #endif
3539
3540 /* Register IRQ sources and initialize IRQ callbacks */
3541 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3542 {
3543         struct dc *dc = adev->dm.dc;
3544         struct common_irq_params *c_irq_params;
3545         struct dc_interrupt_params int_params = {0};
3546         int r;
3547         int i;
3548         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3549
3550         if (adev->family >= AMDGPU_FAMILY_AI)
3551                 client_id = SOC15_IH_CLIENTID_DCE;
3552
3553         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3554         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3555
3556         /*
3557          * Actions of amdgpu_irq_add_id():
3558          * 1. Register a set() function with base driver.
3559          *    Base driver will call set() function to enable/disable an
3560          *    interrupt in DC hardware.
3561          * 2. Register amdgpu_dm_irq_handler().
3562          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3563          *    coming from DC hardware.
3564          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3565          *    for acknowledging and handling.
3566          */
3567         /* Use VBLANK interrupt */
3568         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3569                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3570                 if (r) {
3571                         DRM_ERROR("Failed to add crtc irq id!\n");
3572                         return r;
3573                 }
3574
3575                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3576                 int_params.irq_source =
3577                         dc_interrupt_to_irq_source(dc, i, 0);
3578
3579                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3580
3581                 c_irq_params->adev = adev;
3582                 c_irq_params->irq_src = int_params.irq_source;
3583
3584                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3585                                 dm_crtc_high_irq, c_irq_params);
3586         }
3587
3588         /* Use VUPDATE interrupt */
3589         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3590                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3591                 if (r) {
3592                         DRM_ERROR("Failed to add vupdate irq id!\n");
3593                         return r;
3594                 }
3595
3596                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3597                 int_params.irq_source =
3598                         dc_interrupt_to_irq_source(dc, i, 0);
3599
3600                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3601
3602                 c_irq_params->adev = adev;
3603                 c_irq_params->irq_src = int_params.irq_source;
3604
3605                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3606                                 dm_vupdate_high_irq, c_irq_params);
3607         }
3608
3609         /* Use GRPH_PFLIP interrupt */
3610         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3611                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3612                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3613                 if (r) {
3614                         DRM_ERROR("Failed to add page flip irq id!\n");
3615                         return r;
3616                 }
3617
3618                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3619                 int_params.irq_source =
3620                         dc_interrupt_to_irq_source(dc, i, 0);
3621
3622                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3623
3624                 c_irq_params->adev = adev;
3625                 c_irq_params->irq_src = int_params.irq_source;
3626
3627                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3628                                 dm_pflip_high_irq, c_irq_params);
3629
3630         }
3631
3632         /* HPD */
3633         r = amdgpu_irq_add_id(adev, client_id,
3634                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3635         if (r) {
3636                 DRM_ERROR("Failed to add hpd irq id!\n");
3637                 return r;
3638         }
3639
3640         register_hpd_handlers(adev);
3641
3642         return 0;
3643 }
3644
3645 /* Register IRQ sources and initialize IRQ callbacks */
3646 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3647 {
3648         struct dc *dc = adev->dm.dc;
3649         struct common_irq_params *c_irq_params;
3650         struct dc_interrupt_params int_params = {0};
3651         int r;
3652         int i;
3653 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3654         static const unsigned int vrtl_int_srcid[] = {
3655                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3656                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3657                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3658                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3659                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3660                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3661         };
3662 #endif
3663
3664         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3665         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3666
3667         /*
3668          * Actions of amdgpu_irq_add_id():
3669          * 1. Register a set() function with base driver.
3670          *    Base driver will call set() function to enable/disable an
3671          *    interrupt in DC hardware.
3672          * 2. Register amdgpu_dm_irq_handler().
3673          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3674          *    coming from DC hardware.
3675          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3676          *    for acknowledging and handling.
3677          */
3678
3679         /* Use VSTARTUP interrupt */
3680         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3681                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3682                         i++) {
3683                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3684
3685                 if (r) {
3686                         DRM_ERROR("Failed to add crtc irq id!\n");
3687                         return r;
3688                 }
3689
3690                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3691                 int_params.irq_source =
3692                         dc_interrupt_to_irq_source(dc, i, 0);
3693
3694                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3695
3696                 c_irq_params->adev = adev;
3697                 c_irq_params->irq_src = int_params.irq_source;
3698
3699                 amdgpu_dm_irq_register_interrupt(
3700                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3701         }
3702
3703         /* Use otg vertical line interrupt */
3704 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3705         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3706                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3707                                 vrtl_int_srcid[i], &adev->vline0_irq);
3708
3709                 if (r) {
3710                         DRM_ERROR("Failed to add vline0 irq id!\n");
3711                         return r;
3712                 }
3713
3714                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3715                 int_params.irq_source =
3716                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3717
3718                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3719                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3720                         break;
3721                 }
3722
3723                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3724                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3725
3726                 c_irq_params->adev = adev;
3727                 c_irq_params->irq_src = int_params.irq_source;
3728
3729                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3730                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3731         }
3732 #endif
3733
3734         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3735          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3736          * to trigger at the end of each vblank, regardless of the lock state,
3737          * matching DCE behaviour.
3738          */
3739         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3740              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3741              i++) {
3742                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3743
3744                 if (r) {
3745                         DRM_ERROR("Failed to add vupdate irq id!\n");
3746                         return r;
3747                 }
3748
3749                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3750                 int_params.irq_source =
3751                         dc_interrupt_to_irq_source(dc, i, 0);
3752
3753                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3754
3755                 c_irq_params->adev = adev;
3756                 c_irq_params->irq_src = int_params.irq_source;
3757
3758                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3759                                 dm_vupdate_high_irq, c_irq_params);
3760         }
3761
3762         /* Use GRPH_PFLIP interrupt */
3763         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3764                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3765                         i++) {
3766                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3767                 if (r) {
3768                         DRM_ERROR("Failed to add page flip irq id!\n");
3769                         return r;
3770                 }
3771
3772                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3773                 int_params.irq_source =
3774                         dc_interrupt_to_irq_source(dc, i, 0);
3775
3776                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3777
3778                 c_irq_params->adev = adev;
3779                 c_irq_params->irq_src = int_params.irq_source;
3780
3781                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3782                                 dm_pflip_high_irq, c_irq_params);
3783
3784         }
3785
3786         /* HPD */
3787         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3788                         &adev->hpd_irq);
3789         if (r) {
3790                 DRM_ERROR("Failed to add hpd irq id!\n");
3791                 return r;
3792         }
3793
3794         register_hpd_handlers(adev);
3795
3796         return 0;
3797 }

3798 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3799 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3800 {
3801         struct dc *dc = adev->dm.dc;
3802         struct common_irq_params *c_irq_params;
3803         struct dc_interrupt_params int_params = {0};
3804         int r, i;
3805
3806         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3807         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3808
3809         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3810                         &adev->dmub_outbox_irq);
3811         if (r) {
3812                 DRM_ERROR("Failed to add outbox irq id!\n");
3813                 return r;
3814         }
3815
3816         if (dc->ctx->dmub_srv) {
3817                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3818                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3819                 int_params.irq_source =
3820                 dc_interrupt_to_irq_source(dc, i, 0);
3821
3822                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3823
3824                 c_irq_params->adev = adev;
3825                 c_irq_params->irq_src = int_params.irq_source;
3826
3827                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3828                                 dm_dmub_outbox1_low_irq, c_irq_params);
3829         }
3830
3831         return 0;
3832 }
3833
3834 /*
3835  * Acquires the lock for the atomic state object and returns
3836  * the new atomic state.
3837  *
3838  * This should only be called during atomic check.
3839  */
3840 int dm_atomic_get_state(struct drm_atomic_state *state,
3841                         struct dm_atomic_state **dm_state)
3842 {
3843         struct drm_device *dev = state->dev;
3844         struct amdgpu_device *adev = drm_to_adev(dev);
3845         struct amdgpu_display_manager *dm = &adev->dm;
3846         struct drm_private_state *priv_state;
3847
3848         if (*dm_state)
3849                 return 0;
3850
3851         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3852         if (IS_ERR(priv_state))
3853                 return PTR_ERR(priv_state);
3854
3855         *dm_state = to_dm_atomic_state(priv_state);
3856
3857         return 0;
3858 }
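
/*
 * Hypothetical caller sketch (atomic check context only), assuming a
 * struct drm_atomic_state *state is in hand:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... dm_state->context is now safe to inspect and modify ...
 */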
3859
3860 static struct dm_atomic_state *
3861 dm_atomic_get_new_state(struct drm_atomic_state *state)
3862 {
3863         struct drm_device *dev = state->dev;
3864         struct amdgpu_device *adev = drm_to_adev(dev);
3865         struct amdgpu_display_manager *dm = &adev->dm;
3866         struct drm_private_obj *obj;
3867         struct drm_private_state *new_obj_state;
3868         int i;
3869
3870         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3871                 if (obj->funcs == dm->atomic_obj.funcs)
3872                         return to_dm_atomic_state(new_obj_state);
3873         }
3874
3875         return NULL;
3876 }
3877
3878 static struct drm_private_state *
3879 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3880 {
3881         struct dm_atomic_state *old_state, *new_state;
3882
3883         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3884         if (!new_state)
3885                 return NULL;
3886
3887         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3888
3889         old_state = to_dm_atomic_state(obj->state);
3890
3891         if (old_state && old_state->context)
3892                 new_state->context = dc_copy_state(old_state->context);
3893
3894         if (!new_state->context) {
3895                 kfree(new_state);
3896                 return NULL;
3897         }
3898
3899         return &new_state->base;
3900 }
3901
3902 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3903                                     struct drm_private_state *state)
3904 {
3905         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3906
3907         if (dm_state && dm_state->context)
3908                 dc_release_state(dm_state->context);
3909
3910         kfree(dm_state);
3911 }
3912
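/*
 * The DRM core invokes these hooks when the private object is duplicated
 * into a new atomic state (via drm_atomic_get_private_obj_state()) and when
 * that state is destroyed, keeping the dc_state refcounting balanced.
 */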
3913 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3914         .atomic_duplicate_state = dm_atomic_duplicate_state,
3915         .atomic_destroy_state = dm_atomic_destroy_state,
3916 };
3917
3918 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3919 {
3920         struct dm_atomic_state *state;
3921         int r;
3922
3923         adev->mode_info.mode_config_initialized = true;
3924
3925         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3926         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3927
3928         adev_to_drm(adev)->mode_config.max_width = 16384;
3929         adev_to_drm(adev)->mode_config.max_height = 16384;
3930
3931         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3932         if (adev->asic_type == CHIP_HAWAII)
3933                 /* disable prefer shadow for now due to hibernation issues */
3934                 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3935         else
3936                 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3937         /* indicates support for immediate flip */
3938         adev_to_drm(adev)->mode_config.async_page_flip = true;
3939
3940         state = kzalloc(sizeof(*state), GFP_KERNEL);
3941         if (!state)
3942                 return -ENOMEM;
3943
3944         state->context = dc_create_state(adev->dm.dc);
3945         if (!state->context) {
3946                 kfree(state);
3947                 return -ENOMEM;
3948         }
3949
3950         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3951
3952         drm_atomic_private_obj_init(adev_to_drm(adev),
3953                                     &adev->dm.atomic_obj,
3954                                     &state->base,
3955                                     &dm_atomic_state_funcs);
3956
3957         r = amdgpu_display_modeset_create_props(adev);
3958         if (r) {
3959                 dc_release_state(state->context);
3960                 kfree(state);
3961                 return r;
3962         }
3963
3964         r = amdgpu_dm_audio_init(adev);
3965         if (r) {
3966                 dc_release_state(state->context);
3967                 kfree(state);
3968                 return r;
3969         }
3970
3971         return 0;
3972 }
3973
3974 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3975 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3976 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3977
3978 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3979                                             int bl_idx)
3980 {
3981 #if defined(CONFIG_ACPI)
3982         struct amdgpu_dm_backlight_caps caps;
3983
3984         memset(&caps, 0, sizeof(caps));
3985
3986         if (dm->backlight_caps[bl_idx].caps_valid)
3987                 return;
3988
3989         amdgpu_acpi_get_backlight_caps(&caps);
3990         if (caps.caps_valid) {
3991                 dm->backlight_caps[bl_idx].caps_valid = true;
3992                 if (caps.aux_support)
3993                         return;
3994                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3995                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3996         } else {
3997                 dm->backlight_caps[bl_idx].min_input_signal =
3998                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3999                 dm->backlight_caps[bl_idx].max_input_signal =
4000                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4001         }
4002 #else
4003         if (dm->backlight_caps[bl_idx].aux_support)
4004                 return;
4005
4006         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4007         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4008 #endif
4009 }
4010
4011 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
4012                                 unsigned int *min, unsigned int *max)
4013 {
4014         if (!caps)
4015                 return 0;
4016
4017         if (caps->aux_support) {
4018                 // Firmware limits are in nits, DC API wants millinits.
4019                 *max = 1000 * caps->aux_max_input_signal;
4020                 *min = 1000 * caps->aux_min_input_signal;
4021         } else {
4022                 // Firmware limits are 8-bit, PWM control is 16-bit.
4023                 *max = 0x101 * caps->max_input_signal;
4024                 *min = 0x101 * caps->min_input_signal;
4025         }
4026         return 1;
4027 }
4028
4029 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4030                                         uint32_t brightness)
4031 {
4032         unsigned int min, max;
4033
4034         if (!get_brightness_range(caps, &min, &max))
4035                 return brightness;
4036
4037         // Rescale 0..255 to min..max
4038         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4039                                        AMDGPU_MAX_BL_LEVEL);
4040 }
4041
4042 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4043                                       uint32_t brightness)
4044 {
4045         unsigned int min, max;
4046
4047         if (!get_brightness_range(caps, &min, &max))
4048                 return brightness;
4049
4050         if (brightness < min)
4051                 return 0;
4052         // Rescale min..max to 0..255
4053         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4054                                  max - min);
4055 }
4056
4057 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4058                                          int bl_idx,
4059                                          u32 user_brightness)
4060 {
4061         struct amdgpu_dm_backlight_caps caps;
4062         struct dc_link *link;
4063         u32 brightness;
4064         bool rc;
4065
4066         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4067         caps = dm->backlight_caps[bl_idx];
4068
4069         dm->brightness[bl_idx] = user_brightness;
4070         /* update scratch register */
4071         if (bl_idx == 0)
4072                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4073         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4074         link = (struct dc_link *)dm->backlight_link[bl_idx];
4075
4076         /* Change brightness based on AUX property */
4077         if (caps.aux_support) {
4078                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4079                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4080                 if (!rc)
4081                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4082         } else {
4083                 rc = dc_link_set_backlight_level(link, brightness, 0);
4084                 if (!rc)
4085                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4086         }
4087
4088         if (rc)
4089                 dm->actual_brightness[bl_idx] = user_brightness;
4090 }
4091
4092 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4093 {
4094         struct amdgpu_display_manager *dm = bl_get_data(bd);
4095         int i;
4096
4097         for (i = 0; i < dm->num_of_edps; i++) {
4098                 if (bd == dm->backlight_dev[i])
4099                         break;
4100         }
4101         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4102                 i = 0;
4103         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4104
4105         return 0;
4106 }
4107
4108 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4109                                          int bl_idx)
4110 {
4111         struct amdgpu_dm_backlight_caps caps;
4112         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4113
4114         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4115         caps = dm->backlight_caps[bl_idx];
4116
4117         if (caps.aux_support) {
4118                 u32 avg, peak;
4119                 bool rc;
4120
4121                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4122                 if (!rc)
4123                         return dm->brightness[bl_idx];
4124                 return convert_brightness_to_user(&caps, avg);
4125         } else {
4126                 int ret = dc_link_get_backlight_level(link);
4127
4128                 if (ret == DC_ERROR_UNEXPECTED)
4129                         return dm->brightness[bl_idx];
4130                 return convert_brightness_to_user(&caps, ret);
4131         }
4132 }
4133
4134 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4135 {
4136         struct amdgpu_display_manager *dm = bl_get_data(bd);
4137         int i;
4138
4139         for (i = 0; i < dm->num_of_edps; i++) {
4140                 if (bd == dm->backlight_dev[i])
4141                         break;
4142         }
4143         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4144                 i = 0;
4145         return amdgpu_dm_backlight_get_level(dm, i);
4146 }
4147
4148 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4149         .options = BL_CORE_SUSPENDRESUME,
4150         .get_brightness = amdgpu_dm_backlight_get_brightness,
4151         .update_status  = amdgpu_dm_backlight_update_status,
4152 };
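
/*
 * Usage sketch: after registration the backlight core exposes these ops
 * via sysfs, so a userspace write such as
 *
 *	echo 128 > /sys/class/backlight/amdgpu_bl0/brightness
 *
 * ends up in amdgpu_dm_backlight_update_status(), and reading the
 * actual_brightness attribute calls amdgpu_dm_backlight_get_brightness().
 * The "amdgpu_bl0" name is only an example; the suffix depends on the DRM
 * minor index and eDP count (see bl_name below).
 */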
4153
4154 static void
4155 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4156 {
4157         char bl_name[16];
4158         struct backlight_properties props = { 0 };
4159
4160         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4161         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4162
4163         if (!acpi_video_backlight_use_native()) {
4164                 drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n");
4165                 /* Try registering an ACPI video backlight device instead. */
4166                 acpi_video_register_backlight();
4167                 return;
4168         }
4169
4170         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4171         props.brightness = AMDGPU_MAX_BL_LEVEL;
4172         props.type = BACKLIGHT_RAW;
4173
4174         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4175                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4176
4177         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4178                                                                        adev_to_drm(dm->adev)->dev,
4179                                                                        dm,
4180                                                                        &amdgpu_dm_backlight_ops,
4181                                                                        &props);
4182
4183         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4184                 DRM_ERROR("DM: Backlight registration failed!\n");
4185         else
4186                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4187 }
4188
4189 static int initialize_plane(struct amdgpu_display_manager *dm,
4190                             struct amdgpu_mode_info *mode_info, int plane_id,
4191                             enum drm_plane_type plane_type,
4192                             const struct dc_plane_cap *plane_cap)
4193 {
4194         struct drm_plane *plane;
4195         unsigned long possible_crtcs;
4196         int ret = 0;
4197
4198         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4199         if (!plane) {
4200                 DRM_ERROR("KMS: Failed to allocate plane\n");
4201                 return -ENOMEM;
4202         }
4203         plane->type = plane_type;
4204
4205         /*
4206          * HACK: IGT tests expect each primary plane to be tied to
4207          * exactly one possible CRTC. Only expose support for any
4208          * CRTC on planes that will not be used as a primary plane -
4209          * i.e. overlay or underlay planes.
4210          */
4211         possible_crtcs = 1 << plane_id;
4212         if (plane_id >= dm->dc->caps.max_streams)
4213                 possible_crtcs = 0xff;
4214
4215         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4216
4217         if (ret) {
4218                 DRM_ERROR("KMS: Failed to initialize plane\n");
4219                 kfree(plane);
4220                 return ret;
4221         }
4222
4223         if (mode_info)
4224                 mode_info->planes[plane_id] = plane;
4225
4226         return ret;
4227 }
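
/*
 * Example (illustrative): on an ASIC with caps.max_streams = 4, primary
 * plane 2 gets possible_crtcs = 1 << 2 = 0x4 and is bound to CRTC 2 only,
 * while an overlay created with plane_id >= 4 gets possible_crtcs = 0xff
 * and may be placed on any CRTC.
 */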
4228
4229
4230 static void register_backlight_device(struct amdgpu_display_manager *dm,
4231                                       struct dc_link *link)
4232 {
4233         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4234             link->type != dc_connection_none) {
4235                 /*
4236                  * Even if registration fails, we should continue with
4237                  * DM initialization, because not having a backlight control
4238                  * is better than a black screen.
4239                  */
4240                 if (!dm->backlight_dev[dm->num_of_edps])
4241                         amdgpu_dm_register_backlight_device(dm);
4242
4243                 if (dm->backlight_dev[dm->num_of_edps]) {
4244                         dm->backlight_link[dm->num_of_edps] = link;
4245                         dm->num_of_edps++;
4246                 }
4247         }
4248 }
4249
4250 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4251
4252 /*
4253  * In this architecture, the association
4254  * connector -> encoder -> crtc
4255  * is not really required. The crtc and connector will hold the
4256  * display_index as an abstraction to use with the DAL component.
4257  *
4258  * Returns 0 on success
4259  */
4260 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4261 {
4262         struct amdgpu_display_manager *dm = &adev->dm;
4263         s32 i;
4264         struct amdgpu_dm_connector *aconnector = NULL;
4265         struct amdgpu_encoder *aencoder = NULL;
4266         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4267         u32 link_cnt;
4268         s32 primary_planes;
4269         enum dc_connection_type new_connection_type = dc_connection_none;
4270         const struct dc_plane_cap *plane;
4271         bool psr_feature_enabled = false;
4272         int max_overlay = dm->dc->caps.max_slave_planes;
4273
4274         dm->display_indexes_num = dm->dc->caps.max_streams;
4275         /* Update the actual used number of crtc */
4276         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4277
4278         amdgpu_dm_set_irq_funcs(adev);
4279
4280         link_cnt = dm->dc->caps.max_links;
4281         if (amdgpu_dm_mode_config_init(dm->adev)) {
4282                 DRM_ERROR("DM: Failed to initialize mode config\n");
4283                 return -EINVAL;
4284         }
4285
4286         /* There is one primary plane per CRTC */
4287         primary_planes = dm->dc->caps.max_streams;
4288         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4289
4290         /*
4291          * Initialize primary planes, implicit planes for legacy IOCTLS.
4292          * Order is reversed to match iteration order in atomic check.
4293          */
4294         for (i = (primary_planes - 1); i >= 0; i--) {
4295                 plane = &dm->dc->caps.planes[i];
4296
4297                 if (initialize_plane(dm, mode_info, i,
4298                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4299                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4300                         goto fail;
4301                 }
4302         }
4303
4304         /*
4305          * Initialize overlay planes, index starting after primary planes.
4306          * These planes have a higher DRM index than the primary planes since
4307          * they should be considered as having a higher z-order.
4308          * Order is reversed to match iteration order in atomic check.
4309          *
4310          * Only support DCN for now, and only expose one so we don't encourage
4311          * userspace to use up all the pipes.
4312          */
4313         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4314                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4315
4316                 /* Do not create overlay if MPO disabled */
4317                 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4318                         break;
4319
4320                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4321                         continue;
4322
4323                 if (!plane->blends_with_above || !plane->blends_with_below)
4324                         continue;
4325
4326                 if (!plane->pixel_format_support.argb8888)
4327                         continue;
4328
4329                 if (max_overlay-- == 0)
4330                         break;
4331
4332                 if (initialize_plane(dm, NULL, primary_planes + i,
4333                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4334                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4335                         goto fail;
4336                 }
4337         }
4338
4339         for (i = 0; i < dm->dc->caps.max_streams; i++)
4340                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4341                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4342                         goto fail;
4343                 }
4344
4345         /* Use Outbox interrupt */
4346         switch (adev->ip_versions[DCE_HWIP][0]) {
4347         case IP_VERSION(3, 0, 0):
4348         case IP_VERSION(3, 1, 2):
4349         case IP_VERSION(3, 1, 3):
4350         case IP_VERSION(3, 1, 4):
4351         case IP_VERSION(3, 1, 5):
4352         case IP_VERSION(3, 1, 6):
4353         case IP_VERSION(3, 2, 0):
4354         case IP_VERSION(3, 2, 1):
4355         case IP_VERSION(2, 1, 0):
4356                 if (register_outbox_irq_handlers(dm->adev)) {
4357                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4358                         goto fail;
4359                 }
4360                 break;
4361         default:
4362                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4363                               adev->ip_versions[DCE_HWIP][0]);
4364         }
4365
4366         /* Determine whether to enable PSR support by default. */
4367         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4368                 switch (adev->ip_versions[DCE_HWIP][0]) {
4369                 case IP_VERSION(3, 1, 2):
4370                 case IP_VERSION(3, 1, 3):
4371                 case IP_VERSION(3, 1, 4):
4372                 case IP_VERSION(3, 1, 5):
4373                 case IP_VERSION(3, 1, 6):
4374                 case IP_VERSION(3, 2, 0):
4375                 case IP_VERSION(3, 2, 1):
4376                         psr_feature_enabled = true;
4377                         break;
4378                 default:
4379                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4380                         break;
4381                 }
4382         }
4383
4384         /* loops over all connectors on the board */
4385         for (i = 0; i < link_cnt; i++) {
4386                 struct dc_link *link = NULL;
4387
4388                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4389                         DRM_ERROR(
4390                                 "KMS: Cannot support more than %d display indexes\n",
4391                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4392                         continue;
4393                 }
4394
4395                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4396                 if (!aconnector)
4397                         goto fail;
4398
4399                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4400                 if (!aencoder)
4401                         goto fail;
4402
4403                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4404                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4405                         goto fail;
4406                 }
4407
4408                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4409                         DRM_ERROR("KMS: Failed to initialize connector\n");
4410                         goto fail;
4411                 }
4412
4413                 link = dc_get_link_at_index(dm->dc, i);
4414
4415                 if (!dc_link_detect_connection_type(link, &new_connection_type))
4416                         DRM_ERROR("KMS: Failed to detect connector\n");
4417
4418                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4419                         emulated_link_detect(link);
4420                         amdgpu_dm_update_connector_after_detect(aconnector);
4421                 } else {
4422                         bool ret = false;
4423
4424                         mutex_lock(&dm->dc_lock);
4425                         ret = dc_link_detect(link, DETECT_REASON_BOOT);
4426                         mutex_unlock(&dm->dc_lock);
4427
4428                         if (ret) {
4429                                 amdgpu_dm_update_connector_after_detect(aconnector);
4430                                 register_backlight_device(dm, link);
4431
4432                                 if (dm->num_of_edps)
4433                                         update_connector_ext_caps(aconnector);
4434
4435                                 if (psr_feature_enabled)
4436                                         amdgpu_dm_set_psr_caps(link);
4437
4438                                 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4439                                  * PSR is also supported.
4440                                  */
4441                                 if (link->psr_settings.psr_feature_enabled)
4442                                         adev_to_drm(adev)->vblank_disable_immediate = false;
4443                         }
4444                 }
4445                 amdgpu_set_panel_orientation(&aconnector->base);
4446         }
4447
4448         /* If we didn't find a panel, notify the acpi video detection */
4449         if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
4450                 acpi_video_report_nolcd();
4451
4452         /* Software is initialized. Now we can register interrupt handlers. */
4453         switch (adev->asic_type) {
4454 #if defined(CONFIG_DRM_AMD_DC_SI)
4455         case CHIP_TAHITI:
4456         case CHIP_PITCAIRN:
4457         case CHIP_VERDE:
4458         case CHIP_OLAND:
4459                 if (dce60_register_irq_handlers(dm->adev)) {
4460                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4461                         goto fail;
4462                 }
4463                 break;
4464 #endif
4465         case CHIP_BONAIRE:
4466         case CHIP_HAWAII:
4467         case CHIP_KAVERI:
4468         case CHIP_KABINI:
4469         case CHIP_MULLINS:
4470         case CHIP_TONGA:
4471         case CHIP_FIJI:
4472         case CHIP_CARRIZO:
4473         case CHIP_STONEY:
4474         case CHIP_POLARIS11:
4475         case CHIP_POLARIS10:
4476         case CHIP_POLARIS12:
4477         case CHIP_VEGAM:
4478         case CHIP_VEGA10:
4479         case CHIP_VEGA12:
4480         case CHIP_VEGA20:
4481                 if (dce110_register_irq_handlers(dm->adev)) {
4482                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4483                         goto fail;
4484                 }
4485                 break;
4486         default:
4487                 switch (adev->ip_versions[DCE_HWIP][0]) {
4488                 case IP_VERSION(1, 0, 0):
4489                 case IP_VERSION(1, 0, 1):
4490                 case IP_VERSION(2, 0, 2):
4491                 case IP_VERSION(2, 0, 3):
4492                 case IP_VERSION(2, 0, 0):
4493                 case IP_VERSION(2, 1, 0):
4494                 case IP_VERSION(3, 0, 0):
4495                 case IP_VERSION(3, 0, 2):
4496                 case IP_VERSION(3, 0, 3):
4497                 case IP_VERSION(3, 0, 1):
4498                 case IP_VERSION(3, 1, 2):
4499                 case IP_VERSION(3, 1, 3):
4500                 case IP_VERSION(3, 1, 4):
4501                 case IP_VERSION(3, 1, 5):
4502                 case IP_VERSION(3, 1, 6):
4503                 case IP_VERSION(3, 2, 0):
4504                 case IP_VERSION(3, 2, 1):
4505                         if (dcn10_register_irq_handlers(dm->adev)) {
4506                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4507                                 goto fail;
4508                         }
4509                         break;
4510                 default:
4511                         DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4512                                         adev->ip_versions[DCE_HWIP][0]);
4513                         goto fail;
4514                 }
4515                 break;
4516         }
4517
4518         return 0;
4519 fail:
4520         kfree(aencoder);
4521         kfree(aconnector);
4522
4523         return -EINVAL;
4524 }
4525
4526 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4527 {
4528         drm_atomic_private_obj_fini(&dm->atomic_obj);
4530 }
4531
4532 /******************************************************************************
4533  * amdgpu_display_funcs functions
4534  *****************************************************************************/
4535
4536 /*
4537  * dm_bandwidth_update - program display watermarks
4538  *
4539  * @adev: amdgpu_device pointer
4540  *
4541  * Calculate and program the display watermarks and line buffer allocation.
4542  */
4543 static void dm_bandwidth_update(struct amdgpu_device *adev)
4544 {
4545         /* TODO: implement later */
4546 }
4547
4548 static const struct amdgpu_display_funcs dm_display_funcs = {
4549         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4550         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4551         .backlight_set_level = NULL, /* never called for DC */
4552         .backlight_get_level = NULL, /* never called for DC */
4553         .hpd_sense = NULL, /* called unconditionally */
4554         .hpd_set_polarity = NULL, /* called unconditionally */
4555         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4556         .page_flip_get_scanoutpos =
4557                 dm_crtc_get_scanoutpos, /* called unconditionally */
4558         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4559         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4560 };
4561
4562 #if defined(CONFIG_DEBUG_KERNEL_DC)
4563
4564 static ssize_t s3_debug_store(struct device *device,
4565                               struct device_attribute *attr,
4566                               const char *buf,
4567                               size_t count)
4568 {
4569         int ret;
4570         int s3_state;
4571         struct drm_device *drm_dev = dev_get_drvdata(device);
4572         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4573
4574         ret = kstrtoint(buf, 0, &s3_state);
4575
4576         if (ret == 0) {
4577                 if (s3_state) {
4578                         dm_resume(adev);
4579                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4580                 } else
4581                         dm_suspend(adev);
4582         }
4583
4584         return ret == 0 ? count : 0;
4585 }
4586
4587 DEVICE_ATTR_WO(s3_debug);
4588
4589 #endif
4590
4591 static int dm_init_microcode(struct amdgpu_device *adev)
4592 {
4593         char *fw_name_dmub;
4594         int r;
4595
4596         switch (adev->ip_versions[DCE_HWIP][0]) {
4597         case IP_VERSION(2, 1, 0):
4598                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
4599                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
4600                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
4601                 break;
4602         case IP_VERSION(3, 0, 0):
4603                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
4604                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
4605                 else
4606                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
4607                 break;
4608         case IP_VERSION(3, 0, 1):
4609                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
4610                 break;
4611         case IP_VERSION(3, 0, 2):
4612                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
4613                 break;
4614         case IP_VERSION(3, 0, 3):
4615                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
4616                 break;
4617         case IP_VERSION(3, 1, 2):
4618         case IP_VERSION(3, 1, 3):
4619                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
4620                 break;
4621         case IP_VERSION(3, 1, 4):
4622                 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
4623                 break;
4624         case IP_VERSION(3, 1, 5):
4625                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
4626                 break;
4627         case IP_VERSION(3, 1, 6):
4628                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
4629                 break;
4630         case IP_VERSION(3, 2, 0):
4631                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
4632                 break;
4633         case IP_VERSION(3, 2, 1):
4634                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
4635                 break;
4636         default:
4637                 /* ASIC doesn't support DMUB. */
4638                 return 0;
4639         }
4640         r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
4641         if (r)
4642                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
4643         return r;
4644 }
4645
4646 static int dm_early_init(void *handle)
4647 {
4648         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4649         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4650         struct atom_context *ctx = mode_info->atom_context;
4651         int index = GetIndexIntoMasterTable(DATA, Object_Header);
4652         u16 data_offset;
4653
4654         /* if there is no object header, skip DM */
4655         if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
4656                 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
4657                 dev_info(adev->dev, "No object header, skipping DM\n");
4658                 return -ENOENT;
4659         }
4660
4661         switch (adev->asic_type) {
4662 #if defined(CONFIG_DRM_AMD_DC_SI)
4663         case CHIP_TAHITI:
4664         case CHIP_PITCAIRN:
4665         case CHIP_VERDE:
4666                 adev->mode_info.num_crtc = 6;
4667                 adev->mode_info.num_hpd = 6;
4668                 adev->mode_info.num_dig = 6;
4669                 break;
4670         case CHIP_OLAND:
4671                 adev->mode_info.num_crtc = 2;
4672                 adev->mode_info.num_hpd = 2;
4673                 adev->mode_info.num_dig = 2;
4674                 break;
4675 #endif
4676         case CHIP_BONAIRE:
4677         case CHIP_HAWAII:
4678                 adev->mode_info.num_crtc = 6;
4679                 adev->mode_info.num_hpd = 6;
4680                 adev->mode_info.num_dig = 6;
4681                 break;
4682         case CHIP_KAVERI:
4683                 adev->mode_info.num_crtc = 4;
4684                 adev->mode_info.num_hpd = 6;
4685                 adev->mode_info.num_dig = 7;
4686                 break;
4687         case CHIP_KABINI:
4688         case CHIP_MULLINS:
4689                 adev->mode_info.num_crtc = 2;
4690                 adev->mode_info.num_hpd = 6;
4691                 adev->mode_info.num_dig = 6;
4692                 break;
4693         case CHIP_FIJI:
4694         case CHIP_TONGA:
4695                 adev->mode_info.num_crtc = 6;
4696                 adev->mode_info.num_hpd = 6;
4697                 adev->mode_info.num_dig = 7;
4698                 break;
4699         case CHIP_CARRIZO:
4700                 adev->mode_info.num_crtc = 3;
4701                 adev->mode_info.num_hpd = 6;
4702                 adev->mode_info.num_dig = 9;
4703                 break;
4704         case CHIP_STONEY:
4705                 adev->mode_info.num_crtc = 2;
4706                 adev->mode_info.num_hpd = 6;
4707                 adev->mode_info.num_dig = 9;
4708                 break;
4709         case CHIP_POLARIS11:
4710         case CHIP_POLARIS12:
4711                 adev->mode_info.num_crtc = 5;
4712                 adev->mode_info.num_hpd = 5;
4713                 adev->mode_info.num_dig = 5;
4714                 break;
4715         case CHIP_POLARIS10:
4716         case CHIP_VEGAM:
4717                 adev->mode_info.num_crtc = 6;
4718                 adev->mode_info.num_hpd = 6;
4719                 adev->mode_info.num_dig = 6;
4720                 break;
4721         case CHIP_VEGA10:
4722         case CHIP_VEGA12:
4723         case CHIP_VEGA20:
4724                 adev->mode_info.num_crtc = 6;
4725                 adev->mode_info.num_hpd = 6;
4726                 adev->mode_info.num_dig = 6;
4727                 break;
4728         default:
4729
4730                 switch (adev->ip_versions[DCE_HWIP][0]) {
4731                 case IP_VERSION(2, 0, 2):
4732                 case IP_VERSION(3, 0, 0):
4733                         adev->mode_info.num_crtc = 6;
4734                         adev->mode_info.num_hpd = 6;
4735                         adev->mode_info.num_dig = 6;
4736                         break;
4737                 case IP_VERSION(2, 0, 0):
4738                 case IP_VERSION(3, 0, 2):
4739                         adev->mode_info.num_crtc = 5;
4740                         adev->mode_info.num_hpd = 5;
4741                         adev->mode_info.num_dig = 5;
4742                         break;
4743                 case IP_VERSION(2, 0, 3):
4744                 case IP_VERSION(3, 0, 3):
4745                         adev->mode_info.num_crtc = 2;
4746                         adev->mode_info.num_hpd = 2;
4747                         adev->mode_info.num_dig = 2;
4748                         break;
4749                 case IP_VERSION(1, 0, 0):
4750                 case IP_VERSION(1, 0, 1):
4751                 case IP_VERSION(3, 0, 1):
4752                 case IP_VERSION(2, 1, 0):
4753                 case IP_VERSION(3, 1, 2):
4754                 case IP_VERSION(3, 1, 3):
4755                 case IP_VERSION(3, 1, 4):
4756                 case IP_VERSION(3, 1, 5):
4757                 case IP_VERSION(3, 1, 6):
4758                 case IP_VERSION(3, 2, 0):
4759                 case IP_VERSION(3, 2, 1):
4760                         adev->mode_info.num_crtc = 4;
4761                         adev->mode_info.num_hpd = 4;
4762                         adev->mode_info.num_dig = 4;
4763                         break;
4764                 default:
4765                         DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4766                                         adev->ip_versions[DCE_HWIP][0]);
4767                         return -EINVAL;
4768                 }
4769                 break;
4770         }
4771
4772         if (!adev->mode_info.funcs)
4773                 adev->mode_info.funcs = &dm_display_funcs;
4774
4775         /*
4776          * Note: Do NOT change adev->audio_endpt_rreg and
4777          * adev->audio_endpt_wreg because they are initialised in
4778          * amdgpu_device_init()
4779          */
4780 #if defined(CONFIG_DEBUG_KERNEL_DC)
4781         device_create_file(
4782                 adev_to_drm(adev)->dev,
4783                 &dev_attr_s3_debug);
4784 #endif
4785         adev->dc_enabled = true;
4786
4787         return dm_init_microcode(adev);
4788 }
4789
4790 static bool modereset_required(struct drm_crtc_state *crtc_state)
4791 {
4792         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4793 }
4794
4795 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4796 {
4797         drm_encoder_cleanup(encoder);
4798         kfree(encoder);
4799 }
4800
4801 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4802         .destroy = amdgpu_dm_encoder_destroy,
4803 };
4804
4805 static int
4806 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4807                             const enum surface_pixel_format format,
4808                             enum dc_color_space *color_space)
4809 {
4810         bool full_range;
4811
4812         *color_space = COLOR_SPACE_SRGB;
4813
4814         /* DRM color properties only affect non-RGB formats. */
4815         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4816                 return 0;
4817
4818         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4819
4820         switch (plane_state->color_encoding) {
4821         case DRM_COLOR_YCBCR_BT601:
4822                 if (full_range)
4823                         *color_space = COLOR_SPACE_YCBCR601;
4824                 else
4825                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4826                 break;
4827
4828         case DRM_COLOR_YCBCR_BT709:
4829                 if (full_range)
4830                         *color_space = COLOR_SPACE_YCBCR709;
4831                 else
4832                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4833                 break;
4834
4835         case DRM_COLOR_YCBCR_BT2020:
4836                 if (full_range)
4837                         *color_space = COLOR_SPACE_2020_YCBCR;
4838                 else
4839                         return -EINVAL;
4840                 break;
4841
4842         default:
4843                 return -EINVAL;
4844         }
4845
4846         return 0;
4847 }
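
/*
 * Example mapping (illustrative): an NV12 plane with
 * DRM_COLOR_YCBCR_BT709 encoding and DRM_COLOR_YCBCR_LIMITED_RANGE
 * resolves to COLOR_SPACE_YCBCR709_LIMITED, while any RGB format (below
 * SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) keeps the COLOR_SPACE_SRGB default
 * regardless of the plane's color properties.
 */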
4848
4849 static int
4850 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4851                             const struct drm_plane_state *plane_state,
4852                             const u64 tiling_flags,
4853                             struct dc_plane_info *plane_info,
4854                             struct dc_plane_address *address,
4855                             bool tmz_surface,
4856                             bool force_disable_dcc)
4857 {
4858         const struct drm_framebuffer *fb = plane_state->fb;
4859         const struct amdgpu_framebuffer *afb =
4860                 to_amdgpu_framebuffer(plane_state->fb);
4861         int ret;
4862
4863         memset(plane_info, 0, sizeof(*plane_info));
4864
4865         switch (fb->format->format) {
4866         case DRM_FORMAT_C8:
4867                 plane_info->format =
4868                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4869                 break;
4870         case DRM_FORMAT_RGB565:
4871                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4872                 break;
4873         case DRM_FORMAT_XRGB8888:
4874         case DRM_FORMAT_ARGB8888:
4875                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4876                 break;
4877         case DRM_FORMAT_XRGB2101010:
4878         case DRM_FORMAT_ARGB2101010:
4879                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4880                 break;
4881         case DRM_FORMAT_XBGR2101010:
4882         case DRM_FORMAT_ABGR2101010:
4883                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4884                 break;
4885         case DRM_FORMAT_XBGR8888:
4886         case DRM_FORMAT_ABGR8888:
4887                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4888                 break;
4889         case DRM_FORMAT_NV21:
4890                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4891                 break;
4892         case DRM_FORMAT_NV12:
4893                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4894                 break;
4895         case DRM_FORMAT_P010:
4896                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4897                 break;
4898         case DRM_FORMAT_XRGB16161616F:
4899         case DRM_FORMAT_ARGB16161616F:
4900                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4901                 break;
4902         case DRM_FORMAT_XBGR16161616F:
4903         case DRM_FORMAT_ABGR16161616F:
4904                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4905                 break;
4906         case DRM_FORMAT_XRGB16161616:
4907         case DRM_FORMAT_ARGB16161616:
4908                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4909                 break;
4910         case DRM_FORMAT_XBGR16161616:
4911         case DRM_FORMAT_ABGR16161616:
4912                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4913                 break;
4914         default:
4915                 DRM_ERROR(
4916                         "Unsupported screen format %p4cc\n",
4917                         &fb->format->format);
4918                 return -EINVAL;
4919         }
4920
4921         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4922         case DRM_MODE_ROTATE_0:
4923                 plane_info->rotation = ROTATION_ANGLE_0;
4924                 break;
4925         case DRM_MODE_ROTATE_90:
4926                 plane_info->rotation = ROTATION_ANGLE_90;
4927                 break;
4928         case DRM_MODE_ROTATE_180:
4929                 plane_info->rotation = ROTATION_ANGLE_180;
4930                 break;
4931         case DRM_MODE_ROTATE_270:
4932                 plane_info->rotation = ROTATION_ANGLE_270;
4933                 break;
4934         default:
4935                 plane_info->rotation = ROTATION_ANGLE_0;
4936                 break;
4937         }
4938
4940         plane_info->visible = true;
4941         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4942
4943         plane_info->layer_index = plane_state->normalized_zpos;
4944
4945         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4946                                           &plane_info->color_space);
4947         if (ret)
4948                 return ret;
4949
4950         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4951                                            plane_info->rotation, tiling_flags,
4952                                            &plane_info->tiling_info,
4953                                            &plane_info->plane_size,
4954                                            &plane_info->dcc, address,
4955                                            tmz_surface, force_disable_dcc);
4956         if (ret)
4957                 return ret;
4958
4959         fill_blending_from_plane_state(
4960                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
4961                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4962
4963         return 0;
4964 }
4965
4966 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4967                                     struct dc_plane_state *dc_plane_state,
4968                                     struct drm_plane_state *plane_state,
4969                                     struct drm_crtc_state *crtc_state)
4970 {
4971         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4972         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4973         struct dc_scaling_info scaling_info;
4974         struct dc_plane_info plane_info;
4975         int ret;
4976         bool force_disable_dcc = false;
4977
4978         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4979         if (ret)
4980                 return ret;
4981
4982         dc_plane_state->src_rect = scaling_info.src_rect;
4983         dc_plane_state->dst_rect = scaling_info.dst_rect;
4984         dc_plane_state->clip_rect = scaling_info.clip_rect;
4985         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4986
4987         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4988         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4989                                           afb->tiling_flags,
4990                                           &plane_info,
4991                                           &dc_plane_state->address,
4992                                           afb->tmz_surface,
4993                                           force_disable_dcc);
4994         if (ret)
4995                 return ret;
4996
4997         dc_plane_state->format = plane_info.format;
4998         dc_plane_state->color_space = plane_info.color_space;
5000         dc_plane_state->plane_size = plane_info.plane_size;
5001         dc_plane_state->rotation = plane_info.rotation;
5002         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5003         dc_plane_state->stereo_format = plane_info.stereo_format;
5004         dc_plane_state->tiling_info = plane_info.tiling_info;
5005         dc_plane_state->visible = plane_info.visible;
5006         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5007         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5008         dc_plane_state->global_alpha = plane_info.global_alpha;
5009         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5010         dc_plane_state->dcc = plane_info.dcc;
5011         dc_plane_state->layer_index = plane_info.layer_index;
5012         dc_plane_state->flip_int_enabled = true;
5013
5014         /*
5015          * Always set input transfer function, since plane state is refreshed
5016          * every time.
5017          */
5018         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5019         if (ret)
5020                 return ret;
5021
5022         return 0;
5023 }
5024
5025 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
5026                                       struct rect *dirty_rect, s32 x,
5027                                       s32 y, s32 width, s32 height,
5028                                       int *i, bool ffu)
5029 {
5030         if (*i > DC_MAX_DIRTY_RECTS)
5031                 return;
5032
5033         if (*i == DC_MAX_DIRTY_RECTS)
5034                 goto out;
5035
5036         dirty_rect->x = x;
5037         dirty_rect->y = y;
5038         dirty_rect->width = width;
5039         dirty_rect->height = height;
5040
5041         if (ffu)
5042                 drm_dbg(plane->dev,
5043                         "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5044                         plane->base.id, width, height);
5045         else
5046                 drm_dbg(plane->dev,
5047                         "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
5048                         plane->base.id, x, y, width, height);
5049
5050 out:
5051         (*i)++;
5052 }
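
/*
 * Note on the bounds handling above: with *i == DC_MAX_DIRTY_RECTS the
 * store is skipped but the counter still advances to
 * DC_MAX_DIRTY_RECTS + 1, where it saturates. A caller that later sees a
 * count above DC_MAX_DIRTY_RECTS therefore knows the clip list overflowed
 * and can fall back to a full-frame update (the "ffu" path in
 * fill_dc_dirty_rects() below).
 */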
5053
5054 /**
5055  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5056  *
5057  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5058  *         remote fb
5059  * @old_plane_state: Old state of @plane
5060  * @new_plane_state: New state of @plane
5061  * @crtc_state: New state of CRTC connected to the @plane
5062  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5063  * @dirty_regions_changed: set to true if the plane's bounding box changed
5064  *
5065  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5066  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5067  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5068  * amdgpu_dm's.
5069  *
5070  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5071  * plane with regions that require flushing to the eDP remote buffer. In
5072  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5073  * implicitly provide damage clips without any client support via the plane
5074  * bounds.
5075  */
5076 static void fill_dc_dirty_rects(struct drm_plane *plane,
5077                                 struct drm_plane_state *old_plane_state,
5078                                 struct drm_plane_state *new_plane_state,
5079                                 struct drm_crtc_state *crtc_state,
5080                                 struct dc_flip_addrs *flip_addrs,
5081                                 bool *dirty_regions_changed)
5082 {
5083         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5084         struct rect *dirty_rects = flip_addrs->dirty_rects;
5085         u32 num_clips;
5086         struct drm_mode_rect *clips;
5087         bool bb_changed;
5088         bool fb_changed;
5089         u32 i = 0;

5090         *dirty_regions_changed = false;
5091
5092         /*
5093          * Cursor plane has its own dirty rect update interface. See
5094          * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5095          */
5096         if (plane->type == DRM_PLANE_TYPE_CURSOR)
5097                 return;
5098
5099         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5100         clips = drm_plane_get_damage_clips(new_plane_state);
5101
5102         if (!dm_crtc_state->mpo_requested) {
5103                 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
5104                         goto ffu;
5105
5106                 for (; flip_addrs->dirty_rect_count < num_clips; clips++)
5107                         fill_dc_dirty_rect(new_plane_state->plane,
5108                                            &dirty_rects[flip_addrs->dirty_rect_count],
5109                                            clips->x1, clips->y1,
5110                                            clips->x2 - clips->x1, clips->y2 - clips->y1,
5111                                            &flip_addrs->dirty_rect_count,
5112                                            false);
5113                 return;
5114         }
5115
5116         /*
5117          * MPO is requested. Add entire plane bounding box to dirty rects if
5118          * flipped to or damaged.
5119          *
5120          * If plane is moved or resized, also add old bounding box to dirty
5121          * rects.
5122          */
5123         fb_changed = old_plane_state->fb->base.id !=
5124                      new_plane_state->fb->base.id;
5125         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5126                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
5127                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
5128                       old_plane_state->crtc_h != new_plane_state->crtc_h);
5129
5130         drm_dbg(plane->dev,
5131                 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5132                 new_plane_state->plane->base.id,
5133                 bb_changed, fb_changed, num_clips);
5134
5135         *dirty_regions_changed = bb_changed;
5136
5137         if (bb_changed) {
5138                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5139                                    new_plane_state->crtc_x,
5140                                    new_plane_state->crtc_y,
5141                                    new_plane_state->crtc_w,
5142                                    new_plane_state->crtc_h, &i, false);
5143
5144                 /* Add old plane bounding-box if plane is moved or resized */
5145                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5146                                    old_plane_state->crtc_x,
5147                                    old_plane_state->crtc_y,
5148                                    old_plane_state->crtc_w,
5149                                    old_plane_state->crtc_h, &i, false);
5150         }
5151
5152         if (num_clips) {
5153                 for (; i < num_clips; clips++)
5154                         fill_dc_dirty_rect(new_plane_state->plane,
5155                                            &dirty_rects[i], clips->x1,
5156                                            clips->y1, clips->x2 - clips->x1,
5157                                            clips->y2 - clips->y1, &i, false);
5158         } else if (fb_changed && !bb_changed) {
5159                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5160                                    new_plane_state->crtc_x,
5161                                    new_plane_state->crtc_y,
5162                                    new_plane_state->crtc_w,
5163                                    new_plane_state->crtc_h, &i, false);
5164         }
5165
5166         if (i > DC_MAX_DIRTY_RECTS)
5167                 goto ffu;
5168
5169         flip_addrs->dirty_rect_count = i;
5170         return;
5171
5172 ffu:
5173         fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
5174                            dm_crtc_state->base.mode.crtc_hdisplay,
5175                            dm_crtc_state->base.mode.crtc_vdisplay,
5176                            &flip_addrs->dirty_rect_count, true);
5177 }
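
/*
 * Conversion sketch (hypothetical values): a client damage clip arrives
 * as a struct drm_mode_rect in corner form, e.g.
 *
 *	{ .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 }
 *
 * and is stored by fill_dc_dirty_rect() in DC's width/height form as
 * x = 10, y = 20, width = x2 - x1 = 100, height = y2 - y1 = 50.
 */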
5178
5179 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5180                                            const struct dm_connector_state *dm_state,
5181                                            struct dc_stream_state *stream)
5182 {
5183         enum amdgpu_rmx_type rmx_type;
5184
5185         struct rect src = { 0 }; /* viewport in composition space */
5186         struct rect dst = { 0 }; /* stream addressable area */
5187
5188         /* no mode. nothing to be done */
5189         if (!mode)
5190                 return;
5191
5192         /* Full screen scaling by default */
5193         src.width = mode->hdisplay;
5194         src.height = mode->vdisplay;
5195         dst.width = stream->timing.h_addressable;
5196         dst.height = stream->timing.v_addressable;
5197
5198         if (dm_state) {
5199                 rmx_type = dm_state->scaling;
5200                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5201                         if (src.width * dst.height <
5202                                         src.height * dst.width) {
5203                                 /* height needs less upscaling/more downscaling */
5204                                 dst.width = src.width *
5205                                                 dst.height / src.height;
5206                         } else {
5207                                 /* width needs less upscaling/more downscaling */
5208                                 dst.height = src.height *
5209                                                 dst.width / src.width;
5210                         }
5211                 } else if (rmx_type == RMX_CENTER) {
5212                         dst = src;
5213                 }
5214
5215                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5216                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5217
5218                 if (dm_state->underscan_enable) {
5219                         dst.x += dm_state->underscan_hborder / 2;
5220                         dst.y += dm_state->underscan_vborder / 2;
5221                         dst.width -= dm_state->underscan_hborder;
5222                         dst.height -= dm_state->underscan_vborder;
5223                 }
5224         }
5225
5226         stream->src = src;
5227         stream->dst = dst;
5228
5229         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5230                       dst.x, dst.y, dst.width, dst.height);
5231
5232 }
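
/*
 * Worked example (illustrative): a 1280x720 source on a 1920x1200 timing
 * with RMX_ASPECT compares 1280 * 1200 = 1536000 against
 * 720 * 1920 = 1382400. The product is not smaller, so the width needs
 * less upscaling and dst.height = 720 * 1920 / 1280 = 1080. Centering
 * then yields dst = 1920x1080 at y = (1200 - 1080) / 2 = 60, i.e.
 * letterboxing with 60-line borders top and bottom.
 */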
5233
5234 static enum dc_color_depth
5235 convert_color_depth_from_display_info(const struct drm_connector *connector,
5236                                       bool is_y420, int requested_bpc)
5237 {
5238         u8 bpc;
5239
5240         if (is_y420) {
5241                 bpc = 8;
5242
5243                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5244                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5245                         bpc = 16;
5246                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5247                         bpc = 12;
5248                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5249                         bpc = 10;
5250         } else {
5251                 bpc = (uint8_t)connector->display_info.bpc;
5252                 /* Assume 8 bpc by default if no bpc is specified. */
5253                 bpc = bpc ? bpc : 8;
5254         }
5255
5256         if (requested_bpc > 0) {
5257                 /*
5258                  * Cap display bpc based on the user requested value.
5259                  *
5260                  * The value for state->max_bpc may not be correctly updated
5261                  * depending on when the connector gets added to the state
5262                  * or if this was called outside of atomic check, so it
5263                  * can't be used directly.
5264                  */
5265                 bpc = min_t(u8, bpc, requested_bpc);
5266
5267                 /* Round down to the nearest even number. */
5268                 bpc = bpc - (bpc & 1);
5269         }
5270
5271         switch (bpc) {
5272         case 0:
5273                 /*
5274                  * Temporary workaround: DRM doesn't parse color depth
5275                  * for EDID revisions before 1.4.
5276                  * TODO: Fix edid parsing
5277                  */
5278                 return COLOR_DEPTH_888;
5279         case 6:
5280                 return COLOR_DEPTH_666;
5281         case 8:
5282                 return COLOR_DEPTH_888;
5283         case 10:
5284                 return COLOR_DEPTH_101010;
5285         case 12:
5286                 return COLOR_DEPTH_121212;
5287         case 14:
5288                 return COLOR_DEPTH_141414;
5289         case 16:
5290                 return COLOR_DEPTH_161616;
5291         default:
5292                 return COLOR_DEPTH_UNDEFINED;
5293         }
5294 }
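
/*
 * Example (illustrative): a panel reporting display_info.bpc = 10 with a
 * user-requested max_bpc of 8 is capped to min(10, 8) = 8 and returns
 * COLOR_DEPTH_888. An (unusual) EDID bpc of 11 under a permissive
 * max_bpc of 16 is first rounded down to the even value 10 and maps to
 * COLOR_DEPTH_101010.
 */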
5295
5296 static enum dc_aspect_ratio
5297 get_aspect_ratio(const struct drm_display_mode *mode_in)
5298 {
5299         /* 1-1 mapping, since both enums follow the HDMI spec. */
5300         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5301 }
5302
5303 static enum dc_color_space
5304 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5305 {
5306         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5307
5308         switch (dc_crtc_timing->pixel_encoding) {
5309         case PIXEL_ENCODING_YCBCR422:
5310         case PIXEL_ENCODING_YCBCR444:
5311         case PIXEL_ENCODING_YCBCR420:
5312         {
5313                 /*
5314                  * Per the HDMI spec, 27.03 MHz is the separation point
5315                  * between HDTV and SDTV, so use YCbCr709 above it and
5316                  * YCbCr601 at or below it.
5317                  */
5318                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5319                         if (dc_crtc_timing->flags.Y_ONLY)
5320                                 color_space =
5321                                         COLOR_SPACE_YCBCR709_LIMITED;
5322                         else
5323                                 color_space = COLOR_SPACE_YCBCR709;
5324                 } else {
5325                         if (dc_crtc_timing->flags.Y_ONLY)
5326                                 color_space =
5327                                         COLOR_SPACE_YCBCR601_LIMITED;
5328                         else
5329                                 color_space = COLOR_SPACE_YCBCR601;
5330                 }
5331
5332         }
5333         break;
5334         case PIXEL_ENCODING_RGB:
5335                 color_space = COLOR_SPACE_SRGB;
5336                 break;
5337
5338         default:
5339                 WARN_ON(1);
5340                 break;
5341         }
5342
5343         return color_space;
5344 }
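
/*
 * Example (illustrative): 1080p60 in YCbCr 4:4:4 has pix_clk_100hz =
 * 1485000 (148.5 MHz), above the 270300 threshold, so it is classed as
 * HDTV and gets COLOR_SPACE_YCBCR709; 480p at 27 MHz (270000) falls at
 * or below the threshold and gets COLOR_SPACE_YCBCR601.
 */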
5345
5346 static bool adjust_colour_depth_from_display_info(
5347         struct dc_crtc_timing *timing_out,
5348         const struct drm_display_info *info)
5349 {
5350         enum dc_color_depth depth = timing_out->display_color_depth;
5351         int normalized_clk;
5352         do {
5353                 normalized_clk = timing_out->pix_clk_100hz / 10;
5354                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5355                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5356                         normalized_clk /= 2;
5357                 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
5358                 switch (depth) {
5359                 case COLOR_DEPTH_888:
5360                         break;
5361                 case COLOR_DEPTH_101010:
5362                         normalized_clk = (normalized_clk * 30) / 24;
5363                         break;
5364                 case COLOR_DEPTH_121212:
5365                         normalized_clk = (normalized_clk * 36) / 24;
5366                         break;
5367                 case COLOR_DEPTH_161616:
5368                         normalized_clk = (normalized_clk * 48) / 24;
5369                         break;
5370                 default:
5371                         /* The above depths are the only ones valid for HDMI. */
5372                         return false;
5373                 }
5374                 if (normalized_clk <= info->max_tmds_clock) {
5375                         timing_out->display_color_depth = depth;
5376                         return true;
5377                 }
5378         } while (--depth > COLOR_DEPTH_666);
5379         return false;
5380 }
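
/*
 * Worked example (illustrative): a 594 MHz 4K60 timing gives
 * normalized_clk = 5940000 / 10 = 594000 kHz. Against a sink with
 * max_tmds_clock = 600000 kHz, 12 bpc would need 594000 * 36 / 24 =
 * 891000 kHz and 10 bpc would need 594000 * 30 / 24 = 742500 kHz, both
 * too fast, so the loop settles on COLOR_DEPTH_888 at 594000 kHz.
 */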
5381
5382 static void fill_stream_properties_from_drm_display_mode(
5383         struct dc_stream_state *stream,
5384         const struct drm_display_mode *mode_in,
5385         const struct drm_connector *connector,
5386         const struct drm_connector_state *connector_state,
5387         const struct dc_stream_state *old_stream,
5388         int requested_bpc)
5389 {
5390         struct dc_crtc_timing *timing_out = &stream->timing;
5391         const struct drm_display_info *info = &connector->display_info;
5392         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5393         struct hdmi_vendor_infoframe hv_frame;
5394         struct hdmi_avi_infoframe avi_frame;
5395
5396         memset(&hv_frame, 0, sizeof(hv_frame));
5397         memset(&avi_frame, 0, sizeof(avi_frame));
5398
5399         timing_out->h_border_left = 0;
5400         timing_out->h_border_right = 0;
5401         timing_out->v_border_top = 0;
5402         timing_out->v_border_bottom = 0;
5403         /* TODO: un-hardcode */
5404         if (drm_mode_is_420_only(info, mode_in)
5405                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5406                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5407         else if (drm_mode_is_420_also(info, mode_in)
5408                         && aconnector->force_yuv420_output)
5409                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5410         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5411                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5412                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5413         else
5414                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5415
5416         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5417         timing_out->display_color_depth = convert_color_depth_from_display_info(
5418                 connector,
5419                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5420                 requested_bpc);
5421         timing_out->scan_type = SCANNING_TYPE_NODATA;
5422         timing_out->hdmi_vic = 0;
5423
5424         if (old_stream) {
5425                 timing_out->vic = old_stream->timing.vic;
5426                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5427                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5428         } else {
5429                 timing_out->vic = drm_match_cea_mode(mode_in);
5430                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5431                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5432                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5433                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5434         }
5435
5436         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5437                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5438                 timing_out->vic = avi_frame.video_code;
5439                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5440                 timing_out->hdmi_vic = hv_frame.vic;
5441         }
5442
5443         if (is_freesync_video_mode(mode_in, aconnector)) {
5444                 timing_out->h_addressable = mode_in->hdisplay;
5445                 timing_out->h_total = mode_in->htotal;
5446                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5447                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5448                 timing_out->v_total = mode_in->vtotal;
5449                 timing_out->v_addressable = mode_in->vdisplay;
5450                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5451                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5452                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5453         } else {
5454                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5455                 timing_out->h_total = mode_in->crtc_htotal;
5456                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5457                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5458                 timing_out->v_total = mode_in->crtc_vtotal;
5459                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5460                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5461                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5462                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5463         }
5464
5465         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5466
5467         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5468         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5469         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5470                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5471                     drm_mode_is_420_also(info, mode_in) &&
5472                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5473                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5474                         adjust_colour_depth_from_display_info(timing_out, info);
5475                 }
5476         }
5477
5478         stream->output_color_space = get_output_color_space(timing_out);
5479 }
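
/*
 * Editor's illustrative sketch (not part of the driver, never called): the
 * porch and sync-width arithmetic above, worked through on the standard
 * 1080p60 timing (hdisplay 1920, hsync_start 2008, hsync_end 2052,
 * htotal 2200, clock 148500 kHz): front porch 2008 - 1920 = 88, sync width
 * 2052 - 2008 = 44, and pix_clk_100hz = 148500 * 10 = 1485000.
 */
static inline int example_1080p60_h_front_porch(void)
{
        return 2008 - 1920; /* hsync_start - hdisplay, in pixels */
}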
5480
5481 static void fill_audio_info(struct audio_info *audio_info,
5482                             const struct drm_connector *drm_connector,
5483                             const struct dc_sink *dc_sink)
5484 {
5485         int i = 0;
5486         int cea_revision = 0;
5487         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5488
5489         audio_info->manufacture_id = edid_caps->manufacturer_id;
5490         audio_info->product_id = edid_caps->product_id;
5491
5492         cea_revision = drm_connector->display_info.cea_rev;
5493
5494         strscpy(audio_info->display_name,
5495                 edid_caps->display_name,
5496                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5497
5498         if (cea_revision >= 3) {
5499                 audio_info->mode_count = edid_caps->audio_mode_count;
5500
5501                 for (i = 0; i < audio_info->mode_count; ++i) {
5502                         audio_info->modes[i].format_code =
5503                                         (enum audio_format_code)
5504                                         (edid_caps->audio_modes[i].format_code);
5505                         audio_info->modes[i].channel_count =
5506                                         edid_caps->audio_modes[i].channel_count;
5507                         audio_info->modes[i].sample_rates.all =
5508                                         edid_caps->audio_modes[i].sample_rate;
5509                         audio_info->modes[i].sample_size =
5510                                         edid_caps->audio_modes[i].sample_size;
5511                 }
5512         }
5513
5514         audio_info->flags.all = edid_caps->speaker_flags;
5515
5516         /* TODO: We only check the progressive mode; check the interlaced mode too */
5517         if (drm_connector->latency_present[0]) {
5518                 audio_info->video_latency = drm_connector->video_latency[0];
5519                 audio_info->audio_latency = drm_connector->audio_latency[0];
5520         }
5521
5522         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5523
5524 }
5525
5526 static void
5527 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5528                                       struct drm_display_mode *dst_mode)
5529 {
5530         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5531         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5532         dst_mode->crtc_clock = src_mode->crtc_clock;
5533         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5534         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5535         dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5536         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5537         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5538         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5539         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5540         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5541         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5542         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5543         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5544 }
5545
5546 static void
5547 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5548                                         const struct drm_display_mode *native_mode,
5549                                         bool scale_enabled)
5550 {
5551         if (scale_enabled) {
5552                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5553         } else if (native_mode->clock == drm_mode->clock &&
5554                         native_mode->htotal == drm_mode->htotal &&
5555                         native_mode->vtotal == drm_mode->vtotal) {
5556                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5557         } else {
5558                 /* Neither scaling nor an amdgpu-inserted mode: nothing to patch */
5559         }
5560 }
5561
5562 static struct dc_sink *
5563 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5564 {
5565         struct dc_sink_init_data sink_init_data = { 0 };
5566         struct dc_sink *sink = NULL;
5567         sink_init_data.link = aconnector->dc_link;
5568         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5569
5570         sink = dc_sink_create(&sink_init_data);
5571         if (!sink) {
5572                 DRM_ERROR("Failed to create sink!\n");
5573                 return NULL;
5574         }
5575         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5576
5577         return sink;
5578 }
5579
5580 static void set_multisync_trigger_params(
5581                 struct dc_stream_state *stream)
5582 {
5583         struct dc_stream_state *master = NULL;
5584
5585         if (stream->triggered_crtc_reset.enabled) {
5586                 master = stream->triggered_crtc_reset.event_source;
5587                 stream->triggered_crtc_reset.event =
5588                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5589                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5590                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5591         }
5592 }
5593
5594 static void set_master_stream(struct dc_stream_state *stream_set[],
5595                               int stream_count)
5596 {
5597         int j, highest_rfr = 0, master_stream = 0;
5598
5599         for (j = 0; j < stream_count; j++) {
5600                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5601                         int refresh_rate = 0;
5602
5603                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5604                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5605                         if (refresh_rate > highest_rfr) {
5606                                 highest_rfr = refresh_rate;
5607                                 master_stream = j;
5608                         }
5609                 }
5610         }
5611         for (j = 0; j < stream_count; j++) {
5612                 if (stream_set[j])
5613                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5614         }
5615 }
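
/*
 * Editor's illustrative sketch (not part of the driver, never called): the
 * refresh-rate computation used above, in isolation. With
 * pix_clk_100hz = 1485000 (148.5 MHz), h_total = 2200 and v_total = 1125
 * it yields 148500000 / 2475000 = 60 Hz.
 */
static inline int example_refresh_rate_hz(int pix_clk_100hz, int h_total,
                                          int v_total)
{
        return (pix_clk_100hz * 100) / (h_total * v_total);
}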
5616
5617 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5618 {
5619         int i = 0;
5620         struct dc_stream_state *stream;
5621
5622         if (context->stream_count < 2)
5623                 return;
5624         for (i = 0; i < context->stream_count; i++) {
5625                 if (!context->streams[i])
5626                         continue;
5627                 /*
5628                  * TODO: add a function to read AMD VSDB bits and set
5629                  * crtc_sync_master.multi_sync_enabled flag
5630                  * For now it's set to false
5631                  */
5632         }
5633
5634         set_master_stream(context->streams, context->stream_count);
5635
5636         for (i = 0; i < context->stream_count; i++) {
5637                 stream = context->streams[i];
5638
5639                 if (!stream)
5640                         continue;
5641
5642                 set_multisync_trigger_params(stream);
5643         }
5644 }
5645
5646 /**
5647  * DOC: FreeSync Video
5648  *
5649  * When a userspace application wants to play a video, the content follows a
5650  * standard format definition that usually specifies the FPS for that format.
5651  * The list below illustrates some video formats and their expected FPS,
5652  * respectively:
5653  *
5654  * - TV/NTSC (23.976 FPS)
5655  * - Cinema (24 FPS)
5656  * - TV/PAL (25 FPS)
5657  * - TV/NTSC (29.97 FPS)
5658  * - TV/NTSC (30 FPS)
5659  * - Cinema HFR (48 FPS)
5660  * - TV/PAL (50 FPS)
5661  * - Commonly used (60 FPS)
5662  * - Multiples of 24 (48,72,96 FPS)
5663  *
5664  * The list of standard video formats is small and can be added to the
5665  * connector's modeset list beforehand. With that, userspace can leverage
5666  * FreeSync to extend the front porch in order to attain the target refresh
5667  * rate. Such a switch happens seamlessly, without screen blanking or
5668  * reprogramming of the output in any other way. If userspace requests a
5669  * modeset that is compatible with a FreeSync mode and differs only in the
5670  * refresh rate, DC will skip the full update and avoid any blink during
5671  * the transition. For example, a video player can switch the mode from
5672  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen,
5673  * without causing any display blink. The same concept applies to any
5674  * other mode setting change that only differs in refresh rate.
5675  */
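
/*
 * Editor's illustrative sketch (not part of the driver, never called): the
 * front-porch stretch described above keeps the pixel clock and h_total
 * fixed and raises v_total to hit the target rate. Assuming the standard
 * 1080p60 timing (148500 kHz clock, h_total 2200, v_total 1125), a 50 Hz
 * target gives 148500000 / (2200 * 50) = 1350 lines, i.e. 225 extra lines
 * of vertical front porch.
 */
static inline u32 example_freesync_v_total(u32 pix_clk_khz, u32 h_total,
                                           u32 target_hz)
{
        return (pix_clk_khz * 1000) / (h_total * target_hz);
}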
5676 static struct drm_display_mode *
5677 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5678                 bool use_probed_modes)
5679 {
5680         struct drm_display_mode *m, *m_pref = NULL;
5681         u16 current_refresh, highest_refresh;
5682         struct list_head *list_head = use_probed_modes ?
5683                 &aconnector->base.probed_modes :
5684                 &aconnector->base.modes;
5685
5686         if (aconnector->freesync_vid_base.clock != 0)
5687                 return &aconnector->freesync_vid_base;
5688
5689         /* Find the preferred mode */
5690         list_for_each_entry(m, list_head, head) {
5691                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5692                         m_pref = m;
5693                         break;
5694                 }
5695         }
5696
5697         if (!m_pref) {
5698                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
5699                 m_pref = list_first_entry_or_null(
5700                                 &aconnector->base.modes, struct drm_display_mode, head);
5701                 if (!m_pref) {
5702                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5703                         return NULL;
5704                 }
5705         }
5706
5707         highest_refresh = drm_mode_vrefresh(m_pref);
5708
5709         /*
5710          * Find the mode with the highest refresh rate at the same
5711          * resolution. For some monitors the preferred mode is not the mode
5712          * with the highest supported refresh rate.
5713          */
5714         list_for_each_entry(m, list_head, head) {
5715                 current_refresh  = drm_mode_vrefresh(m);
5716
5717                 if (m->hdisplay == m_pref->hdisplay &&
5718                     m->vdisplay == m_pref->vdisplay &&
5719                     highest_refresh < current_refresh) {
5720                         highest_refresh = current_refresh;
5721                         m_pref = m;
5722                 }
5723         }
5724
5725         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5726         return m_pref;
5727 }
5728
5729 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5730                 struct amdgpu_dm_connector *aconnector)
5731 {
5732         struct drm_display_mode *high_mode;
5733         int timing_diff;
5734
5735         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5736         if (!high_mode || !mode)
5737                 return false;
5738
5739         timing_diff = high_mode->vtotal - mode->vtotal;
5740
5741         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5742             high_mode->hdisplay != mode->hdisplay ||
5743             high_mode->vdisplay != mode->vdisplay ||
5744             high_mode->hsync_start != mode->hsync_start ||
5745             high_mode->hsync_end != mode->hsync_end ||
5746             high_mode->htotal != mode->htotal ||
5747             high_mode->hskew != mode->hskew ||
5748             high_mode->vscan != mode->vscan ||
5749             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5750             high_mode->vsync_end - mode->vsync_end != timing_diff)
5751                 return false;
5752         else
5753                 return true;
5754 }
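
/*
 * Editor's illustrative sketch (not part of the driver, never called): a
 * mode passes the check above only when it matches the base mode in every
 * field except that v_total, vsync_start and vsync_end are all shifted by
 * the same number of lines, i.e. only the vertical front porch differs.
 */
static inline bool example_only_v_front_porch_differs(int base_vtotal,
                                                      int base_vsync_start,
                                                      int vtotal,
                                                      int vsync_start)
{
        return (base_vtotal - vtotal) == (base_vsync_start - vsync_start);
}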
5755
5756 #if defined(CONFIG_DRM_AMD_DC_DCN)
5757 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5758                             struct dc_sink *sink, struct dc_stream_state *stream,
5759                             struct dsc_dec_dpcd_caps *dsc_caps)
5760 {
5761         stream->timing.flags.DSC = 0;
5762         dsc_caps->is_dsc_supported = false;
5763
5764         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5765             sink->sink_signal == SIGNAL_TYPE_EDP)) {
5766                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5767                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5768                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5769                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5770                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5771                                 dsc_caps);
5772         }
5773 }
5774
5775
5776 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5777                                     struct dc_sink *sink, struct dc_stream_state *stream,
5778                                     struct dsc_dec_dpcd_caps *dsc_caps,
5779                                     uint32_t max_dsc_target_bpp_limit_override)
5780 {
5781         const struct dc_link_settings *verified_link_cap = NULL;
5782         u32 link_bw_in_kbps;
5783         u32 edp_min_bpp_x16, edp_max_bpp_x16;
5784         struct dc *dc = sink->ctx->dc;
5785         struct dc_dsc_bw_range bw_range = {0};
5786         struct dc_dsc_config dsc_cfg = {0};
5787
5788         verified_link_cap = dc_link_get_link_cap(stream->link);
5789         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5790         edp_min_bpp_x16 = 8 * 16;
5791         edp_max_bpp_x16 = 8 * 16;
5792
5793         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5794                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5795
5796         if (edp_max_bpp_x16 < edp_min_bpp_x16)
5797                 edp_min_bpp_x16 = edp_max_bpp_x16;
5798
5799         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5800                                 dc->debug.dsc_min_slice_height_override,
5801                                 edp_min_bpp_x16, edp_max_bpp_x16,
5802                                 dsc_caps,
5803                                 &stream->timing,
5804                                 &bw_range)) {
5805
5806                 if (bw_range.max_kbps < link_bw_in_kbps) {
5807                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5808                                         dsc_caps,
5809                                         dc->debug.dsc_min_slice_height_override,
5810                                         max_dsc_target_bpp_limit_override,
5811                                         0,
5812                                         &stream->timing,
5813                                         &dsc_cfg)) {
5814                                 stream->timing.dsc_cfg = dsc_cfg;
5815                                 stream->timing.flags.DSC = 1;
5816                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5817                         }
5818                         return;
5819                 }
5820         }
5821
5822         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5823                                 dsc_caps,
5824                                 dc->debug.dsc_min_slice_height_override,
5825                                 max_dsc_target_bpp_limit_override,
5826                                 link_bw_in_kbps,
5827                                 &stream->timing,
5828                                 &dsc_cfg)) {
5829                 stream->timing.dsc_cfg = dsc_cfg;
5830                 stream->timing.flags.DSC = 1;
5831         }
5832 }
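
/*
 * Editor's note with an illustrative sketch (not part of the driver, never
 * called): the *_bpp_x16 values above appear to be DSC target rates in
 * 1/16th-of-a-bit-per-pixel units, so 8 * 16 = 128 encodes 8.0 bpp.
 */
static inline u32 example_bpp_from_bpp_x16(u32 bpp_x16)
{
        return bpp_x16 / 16; /* integer part of the bpp target */
}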
5833
5834
5835 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5836                                         struct dc_sink *sink, struct dc_stream_state *stream,
5837                                         struct dsc_dec_dpcd_caps *dsc_caps)
5838 {
5839         struct drm_connector *drm_connector = &aconnector->base;
5840         u32 link_bandwidth_kbps;
5841         struct dc *dc = sink->ctx->dc;
5842         u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
5843         u32 dsc_max_supported_bw_in_kbps;
5844         u32 max_dsc_target_bpp_limit_override =
5845                 drm_connector->display_info.max_dsc_bpp;
5846
5847         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5848                                                         dc_link_get_link_cap(aconnector->dc_link));
5849
5850         /* Set DSC policy according to dsc_clock_en */
5851         dc_dsc_policy_set_enable_dsc_when_not_needed(
5852                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5853
5854         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
5855             !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
5856             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5857
5858                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5859
5860         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5861                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5862                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5863                                                 dsc_caps,
5864                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5865                                                 max_dsc_target_bpp_limit_override,
5866                                                 link_bandwidth_kbps,
5867                                                 &stream->timing,
5868                                                 &stream->timing.dsc_cfg)) {
5869                                 stream->timing.flags.DSC = 1;
5870                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5871                         }
5872                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5873                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
5874                         max_supported_bw_in_kbps = link_bandwidth_kbps;
5875                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5876
5877                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5878                                         max_supported_bw_in_kbps > 0 &&
5879                                         dsc_max_supported_bw_in_kbps > 0)
5880                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5881                                                 dsc_caps,
5882                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5883                                                 max_dsc_target_bpp_limit_override,
5884                                                 dsc_max_supported_bw_in_kbps,
5885                                                 &stream->timing,
5886                                                 &stream->timing.dsc_cfg)) {
5887                                         stream->timing.flags.DSC = 1;
5888                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5889                                                                          __func__, drm_connector->name);
5890                                 }
5891                 }
5892         }
5893
5894         /* Overwrite the stream flag if DSC is enabled through debugfs */
5895         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5896                 stream->timing.flags.DSC = 1;
5897
5898         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5899                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5900
5901         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5902                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5903
5904         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5905                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5906 }
5907 #endif /* CONFIG_DRM_AMD_DC_DCN */
5908
5909 static struct dc_stream_state *
5910 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5911                        const struct drm_display_mode *drm_mode,
5912                        const struct dm_connector_state *dm_state,
5913                        const struct dc_stream_state *old_stream,
5914                        int requested_bpc)
5915 {
5916         struct drm_display_mode *preferred_mode = NULL;
5917         struct drm_connector *drm_connector;
5918         const struct drm_connector_state *con_state =
5919                 dm_state ? &dm_state->base : NULL;
5920         struct dc_stream_state *stream = NULL;
5921         struct drm_display_mode mode;
5922         struct drm_display_mode saved_mode;
5923         struct drm_display_mode *freesync_mode = NULL;
5924         bool native_mode_found = false;
5925         bool recalculate_timing = false;
5926         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5927         int mode_refresh;
5928         int preferred_refresh = 0;
5929         enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
5930 #if defined(CONFIG_DRM_AMD_DC_DCN)
5931         struct dsc_dec_dpcd_caps dsc_caps;
5932 #endif
5933
5934         struct dc_sink *sink = NULL;
5935
5936         drm_mode_init(&mode, drm_mode);
5937         memset(&saved_mode, 0, sizeof(saved_mode));
5938
5939         if (aconnector == NULL) {
5940                 DRM_ERROR("aconnector is NULL!\n");
5941                 return stream;
5942         }
5943
5944         drm_connector = &aconnector->base;
5945
5946         if (!aconnector->dc_sink) {
5947                 sink = create_fake_sink(aconnector);
5948                 if (!sink)
5949                         return stream;
5950         } else {
5951                 sink = aconnector->dc_sink;
5952                 dc_sink_retain(sink);
5953         }
5954
5955         stream = dc_create_stream_for_sink(sink);
5956
5957         if (stream == NULL) {
5958                 DRM_ERROR("Failed to create stream for sink!\n");
5959                 goto finish;
5960         }
5961
5962         stream->dm_stream_context = aconnector;
5963
5964         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5965                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5966
5967         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5968                 /* Search for preferred mode */
5969                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5970                         native_mode_found = true;
5971                         break;
5972                 }
5973         }
5974         if (!native_mode_found)
5975                 preferred_mode = list_first_entry_or_null(
5976                                 &aconnector->base.modes,
5977                                 struct drm_display_mode,
5978                                 head);
5979
5980         mode_refresh = drm_mode_vrefresh(&mode);
5981
5982         if (preferred_mode == NULL) {
5983                 /*
5984                  * This may not be an error: the use case is a hotplug with no
5985                  * usermode call to reset and set the mode. In that case we set
5986                  * the mode ourselves to restore the previous one, and the mode
5987                  * list may not yet be filled in.
5988                  */
5989                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5990         } else {
5991                 recalculate_timing = amdgpu_freesync_vid_mode &&
5992                                  is_freesync_video_mode(&mode, aconnector);
5993                 if (recalculate_timing) {
5994                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5995                         drm_mode_copy(&saved_mode, &mode);
5996                         drm_mode_copy(&mode, freesync_mode);
5997                 } else {
5998                         decide_crtc_timing_for_drm_display_mode(
5999                                         &mode, preferred_mode, scale);
6000
6001                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6002                 }
6003         }
6004
6005         if (recalculate_timing)
6006                 drm_mode_set_crtcinfo(&saved_mode, 0);
6007         else if (!dm_state)
6008                 drm_mode_set_crtcinfo(&mode, 0);
6009
6010         /*
6011          * If scaling is enabled and the refresh rate didn't change,
6012          * copy the vic and sync polarities from the old timing.
6013          */
6014         if (!scale || mode_refresh != preferred_refresh)
6015                 fill_stream_properties_from_drm_display_mode(
6016                         stream, &mode, &aconnector->base, con_state, NULL,
6017                         requested_bpc);
6018         else
6019                 fill_stream_properties_from_drm_display_mode(
6020                         stream, &mode, &aconnector->base, con_state, old_stream,
6021                         requested_bpc);
6022
6023         if (aconnector->timing_changed) {
6024                 DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
6025                                 __func__,
6026                                 stream->timing.display_color_depth,
6027                                 aconnector->timing_requested->display_color_depth);
6028                 stream->timing = *aconnector->timing_requested;
6029         }
6030
6031 #if defined(CONFIG_DRM_AMD_DC_DCN)
6032         /* SST DSC determination policy */
6033         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6034         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6035                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6036 #endif
6037
6038         update_stream_scaling_settings(&mode, dm_state, stream);
6039
6040         fill_audio_info(
6041                 &stream->audio_info,
6042                 drm_connector,
6043                 sink);
6044
6045         update_stream_signal(stream, sink);
6046
6047         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6048                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6049
6050         if (stream->link->psr_settings.psr_feature_enabled) {
6051                 /*
6052                  * Decide whether the stream supports VSC SDP colorimetry
6053                  * before building the VSC infopacket.
6054                  */
6055                 stream->use_vsc_sdp_for_colorimetry = false;
6056                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6057                         stream->use_vsc_sdp_for_colorimetry =
6058                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6059                 } else {
6060                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6061                                 stream->use_vsc_sdp_for_colorimetry = true;
6062                 }
6063                 if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
6064                         tf = TRANSFER_FUNC_GAMMA_22;
6065                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
6066                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6067
6068         }
6069 finish:
6070         dc_sink_release(sink);
6071
6072         return stream;
6073 }
6074
6075 static enum drm_connector_status
6076 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6077 {
6078         bool connected;
6079         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6080
6081         /*
6082          * Notes:
6083          * 1. This interface is NOT called in the context of the HPD irq.
6084          * 2. This interface *is* called in the context of a user-mode ioctl,
6085          * which makes it a bad place for *any* MST-related activity.
6086          */
6087
6088         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6089             !aconnector->fake_enable)
6090                 connected = (aconnector->dc_sink != NULL);
6091         else
6092                 connected = (aconnector->base.force == DRM_FORCE_ON ||
6093                                 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
6094
6095         update_subconnector_property(aconnector);
6096
6097         return (connected ? connector_status_connected :
6098                         connector_status_disconnected);
6099 }
6100
6101 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6102                                             struct drm_connector_state *connector_state,
6103                                             struct drm_property *property,
6104                                             uint64_t val)
6105 {
6106         struct drm_device *dev = connector->dev;
6107         struct amdgpu_device *adev = drm_to_adev(dev);
6108         struct dm_connector_state *dm_old_state =
6109                 to_dm_connector_state(connector->state);
6110         struct dm_connector_state *dm_new_state =
6111                 to_dm_connector_state(connector_state);
6112
6113         int ret = -EINVAL;
6114
6115         if (property == dev->mode_config.scaling_mode_property) {
6116                 enum amdgpu_rmx_type rmx_type;
6117
6118                 switch (val) {
6119                 case DRM_MODE_SCALE_CENTER:
6120                         rmx_type = RMX_CENTER;
6121                         break;
6122                 case DRM_MODE_SCALE_ASPECT:
6123                         rmx_type = RMX_ASPECT;
6124                         break;
6125                 case DRM_MODE_SCALE_FULLSCREEN:
6126                         rmx_type = RMX_FULL;
6127                         break;
6128                 case DRM_MODE_SCALE_NONE:
6129                 default:
6130                         rmx_type = RMX_OFF;
6131                         break;
6132                 }
6133
6134                 if (dm_old_state->scaling == rmx_type)
6135                         return 0;
6136
6137                 dm_new_state->scaling = rmx_type;
6138                 ret = 0;
6139         } else if (property == adev->mode_info.underscan_hborder_property) {
6140                 dm_new_state->underscan_hborder = val;
6141                 ret = 0;
6142         } else if (property == adev->mode_info.underscan_vborder_property) {
6143                 dm_new_state->underscan_vborder = val;
6144                 ret = 0;
6145         } else if (property == adev->mode_info.underscan_property) {
6146                 dm_new_state->underscan_enable = val;
6147                 ret = 0;
6148         } else if (property == adev->mode_info.abm_level_property) {
6149                 dm_new_state->abm_level = val;
6150                 ret = 0;
6151         }
6152
6153         return ret;
6154 }
6155
6156 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6157                                             const struct drm_connector_state *state,
6158                                             struct drm_property *property,
6159                                             uint64_t *val)
6160 {
6161         struct drm_device *dev = connector->dev;
6162         struct amdgpu_device *adev = drm_to_adev(dev);
6163         struct dm_connector_state *dm_state =
6164                 to_dm_connector_state(state);
6165         int ret = -EINVAL;
6166
6167         if (property == dev->mode_config.scaling_mode_property) {
6168                 switch (dm_state->scaling) {
6169                 case RMX_CENTER:
6170                         *val = DRM_MODE_SCALE_CENTER;
6171                         break;
6172                 case RMX_ASPECT:
6173                         *val = DRM_MODE_SCALE_ASPECT;
6174                         break;
6175                 case RMX_FULL:
6176                         *val = DRM_MODE_SCALE_FULLSCREEN;
6177                         break;
6178                 case RMX_OFF:
6179                 default:
6180                         *val = DRM_MODE_SCALE_NONE;
6181                         break;
6182                 }
6183                 ret = 0;
6184         } else if (property == adev->mode_info.underscan_hborder_property) {
6185                 *val = dm_state->underscan_hborder;
6186                 ret = 0;
6187         } else if (property == adev->mode_info.underscan_vborder_property) {
6188                 *val = dm_state->underscan_vborder;
6189                 ret = 0;
6190         } else if (property == adev->mode_info.underscan_property) {
6191                 *val = dm_state->underscan_enable;
6192                 ret = 0;
6193         } else if (property == adev->mode_info.abm_level_property) {
6194                 *val = dm_state->abm_level;
6195                 ret = 0;
6196         }
6197
6198         return ret;
6199 }
6200
6201 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6202 {
6203         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6204
6205         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6206 }
6207
6208 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6209 {
6210         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6211         const struct dc_link *link = aconnector->dc_link;
6212         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6213         struct amdgpu_display_manager *dm = &adev->dm;
6214         int i;
6215
6216         /*
6217          * Call this only if mst_mgr was initialized, since that is not
6218          * done for all connector types.
6219          */
6220         if (aconnector->mst_mgr.dev)
6221                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6222
6223         for (i = 0; i < dm->num_of_edps; i++) {
6224                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6225                         backlight_device_unregister(dm->backlight_dev[i]);
6226                         dm->backlight_dev[i] = NULL;
6227                 }
6228         }
6229
6230         if (aconnector->dc_em_sink)
6231                 dc_sink_release(aconnector->dc_em_sink);
6232         aconnector->dc_em_sink = NULL;
6233         if (aconnector->dc_sink)
6234                 dc_sink_release(aconnector->dc_sink);
6235         aconnector->dc_sink = NULL;
6236
6237         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6238         drm_connector_unregister(connector);
6239         drm_connector_cleanup(connector);
6240         if (aconnector->i2c) {
6241                 i2c_del_adapter(&aconnector->i2c->base);
6242                 kfree(aconnector->i2c);
6243         }
6244         kfree(aconnector->dm_dp_aux.aux.name);
6245
6246         kfree(connector);
6247 }
6248
6249 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6250 {
6251         struct dm_connector_state *state =
6252                 to_dm_connector_state(connector->state);
6253
6254         if (connector->state)
6255                 __drm_atomic_helper_connector_destroy_state(connector->state);
6256
6257         kfree(state);
6258
6259         state = kzalloc(sizeof(*state), GFP_KERNEL);
6260
6261         if (state) {
6262                 state->scaling = RMX_OFF;
6263                 state->underscan_enable = false;
6264                 state->underscan_hborder = 0;
6265                 state->underscan_vborder = 0;
6266                 state->base.max_requested_bpc = 8;
6267                 state->vcpi_slots = 0;
6268                 state->pbn = 0;
6269
6270                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6271                         state->abm_level = amdgpu_dm_abm_level;
6272
6273                 __drm_atomic_helper_connector_reset(connector, &state->base);
6274         }
6275 }
6276
6277 struct drm_connector_state *
6278 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6279 {
6280         struct dm_connector_state *state =
6281                 to_dm_connector_state(connector->state);
6282
6283         struct dm_connector_state *new_state =
6284                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6285
6286         if (!new_state)
6287                 return NULL;
6288
6289         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6290
6291         new_state->freesync_capable = state->freesync_capable;
6292         new_state->abm_level = state->abm_level;
6293         new_state->scaling = state->scaling;
6294         new_state->underscan_enable = state->underscan_enable;
6295         new_state->underscan_hborder = state->underscan_hborder;
6296         new_state->underscan_vborder = state->underscan_vborder;
6297         new_state->vcpi_slots = state->vcpi_slots;
6298         new_state->pbn = state->pbn;
6299         return &new_state->base;
6300 }
6301
6302 static int
6303 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6304 {
6305         struct amdgpu_dm_connector *amdgpu_dm_connector =
6306                 to_amdgpu_dm_connector(connector);
6307         int r;
6308
6309         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6310             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6311                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6312                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6313                 if (r)
6314                         return r;
6315         }
6316
6317 #if defined(CONFIG_DEBUG_FS)
6318         connector_debugfs_init(amdgpu_dm_connector);
6319 #endif
6320
6321         return 0;
6322 }
6323
6324 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6325         .reset = amdgpu_dm_connector_funcs_reset,
6326         .detect = amdgpu_dm_connector_detect,
6327         .fill_modes = drm_helper_probe_single_connector_modes,
6328         .destroy = amdgpu_dm_connector_destroy,
6329         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6330         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6331         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6332         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6333         .late_register = amdgpu_dm_connector_late_register,
6334         .early_unregister = amdgpu_dm_connector_unregister
6335 };
6336
6337 static int get_modes(struct drm_connector *connector)
6338 {
6339         return amdgpu_dm_connector_get_modes(connector);
6340 }
6341
6342 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6343 {
6344         struct dc_sink_init_data init_params = {
6345                         .link = aconnector->dc_link,
6346                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6347         };
6348         struct edid *edid;
6349
6350         if (!aconnector->base.edid_blob_ptr) {
6351                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6352                                 aconnector->base.name);
6353
6354                 aconnector->base.force = DRM_FORCE_OFF;
6355                 return;
6356         }
6357
6358         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6359
6360         aconnector->edid = edid;
6361
6362         aconnector->dc_em_sink = dc_link_add_remote_sink(
6363                 aconnector->dc_link,
6364                 (uint8_t *)edid,
6365                 (edid->extensions + 1) * EDID_LENGTH,
6366                 &init_params);
6367
6368         if (aconnector->base.force == DRM_FORCE_ON) {
6369                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6370                 aconnector->dc_link->local_sink :
6371                 aconnector->dc_em_sink;
6372                 dc_sink_retain(aconnector->dc_sink);
6373         }
6374 }
6375
6376 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6377 {
6378         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6379
6380         /*
6381          * For a headless boot with force-on set on a DP managed connector,
6382          * these settings have to be != 0 to get the initial modeset.
6383          */
6384         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6385                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6386                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6387         }
6388
6389         create_eml_sink(aconnector);
6390 }
6391
6392 static enum dc_status dm_validate_stream_and_context(struct dc *dc,
6393                                                 struct dc_stream_state *stream)
6394 {
6395         enum dc_status dc_result = DC_ERROR_UNEXPECTED;
6396         struct dc_plane_state *dc_plane_state = NULL;
6397         struct dc_state *dc_state = NULL;
6398
6399         if (!stream)
6400                 goto cleanup;
6401
6402         dc_plane_state = dc_create_plane_state(dc);
6403         if (!dc_plane_state)
6404                 goto cleanup;
6405
6406         dc_state = dc_create_state(dc);
6407         if (!dc_state)
6408                 goto cleanup;
6409
6410         /* populate stream to plane */
6411         dc_plane_state->src_rect.height  = stream->src.height;
6412         dc_plane_state->src_rect.width   = stream->src.width;
6413         dc_plane_state->dst_rect.height  = stream->src.height;
6414         dc_plane_state->dst_rect.width   = stream->src.width;
6415         dc_plane_state->clip_rect.height = stream->src.height;
6416         dc_plane_state->clip_rect.width  = stream->src.width;
6417         dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
6418         dc_plane_state->plane_size.surface_size.height = stream->src.height;
6419         dc_plane_state->plane_size.surface_size.width  = stream->src.width;
6420         dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
6421         dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
6422         dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
6423         dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
6424         dc_plane_state->rotation = ROTATION_ANGLE_0;
6425         dc_plane_state->is_tiling_rotated = false;
6426         dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
6427
6428         dc_result = dc_validate_stream(dc, stream);
6429         if (dc_result == DC_OK)
6430                 dc_result = dc_validate_plane(dc, dc_plane_state);
6431
6432         if (dc_result == DC_OK)
6433                 dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
6434
6435         if (dc_result == DC_OK && !dc_add_plane_to_context(
6436                                                 dc,
6437                                                 stream,
6438                                                 dc_plane_state,
6439                                                 dc_state))
6440                 dc_result = DC_FAIL_ATTACH_SURFACES;
6441
6442         if (dc_result == DC_OK)
6443                 dc_result = dc_validate_global_state(dc, dc_state, true);
6444
6445 cleanup:
6446         if (dc_state)
6447                 dc_release_state(dc_state);
6448
6449         if (dc_plane_state)
6450                 dc_plane_state_release(dc_plane_state);
6451
6452         return dc_result;
6453 }
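
/*
 * Editor's illustrative sketch (not part of the driver, never called): the
 * surface pitch above is the stream width rounded up to a multiple of 256,
 * e.g. a 1920-pixel-wide stream gets ((1920 + 255) / 256) * 256 = 2048.
 */
static inline int example_aligned_pitch(int width)
{
        return ((width + 255) / 256) * 256;
}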
6454
6455 struct dc_stream_state *
6456 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6457                                 const struct drm_display_mode *drm_mode,
6458                                 const struct dm_connector_state *dm_state,
6459                                 const struct dc_stream_state *old_stream)
6460 {
6461         struct drm_connector *connector = &aconnector->base;
6462         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6463         struct dc_stream_state *stream;
6464         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6465         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6466         enum dc_status dc_result = DC_OK;
6467
6468         do {
6469                 stream = create_stream_for_sink(aconnector, drm_mode,
6470                                                 dm_state, old_stream,
6471                                                 requested_bpc);
6472                 if (stream == NULL) {
6473                         DRM_ERROR("Failed to create stream for sink!\n");
6474                         break;
6475                 }
6476
6477                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6478                 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6479                         dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6480
6481                 if (dc_result == DC_OK)
6482                         dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
6483
6484                 if (dc_result != DC_OK) {
6485                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6486                                       drm_mode->hdisplay,
6487                                       drm_mode->vdisplay,
6488                                       drm_mode->clock,
6489                                       dc_result,
6490                                       dc_status_to_str(dc_result));
6491
6492                         dc_stream_release(stream);
6493                         stream = NULL;
6494                         requested_bpc -= 2; /* lower bpc to retry validation */
6495                 }
6496
6497         } while (stream == NULL && requested_bpc >= 6);
6498
6499         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6500                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6501
6502                 aconnector->force_yuv420_output = true;
6503                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6504                                                 dm_state, old_stream);
6505                 aconnector->force_yuv420_output = false;
6506         }
6507
6508         return stream;
6509 }
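
/*
 * Editor's illustrative sketch (not part of the driver, never called): the
 * loop above lowers the colour depth by 2 bpc on each failed validation and
 * stops below 6 bpc, so a max_requested_bpc of 10 yields the attempt
 * sequence 10, 8, 6 (with one extra YCbCr 4:2:0 pass for
 * DC_FAIL_ENC_VALIDATE).
 */
static inline int example_next_bpc(int requested_bpc)
{
        return requested_bpc >= 8 ? requested_bpc - 2 : 0; /* 0 = give up */
}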
6510
6511 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6512                                    struct drm_display_mode *mode)
6513 {
6514         int result = MODE_ERROR;
6515         struct dc_sink *dc_sink;
6516         /* TODO: Unhardcode stream count */
6517         struct dc_stream_state *stream;
6518         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6519
6520         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6521                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6522                 return result;
6523
6524         /*
6525          * Only run this the first time mode_valid is called, to initialize
6526          * EDID management.
6527          */
6528         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6529                 !aconnector->dc_em_sink)
6530                 handle_edid_mgmt(aconnector);
6531
6532         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6533
6534         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6535                                 aconnector->base.force != DRM_FORCE_ON) {
6536                 DRM_ERROR("dc_sink is NULL!\n");
6537                 goto fail;
6538         }
6539
6540         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6541         if (stream) {
6542                 dc_stream_release(stream);
6543                 result = MODE_OK;
6544         }
6545
6546 fail:
6547         /* TODO: error handling */
6548         return result;
6549 }
6550
6551 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6552                                 struct dc_info_packet *out)
6553 {
6554         struct hdmi_drm_infoframe frame;
6555         unsigned char buf[30]; /* 26 + 4 */
6556         ssize_t len;
6557         int ret, i;
6558
6559         memset(out, 0, sizeof(*out));
6560
6561         if (!state->hdr_output_metadata)
6562                 return 0;
6563
6564         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6565         if (ret)
6566                 return ret;
6567
6568         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6569         if (len < 0)
6570                 return (int)len;
6571
6572         /* Static metadata is fixed at 26 bytes, plus a 4-byte header. */
6573         if (len != 30)
6574                 return -EINVAL;
6575
6576         /* Prepare the infopacket for DC. */
6577         switch (state->connector->connector_type) {
6578         case DRM_MODE_CONNECTOR_HDMIA:
6579                 out->hb0 = 0x87; /* type */
6580                 out->hb1 = 0x01; /* version */
6581                 out->hb2 = 0x1A; /* length */
6582                 out->sb[0] = buf[3]; /* checksum */
6583                 i = 1;
6584                 break;
6585
6586         case DRM_MODE_CONNECTOR_DisplayPort:
6587         case DRM_MODE_CONNECTOR_eDP:
6588                 out->hb0 = 0x00; /* sdp id, zero */
6589                 out->hb1 = 0x87; /* type */
6590                 out->hb2 = 0x1D; /* payload len - 1 */
6591                 out->hb3 = (0x13 << 2); /* sdp version */
6592                 out->sb[0] = 0x01; /* version */
6593                 out->sb[1] = 0x1A; /* length */
6594                 i = 2;
6595                 break;
6596
6597         default:
6598                 return -EINVAL;
6599         }
6600
6601         memcpy(&out->sb[i], &buf[4], 26);
6602         out->valid = true;
6603
6604         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6605                        sizeof(out->sb), false);
6606
6607         return 0;
6608 }
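
/*
 * Editor's illustrative sketch (not part of the driver, never called):
 * layout of the packed Dynamic Range and Mastering infoframe consumed
 * above. The 30-byte buffer is a 4-byte header (type 0x87, version 0x01,
 * length 0x1A, then the checksum) followed by the 26-byte static-metadata
 * payload, which is what gets copied into out->sb[] at the
 * connector-specific offset.
 */
static inline bool example_is_valid_hdr_infoframe(const unsigned char *buf,
                                                  ssize_t len)
{
        return len == 30 && buf[0] == 0x87 && buf[1] == 0x01 && buf[2] == 0x1A;
}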
6609
6610 static int
6611 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6612                                  struct drm_atomic_state *state)
6613 {
6614         struct drm_connector_state *new_con_state =
6615                 drm_atomic_get_new_connector_state(state, conn);
6616         struct drm_connector_state *old_con_state =
6617                 drm_atomic_get_old_connector_state(state, conn);
6618         struct drm_crtc *crtc = new_con_state->crtc;
6619         struct drm_crtc_state *new_crtc_state;
6620         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
6621         int ret;
6622
6623         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6624
6625         if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
6626                 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
6627                 if (ret < 0)
6628                         return ret;
6629         }
6630
6631         if (!crtc)
6632                 return 0;
6633
6634         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6635                 struct dc_info_packet hdr_infopacket;
6636
6637                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6638                 if (ret)
6639                         return ret;
6640
6641                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6642                 if (IS_ERR(new_crtc_state))
6643                         return PTR_ERR(new_crtc_state);
6644
6645                 /*
6646                  * DC considers the stream backends changed if the
6647                  * static metadata changes. Forcing the modeset also
6648                  * gives a simple way for userspace to switch from
6649                  * 8bpc to 10bpc when setting the metadata to enter
6650                  * or exit HDR.
6651                  *
6652                  * Changing the static metadata after it's been
6653                  * set is permissible, however. So only force a
6654                  * modeset if we're entering or exiting HDR.
6655                  */
6656                 new_crtc_state->mode_changed =
6657                         !old_con_state->hdr_output_metadata ||
6658                         !new_con_state->hdr_output_metadata;
6659         }
6660
6661         return 0;
6662 }
6663
6664 static const struct drm_connector_helper_funcs
6665 amdgpu_dm_connector_helper_funcs = {
6666         /*
6667          * If hotplugging a second, bigger display in FB console mode, bigger
6668          * resolution modes are filtered out by drm_mode_validate_size() and go
6669          * missing after the user starts lightdm. So renew the modes list in the
6670          * get_modes callback instead of just returning the modes count.
6671          */
6672         .get_modes = get_modes,
6673         .mode_valid = amdgpu_dm_connector_mode_valid,
6674         .atomic_check = amdgpu_dm_connector_atomic_check,
6675 };
6676
6677 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6678 {
6679
6680 }
6681
6682 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6683 {
6684         switch (display_color_depth) {
6685         case COLOR_DEPTH_666:
6686                 return 6;
6687         case COLOR_DEPTH_888:
6688                 return 8;
6689         case COLOR_DEPTH_101010:
6690                 return 10;
6691         case COLOR_DEPTH_121212:
6692                 return 12;
6693         case COLOR_DEPTH_141414:
6694                 return 14;
6695         case COLOR_DEPTH_161616:
6696                 return 16;
6697         default:
6698                 break;
6699         }
6700         return 0;
6701 }
6702
6703 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6704                                           struct drm_crtc_state *crtc_state,
6705                                           struct drm_connector_state *conn_state)
6706 {
6707         struct drm_atomic_state *state = crtc_state->state;
6708         struct drm_connector *connector = conn_state->connector;
6709         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6710         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6711         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6712         struct drm_dp_mst_topology_mgr *mst_mgr;
6713         struct drm_dp_mst_port *mst_port;
6714         struct drm_dp_mst_topology_state *mst_state;
6715         enum dc_color_depth color_depth;
6716         int clock, bpp = 0;
6717         bool is_y420 = false;
6718
6719         if (!aconnector->mst_output_port || !aconnector->dc_sink)
6720                 return 0;
6721
6722         mst_port = aconnector->mst_output_port;
6723         mst_mgr = &aconnector->mst_root->mst_mgr;
6724
6725         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6726                 return 0;
6727
6728         mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
6729         if (IS_ERR(mst_state))
6730                 return PTR_ERR(mst_state);
6731
6732         if (!mst_state->pbn_div)
6733                 mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
6734
6735         if (!state->duplicated) {
6736                 int max_bpc = conn_state->max_requested_bpc;
6737                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6738                           aconnector->force_yuv420_output;
6739                 color_depth = convert_color_depth_from_display_info(connector,
6740                                                                     is_y420,
6741                                                                     max_bpc);
6742                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6743                 clock = adjusted_mode->clock;
6744                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6745         }
6746
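             /*
              * Reserve VCPI time slots for this port in the MST topology
              * state; the slot count follows from the PBN computed above
              * (or from the PBN carried over on a duplicated state).
              */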
6747         dm_new_connector_state->vcpi_slots =
6748                 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
6749                                               dm_new_connector_state->pbn);
6750         if (dm_new_connector_state->vcpi_slots < 0) {
6751                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6752                 return dm_new_connector_state->vcpi_slots;
6753         }
6754         return 0;
6755 }
6756
6757 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6758         .disable = dm_encoder_helper_disable,
6759         .atomic_check = dm_encoder_helper_atomic_check
6760 };
6761
6762 #if defined(CONFIG_DRM_AMD_DC_DCN)
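     /*
      * Walk the new connector states, match each MST connector to its DC
      * stream, and apply the PBN that the DSC fairness computation stored
      * in vars: enable DSC on the MST port for streams that use it,
      * disable it otherwise, and record the resulting PBN and VCPI slot
      * count in the connector state.
      */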
6763 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6764                                             struct dc_state *dc_state,
6765                                             struct dsc_mst_fairness_vars *vars)
6766 {
6767         struct dc_stream_state *stream = NULL;
6768         struct drm_connector *connector;
6769         struct drm_connector_state *new_con_state;
6770         struct amdgpu_dm_connector *aconnector;
6771         struct dm_connector_state *dm_conn_state;
6772         int i, j, ret;
6773         int vcpi, pbn_div, pbn, slot_num = 0;
6774
6775         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6776
6777                 aconnector = to_amdgpu_dm_connector(connector);
6778
6779                 if (!aconnector->mst_output_port)
6780                         continue;
6781
6782                 if (!new_con_state || !new_con_state->crtc)
6783                         continue;
6784
6785                 dm_conn_state = to_dm_connector_state(new_con_state);
6786
6787                 for (j = 0; j < dc_state->stream_count; j++) {
6788                         stream = dc_state->streams[j];
6789                         if (!stream)
6790                                 continue;
6791
6792                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6793                                 break;
6794
6795                         stream = NULL;
6796                 }
6797
6798                 if (!stream)
6799                         continue;
6800
6801                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6802                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
6803                 for (j = 0; j < dc_state->stream_count; j++) {
6804                         if (vars[j].aconnector == aconnector) {
6805                                 pbn = vars[j].pbn;
6806                                 break;
6807                         }
6808                 }
6809
6810                 if (j == dc_state->stream_count)
6811                         continue;
6812
6813                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
6814
6815                 if (stream->timing.flags.DSC != 1) {
6816                         dm_conn_state->pbn = pbn;
6817                         dm_conn_state->vcpi_slots = slot_num;
6818
6819                         ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
6820                                                            dm_conn_state->pbn, false);
6821                         if (ret < 0)
6822                                 return ret;
6823
6824                         continue;
6825                 }
6826
6827                 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
6828                 if (vcpi < 0)
6829                         return vcpi;
6830
6831                 dm_conn_state->pbn = pbn;
6832                 dm_conn_state->vcpi_slots = vcpi;
6833         }
6834         return 0;
6835 }
6836 #endif
6837
6838 static int to_drm_connector_type(enum signal_type st)
6839 {
6840         switch (st) {
6841         case SIGNAL_TYPE_HDMI_TYPE_A:
6842                 return DRM_MODE_CONNECTOR_HDMIA;
6843         case SIGNAL_TYPE_EDP:
6844                 return DRM_MODE_CONNECTOR_eDP;
6845         case SIGNAL_TYPE_LVDS:
6846                 return DRM_MODE_CONNECTOR_LVDS;
6847         case SIGNAL_TYPE_RGB:
6848                 return DRM_MODE_CONNECTOR_VGA;
6849         case SIGNAL_TYPE_DISPLAY_PORT:
6850         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6851                 return DRM_MODE_CONNECTOR_DisplayPort;
6852         case SIGNAL_TYPE_DVI_DUAL_LINK:
6853         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6854                 return DRM_MODE_CONNECTOR_DVID;
6855         case SIGNAL_TYPE_VIRTUAL:
6856                 return DRM_MODE_CONNECTOR_VIRTUAL;
6857
6858         default:
6859                 return DRM_MODE_CONNECTOR_Unknown;
6860         }
6861 }
6862
6863 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6864 {
6865         struct drm_encoder *encoder;
6866
6867         /* There is only one encoder per connector */
6868         drm_connector_for_each_possible_encoder(connector, encoder)
6869                 return encoder;
6870
6871         return NULL;
6872 }
6873
6874 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6875 {
6876         struct drm_encoder *encoder;
6877         struct amdgpu_encoder *amdgpu_encoder;
6878
6879         encoder = amdgpu_dm_connector_to_encoder(connector);
6880
6881         if (encoder == NULL)
6882                 return;
6883
6884         amdgpu_encoder = to_amdgpu_encoder(encoder);
6885
6886         amdgpu_encoder->native_mode.clock = 0;
6887
6888         if (!list_empty(&connector->probed_modes)) {
6889                 struct drm_display_mode *preferred_mode = NULL;
6890
6891                 list_for_each_entry(preferred_mode,
6892                                     &connector->probed_modes,
6893                                     head) {
6894                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6895                                 amdgpu_encoder->native_mode = *preferred_mode;
6896
6897                         break;
6898                 }
6899
6900         }
6901 }
6902
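     /*
      * Create a "common" mode from the native mode: duplicate it and
      * override only the active width/height, keeping the native timing
      * so the smaller mode can be scaled to the panel.
      */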
6903 static struct drm_display_mode *
6904 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6905                              char *name,
6906                              int hdisplay, int vdisplay)
6907 {
6908         struct drm_device *dev = encoder->dev;
6909         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6910         struct drm_display_mode *mode = NULL;
6911         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6912
6913         mode = drm_mode_duplicate(dev, native_mode);
6914
6915         if (mode == NULL)
6916                 return NULL;
6917
6918         mode->hdisplay = hdisplay;
6919         mode->vdisplay = vdisplay;
6920         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6921         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6922
6923         return mode;
6924 }
6926
6927 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6928                                                  struct drm_connector *connector)
6929 {
6930         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6931         struct drm_display_mode *mode = NULL;
6932         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6933         struct amdgpu_dm_connector *amdgpu_dm_connector =
6934                                 to_amdgpu_dm_connector(connector);
6935         int i;
6936         int n;
6937         struct mode_size {
6938                 char name[DRM_DISPLAY_MODE_LEN];
6939                 int w;
6940                 int h;
6941         } common_modes[] = {
6942                 {  "640x480",  640,  480},
6943                 {  "800x600",  800,  600},
6944                 { "1024x768", 1024,  768},
6945                 { "1280x720", 1280,  720},
6946                 { "1280x800", 1280,  800},
6947                 {"1280x1024", 1280, 1024},
6948                 { "1440x900", 1440,  900},
6949                 {"1680x1050", 1680, 1050},
6950                 {"1600x1200", 1600, 1200},
6951                 {"1920x1080", 1920, 1080},
6952                 {"1920x1200", 1920, 1200}
6953         };
6954
6955         n = ARRAY_SIZE(common_modes);
6956
6957         for (i = 0; i < n; i++) {
6958                 struct drm_display_mode *curmode = NULL;
6959                 bool mode_existed = false;
6960
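                     /*
                      * Skip modes that don't fit within the native mode, and
                      * skip the native resolution itself; it is already in
                      * the probed list.
                      */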
6961                 if (common_modes[i].w > native_mode->hdisplay ||
6962                     common_modes[i].h > native_mode->vdisplay ||
6963                    (common_modes[i].w == native_mode->hdisplay &&
6964                     common_modes[i].h == native_mode->vdisplay))
6965                         continue;
6966
6967                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6968                         if (common_modes[i].w == curmode->hdisplay &&
6969                             common_modes[i].h == curmode->vdisplay) {
6970                                 mode_existed = true;
6971                                 break;
6972                         }
6973                 }
6974
6975                 if (mode_existed)
6976                         continue;
6977
6978                 mode = amdgpu_dm_create_common_mode(encoder,
6979                                 common_modes[i].name, common_modes[i].w,
6980                                 common_modes[i].h);
6981                 if (!mode)
6982                         continue;
6983
6984                 drm_mode_probed_add(connector, mode);
6985                 amdgpu_dm_connector->num_modes++;
6986         }
6987 }
6988
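     /*
      * For internal panels (eDP/LVDS), probe the modes to learn the
      * native resolution, then apply any panel-orientation quirk that
      * matches it.
      */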
6989 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
6990 {
6991         struct drm_encoder *encoder;
6992         struct amdgpu_encoder *amdgpu_encoder;
6993         const struct drm_display_mode *native_mode;
6994
6995         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
6996             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
6997                 return;
6998
6999         mutex_lock(&connector->dev->mode_config.mutex);
7000         amdgpu_dm_connector_get_modes(connector);
7001         mutex_unlock(&connector->dev->mode_config.mutex);
7002
7003         encoder = amdgpu_dm_connector_to_encoder(connector);
7004         if (!encoder)
7005                 return;
7006
7007         amdgpu_encoder = to_amdgpu_encoder(encoder);
7008
7009         native_mode = &amdgpu_encoder->native_mode;
7010         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7011                 return;
7012
7013         drm_connector_set_panel_orientation_with_quirk(connector,
7014                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7015                                                        native_mode->hdisplay,
7016                                                        native_mode->vdisplay);
7017 }
7018
7019 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7020                                               struct edid *edid)
7021 {
7022         struct amdgpu_dm_connector *amdgpu_dm_connector =
7023                         to_amdgpu_dm_connector(connector);
7024
7025         if (edid) {
7026                 /* empty probed_modes */
7027                 INIT_LIST_HEAD(&connector->probed_modes);
7028                 amdgpu_dm_connector->num_modes =
7029                                 drm_add_edid_modes(connector, edid);
7030
7031                 /* Sort the probed modes before calling
7032                  * amdgpu_dm_get_native_mode(), since an EDID can have
7033                  * more than one preferred mode. Modes later in the
7034                  * probed mode list could be preferred and of higher
7035                  * resolution: for example, a 3840x2160 preferred timing
7036                  * in the base EDID and a 4096x2160 preferred resolution
7037                  * in a later DID extension block.
7038                  */
7039                 drm_mode_sort(&connector->probed_modes);
7040                 amdgpu_dm_get_native_mode(connector);
7041
7042                 /* Freesync capabilities are reset by calling
7043                  * drm_add_edid_modes() and need to be
7044                  * restored here.
7045                  */
7046                 amdgpu_dm_update_freesync_caps(connector, edid);
7047         } else {
7048                 amdgpu_dm_connector->num_modes = 0;
7049         }
7050 }
7051
7052 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7053                               struct drm_display_mode *mode)
7054 {
7055         struct drm_display_mode *m;
7056
7057         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7058                 if (drm_mode_equal(m, mode))
7059                         return true;
7060         }
7061
7062         return false;
7063 }
7064
7065 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7066 {
7067         const struct drm_display_mode *m;
7068         struct drm_display_mode *new_mode;
7069         uint i;
7070         u32 new_modes_count = 0;
7071
7072         /* Standard FPS values
7073          *
7074          * 23.976       - TV/NTSC
7075          * 24           - Cinema
7076          * 25           - TV/PAL
7077          * 29.97        - TV/NTSC
7078          * 30           - TV/NTSC
7079          * 48           - Cinema HFR
7080          * 50           - TV/PAL
7081          * 60           - Commonly used
7082          * 48,72,96,120 - Multiples of 24
7083          */
7084         static const u32 common_rates[] = {
7085                 23976, 24000, 25000, 29970, 30000,
7086                 48000, 50000, 60000, 72000, 96000, 120000
7087         };
7088
7089         /*
7090          * Find mode with highest refresh rate with the same resolution
7091          * as the preferred mode. Some monitors report a preferred mode
7092          * with lower resolution than the highest refresh rate supported.
7093          */
7094
7095         m = get_highest_refresh_rate_mode(aconnector, true);
7096         if (!m)
7097                 return 0;
7098
7099         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7100                 u64 target_vtotal, target_vtotal_diff;
7101                 u64 num, den;
7102
7103                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7104                         continue;
7105
7106                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7107                     common_rates[i] > aconnector->max_vfreq * 1000)
7108                         continue;
7109
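                     /*
                      * Keep the base mode's pixel clock and htotal and stretch
                      * vtotal to hit the target rate:
                      *   vtotal = clock[kHz] * 1000 * 1000 / (rate[mHz] * htotal)
                      */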
7110                 num = (unsigned long long)m->clock * 1000 * 1000;
7111                 den = common_rates[i] * (unsigned long long)m->htotal;
7112                 target_vtotal = div_u64(num, den);
7113                 target_vtotal_diff = target_vtotal - m->vtotal;
7114
7115                 /* Check for illegal modes */
7116                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7117                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7118                     m->vtotal + target_vtotal_diff < m->vsync_end)
7119                         continue;
7120
7121                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7122                 if (!new_mode)
7123                         goto out;
7124
7125                 new_mode->vtotal += (u16)target_vtotal_diff;
7126                 new_mode->vsync_start += (u16)target_vtotal_diff;
7127                 new_mode->vsync_end += (u16)target_vtotal_diff;
7128                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7129                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7130
7131                 if (!is_duplicate_mode(aconnector, new_mode)) {
7132                         drm_mode_probed_add(&aconnector->base, new_mode);
7133                         new_modes_count += 1;
7134                 } else {
7135                         drm_mode_destroy(aconnector->base.dev, new_mode);
                     }
7136         }
7137  out:
7138         return new_modes_count;
7139 }
7140
7141 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7142                                                    struct edid *edid)
7143 {
7144         struct amdgpu_dm_connector *amdgpu_dm_connector =
7145                 to_amdgpu_dm_connector(connector);
7146
7147         if (!(amdgpu_freesync_vid_mode && edid))
7148                 return;
7149
7150         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7151                 amdgpu_dm_connector->num_modes +=
7152                         add_fs_modes(amdgpu_dm_connector);
7153 }
7154
7155 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7156 {
7157         struct amdgpu_dm_connector *amdgpu_dm_connector =
7158                         to_amdgpu_dm_connector(connector);
7159         struct drm_encoder *encoder;
7160         struct edid *edid = amdgpu_dm_connector->edid;
7161
7162         encoder = amdgpu_dm_connector_to_encoder(connector);
7163
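             /* Without a valid EDID, fall back to a safe 640x480 mode list. */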
7164         if (!drm_edid_is_valid(edid)) {
7165                 amdgpu_dm_connector->num_modes =
7166                                 drm_add_modes_noedid(connector, 640, 480);
7167         } else {
7168                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7169                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7170                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7171         }
7172         amdgpu_dm_fbc_init(connector);
7173
7174         return amdgpu_dm_connector->num_modes;
7175 }
7176
7177 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7178                                      struct amdgpu_dm_connector *aconnector,
7179                                      int connector_type,
7180                                      struct dc_link *link,
7181                                      int link_index)
7182 {
7183         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7184
7185         /*
7186          * Some of the properties below require access to state, like bpc.
7187          * Allocate some default initial connector state with our reset helper.
7188          */
7189         if (aconnector->base.funcs->reset)
7190                 aconnector->base.funcs->reset(&aconnector->base);
7191
7192         aconnector->connector_id = link_index;
7193         aconnector->dc_link = link;
7194         aconnector->base.interlace_allowed = false;
7195         aconnector->base.doublescan_allowed = false;
7196         aconnector->base.stereo_allowed = false;
7197         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7198         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7199         aconnector->audio_inst = -1;
7200         aconnector->pack_sdp_v1_3 = false;
7201         aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
7202         memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
7203         mutex_init(&aconnector->hpd_lock);
7204
7205         /*
7206          * Configure HPD hot plug support: connector->polled defaults to 0,
7207          * which means HPD hot plug is not supported.
7208          */
7209         switch (connector_type) {
7210         case DRM_MODE_CONNECTOR_HDMIA:
7211                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7212                 aconnector->base.ycbcr_420_allowed =
7213                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7214                 break;
7215         case DRM_MODE_CONNECTOR_DisplayPort:
7216                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7217                 link->link_enc = link_enc_cfg_get_link_enc(link);
7218                 ASSERT(link->link_enc);
7219                 if (link->link_enc)
7220                         aconnector->base.ycbcr_420_allowed =
7221                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7222                 break;
7223         case DRM_MODE_CONNECTOR_DVID:
7224                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7225                 break;
7226         default:
7227                 break;
7228         }
7229
7230         drm_object_attach_property(&aconnector->base.base,
7231                                 dm->ddev->mode_config.scaling_mode_property,
7232                                 DRM_MODE_SCALE_NONE);
7233
7234         drm_object_attach_property(&aconnector->base.base,
7235                                 adev->mode_info.underscan_property,
7236                                 UNDERSCAN_OFF);
7237         drm_object_attach_property(&aconnector->base.base,
7238                                 adev->mode_info.underscan_hborder_property,
7239                                 0);
7240         drm_object_attach_property(&aconnector->base.base,
7241                                 adev->mode_info.underscan_vborder_property,
7242                                 0);
7243
7244         if (!aconnector->mst_root)
7245                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7246
7247         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7248         aconnector->base.state->max_bpc = 16;
7249         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7250
7251         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7252             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7253                 drm_object_attach_property(&aconnector->base.base,
7254                                 adev->mode_info.abm_level_property, 0);
7255         }
7256
7257         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7258             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7259             connector_type == DRM_MODE_CONNECTOR_eDP) {
7260                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7261
7262                 if (!aconnector->mst_root)
7263                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7264
7265 #ifdef CONFIG_DRM_AMD_DC_HDCP
7266                 if (adev->dm.hdcp_workqueue)
7267                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7268 #endif
7269         }
7270 }
7271
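     /*
      * i2c_algorithm .master_xfer hook: translate the i2c_msg array into
      * a DC i2c_command and submit it over the link's DDC channel.
      * Returns the number of messages transferred on success, -EIO
      * otherwise.
      */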
7272 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7273                               struct i2c_msg *msgs, int num)
7274 {
7275         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7276         struct ddc_service *ddc_service = i2c->ddc_service;
7277         struct i2c_command cmd;
7278         int i;
7279         int result = -EIO;
7280
7281         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7282
7283         if (!cmd.payloads)
7284                 return result;
7285
7286         cmd.number_of_payloads = num;
7287         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7288         cmd.speed = 100;
7289
7290         for (i = 0; i < num; i++) {
7291                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7292                 cmd.payloads[i].address = msgs[i].addr;
7293                 cmd.payloads[i].length = msgs[i].len;
7294                 cmd.payloads[i].data = msgs[i].buf;
7295         }
7296
7297         if (dc_submit_i2c(
7298                         ddc_service->ctx->dc,
7299                         ddc_service->link->link_index,
7300                         &cmd))
7301                 result = num;
7302
7303         kfree(cmd.payloads);
7304         return result;
7305 }
7306
7307 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7308 {
7309         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7310 }
7311
7312 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7313         .master_xfer = amdgpu_dm_i2c_xfer,
7314         .functionality = amdgpu_dm_i2c_func,
7315 };
7316
7317 static struct amdgpu_i2c_adapter *
7318 create_i2c(struct ddc_service *ddc_service,
7319            int link_index,
7320            int *res)
7321 {
7322         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7323         struct amdgpu_i2c_adapter *i2c;
7324
7325         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7326         if (!i2c)
7327                 return NULL;
7328         i2c->base.owner = THIS_MODULE;
7329         i2c->base.class = I2C_CLASS_DDC;
7330         i2c->base.dev.parent = &adev->pdev->dev;
7331         i2c->base.algo = &amdgpu_dm_i2c_algo;
7332         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7333         i2c_set_adapdata(&i2c->base, i2c);
7334         i2c->ddc_service = ddc_service;
7335
7336         return i2c;
7337 }
7338
7340 /*
7341  * Note: this function assumes that dc_link_detect() was called for the
7342  * dc_link which will be represented by this aconnector.
7343  */
7344 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7345                                     struct amdgpu_dm_connector *aconnector,
7346                                     u32 link_index,
7347                                     struct amdgpu_encoder *aencoder)
7348 {
7349         int res = 0;
7350         int connector_type;
7351         struct dc *dc = dm->dc;
7352         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7353         struct amdgpu_i2c_adapter *i2c;
7354
7355         link->priv = aconnector;
7356
7357         DRM_DEBUG_DRIVER("%s()\n", __func__);
7358
7359         i2c = create_i2c(link->ddc, link->link_index, &res);
7360         if (!i2c) {
7361                 DRM_ERROR("Failed to create i2c adapter data\n");
7362                 return -ENOMEM;
7363         }
7364
7365         aconnector->i2c = i2c;
7366         res = i2c_add_adapter(&i2c->base);
7367
7368         if (res) {
7369                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7370                 goto out_free;
7371         }
7372
7373         connector_type = to_drm_connector_type(link->connector_signal);
7374
7375         res = drm_connector_init_with_ddc(
7376                         dm->ddev,
7377                         &aconnector->base,
7378                         &amdgpu_dm_connector_funcs,
7379                         connector_type,
7380                         &i2c->base);
7381
7382         if (res) {
7383                 DRM_ERROR("connector_init failed\n");
7384                 aconnector->connector_id = -1;
7385                 goto out_free;
7386         }
7387
7388         drm_connector_helper_add(
7389                         &aconnector->base,
7390                         &amdgpu_dm_connector_helper_funcs);
7391
7392         amdgpu_dm_connector_init_helper(
7393                 dm,
7394                 aconnector,
7395                 connector_type,
7396                 link,
7397                 link_index);
7398
7399         drm_connector_attach_encoder(
7400                 &aconnector->base, &aencoder->base);
7401
7402         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7403                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7404                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7405
7406 out_free:
7407         if (res) {
7408                 kfree(i2c);
7409                 aconnector->i2c = NULL;
7410         }
7411         return res;
7412 }
7413
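     /*
      * Build an encoder's possible_crtcs bitmask: one bit per CRTC, e.g.
      * four CRTCs give 0xf; other counts fall through to 0x3f.
      */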
7414 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7415 {
7416         switch (adev->mode_info.num_crtc) {
7417         case 1:
7418                 return 0x1;
7419         case 2:
7420                 return 0x3;
7421         case 3:
7422                 return 0x7;
7423         case 4:
7424                 return 0xf;
7425         case 5:
7426                 return 0x1f;
7427         case 6:
7428         default:
7429                 return 0x3f;
7430         }
7431 }
7432
7433 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7434                                   struct amdgpu_encoder *aencoder,
7435                                   uint32_t link_index)
7436 {
7437         struct amdgpu_device *adev = drm_to_adev(dev);
7438
7439         int res = drm_encoder_init(dev,
7440                                    &aencoder->base,
7441                                    &amdgpu_dm_encoder_funcs,
7442                                    DRM_MODE_ENCODER_TMDS,
7443                                    NULL);
7444
7445         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7446
7447         if (!res)
7448                 aencoder->encoder_id = link_index;
7449         else
7450                 aencoder->encoder_id = -1;
7451
7452         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7453
7454         return res;
7455 }
7456
7457 static void manage_dm_interrupts(struct amdgpu_device *adev,
7458                                  struct amdgpu_crtc *acrtc,
7459                                  bool enable)
7460 {
7461         /*
7462          * We have no guarantee that the frontend index maps to the same
7463          * backend index - some even map to more than one.
7464          *
7465          * TODO: Use a different interrupt or check DC itself for the mapping.
7466          */
7467         int irq_type =
7468                 amdgpu_display_crtc_idx_to_irq_type(
7469                         adev,
7470                         acrtc->crtc_id);
7471
7472         if (enable) {
7473                 drm_crtc_vblank_on(&acrtc->base);
7474                 amdgpu_irq_get(
7475                         adev,
7476                         &adev->pageflip_irq,
7477                         irq_type);
7478 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7479                 amdgpu_irq_get(
7480                         adev,
7481                         &adev->vline0_irq,
7482                         irq_type);
7483 #endif
7484         } else {
7485 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7486                 amdgpu_irq_put(
7487                         adev,
7488                         &adev->vline0_irq,
7489                         irq_type);
7490 #endif
7491                 amdgpu_irq_put(
7492                         adev,
7493                         &adev->pageflip_irq,
7494                         irq_type);
7495                 drm_crtc_vblank_off(&acrtc->base);
7496         }
7497 }
7498
7499 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7500                                       struct amdgpu_crtc *acrtc)
7501 {
7502         int irq_type =
7503                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7504
7505         /*
7506          * This reads the current state for the IRQ and forcibly reapplies
7507          * the setting to the hardware.
7508          */
7509         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7510 }
7511
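     /*
      * Return true if the scaling mode or the underscan borders changed
      * between the old and new connector state, in which case the stream
      * scaling needs to be recomputed.
      */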
7512 static bool
7513 is_scaling_state_different(const struct dm_connector_state *dm_state,
7514                            const struct dm_connector_state *old_dm_state)
7515 {
7516         if (dm_state->scaling != old_dm_state->scaling)
7517                 return true;
7518         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7519                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7520                         return true;
7521         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7522                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7523                         return true;
7524         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7525                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7526                 return true;
7527         return false;
7528 }
7529
7530 #ifdef CONFIG_DRM_AMD_DC_HDCP
7531 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
7532                                             struct drm_crtc_state *old_crtc_state,
7533                                             struct drm_connector_state *new_conn_state,
7534                                             struct drm_connector_state *old_conn_state,
7535                                             const struct drm_connector *connector,
7536                                             struct hdcp_workqueue *hdcp_w)
7537 {
7538         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7539         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7540
7541         pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
7542                 connector->index, connector->status, connector->dpms);
7543         pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
7544                 old_conn_state->content_protection, new_conn_state->content_protection);
7545
7546         if (old_crtc_state)
7547                 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7548                          old_crtc_state->enable,
7549                          old_crtc_state->active,
7550                          old_crtc_state->mode_changed,
7551                          old_crtc_state->active_changed,
7552                          old_crtc_state->connectors_changed);
7553
7554         if (new_crtc_state)
7555                 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7556                          new_crtc_state->enable,
7557                          new_crtc_state->active,
7558                          new_crtc_state->mode_changed,
7559                          new_crtc_state->active_changed,
7560                          new_crtc_state->connectors_changed);
7561
7562         /* hdcp content type change */
7563         if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
7564             new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7565                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7566                 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
7567                 return true;
7568         }
7569
7570         /* CP is being re-enabled; ignore this. */
7571         if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7572             new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7573                 if (new_crtc_state && new_crtc_state->mode_changed) {
7574                         new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7575                         pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
7576                         return true;
7577                 }
7578                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7579                 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
7580                 return false;
7581         }
7582
7583         /* S3 resume case: the old state is always 0 (UNDESIRED) and the restored state is ENABLED.
7584          *
7585          * Handles:     UNDESIRED -> ENABLED
7586          */
7587         if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7588             new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7589                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7590
7591         /* Stream removed and re-enabled
7592          *
7593          * Can sometimes overlap with the HPD case,
7594          * thus set update_hdcp to false to avoid
7595          * setting HDCP multiple times.
7596          *
7597          * Handles:     DESIRED -> DESIRED (Special case)
7598          */
7599         if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
7600                 new_conn_state->crtc && new_conn_state->crtc->enabled &&
7601                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7602                 dm_con_state->update_hdcp = false;
7603                 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
7604                         __func__);
7605                 return true;
7606         }
7607
7608         /* Hot-plug, headless s3, dpms
7609          *
7610          * Only start HDCP if the display is connected/enabled.
7611          * update_hdcp flag will be set to false until the next
7612          * HPD comes in.
7613          *
7614          * Handles:     DESIRED -> DESIRED (Special case)
7615          */
7616         if (dm_con_state->update_hdcp &&
7617             new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7618             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7619                 dm_con_state->update_hdcp = false;
7620                 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
7621                         __func__);
7622                 return true;
7623         }
7624
7625         if (old_conn_state->content_protection == new_conn_state->content_protection) {
7626                 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7627                         if (new_crtc_state && new_crtc_state->mode_changed) {
7628                                 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
7629                                         __func__);
7630                                 return true;
7631                         }
7632                         pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
7633                                 __func__);
7634                         return false;
7635                 }
7636
7637                 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
7638                 return false;
7639         }
7640
7641         if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7642                 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
7643                         __func__);
7644                 return true;
7645         }
7646
7647         pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
7648         return false;
7649 }
7650 #endif
7651
7652 static void remove_stream(struct amdgpu_device *adev,
7653                           struct amdgpu_crtc *acrtc,
7654                           struct dc_stream_state *stream)
7655 {
7656         /* this is the update mode case */
7657
7658         acrtc->otg_inst = -1;
7659         acrtc->enabled = false;
7660 }
7661
7662 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7663 {
7664
7665         assert_spin_locked(&acrtc->base.dev->event_lock);
7666         WARN_ON(acrtc->event);
7667
7668         acrtc->event = acrtc->base.state->event;
7669
7670         /* Set the flip status */
7671         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7672
7673         /* Mark this event as consumed */
7674         acrtc->base.state->event = NULL;
7675
7676         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7677                      acrtc->crtc_id);
7678 }
7679
7680 static void update_freesync_state_on_stream(
7681         struct amdgpu_display_manager *dm,
7682         struct dm_crtc_state *new_crtc_state,
7683         struct dc_stream_state *new_stream,
7684         struct dc_plane_state *surface,
7685         u32 flip_timestamp_in_us)
7686 {
7687         struct mod_vrr_params vrr_params;
7688         struct dc_info_packet vrr_infopacket = {0};
7689         struct amdgpu_device *adev = dm->adev;
7690         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7691         unsigned long flags;
7692         bool pack_sdp_v1_3 = false;
7693         struct amdgpu_dm_connector *aconn;
7694         enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
7695
7696         if (!new_stream)
7697                 return;
7698
7699         /*
7700          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7701          * For now it's sufficient to just guard against these conditions.
7702          */
7703
7704         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7705                 return;
7706
7707         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7708         vrr_params = acrtc->dm_irq_params.vrr_params;
7709
7710         if (surface) {
7711                 mod_freesync_handle_preflip(
7712                         dm->freesync_module,
7713                         surface,
7714                         new_stream,
7715                         flip_timestamp_in_us,
7716                         &vrr_params);
7717
7718                 if (adev->family < AMDGPU_FAMILY_AI &&
7719                     amdgpu_dm_vrr_active(new_crtc_state)) {
7720                         mod_freesync_handle_v_update(dm->freesync_module,
7721                                                      new_stream, &vrr_params);
7722
7723                         /* Need to call this before the frame ends. */
7724                         dc_stream_adjust_vmin_vmax(dm->dc,
7725                                                    new_crtc_state->stream,
7726                                                    &vrr_params.adjust);
7727                 }
7728         }
7729
7730         aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
7731
7732         if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
7733                 pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
7734
7735                 if (aconn->vsdb_info.amd_vsdb_version == 1)
7736                         packet_type = PACKET_TYPE_FS_V1;
7737                 else if (aconn->vsdb_info.amd_vsdb_version == 2)
7738                         packet_type = PACKET_TYPE_FS_V2;
7739                 else if (aconn->vsdb_info.amd_vsdb_version == 3)
7740                         packet_type = PACKET_TYPE_FS_V3;
7741
7742                 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
7743                                         &new_stream->adaptive_sync_infopacket);
7744         }
7745
7746         mod_freesync_build_vrr_infopacket(
7747                 dm->freesync_module,
7748                 new_stream,
7749                 &vrr_params,
7750                 packet_type,
7751                 TRANSFER_FUNC_UNKNOWN,
7752                 &vrr_infopacket,
7753                 pack_sdp_v1_3);
7754
7755         new_crtc_state->freesync_vrr_info_changed |=
7756                 (memcmp(&new_crtc_state->vrr_infopacket,
7757                         &vrr_infopacket,
7758                         sizeof(vrr_infopacket)) != 0);
7759
7760         acrtc->dm_irq_params.vrr_params = vrr_params;
7761         new_crtc_state->vrr_infopacket = vrr_infopacket;
7762
7763         new_stream->vrr_infopacket = vrr_infopacket;
7764         new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
7765
7766         if (new_crtc_state->freesync_vrr_info_changed)
7767                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
7768                               new_crtc_state->base.crtc->base.id,
7769                               (int)new_crtc_state->base.vrr_enabled,
7770                               (int)vrr_params.state);
7771
7772         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7773 }
7774
7775 static void update_stream_irq_parameters(
7776         struct amdgpu_display_manager *dm,
7777         struct dm_crtc_state *new_crtc_state)
7778 {
7779         struct dc_stream_state *new_stream = new_crtc_state->stream;
7780         struct mod_vrr_params vrr_params;
7781         struct mod_freesync_config config = new_crtc_state->freesync_config;
7782         struct amdgpu_device *adev = dm->adev;
7783         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7784         unsigned long flags;
7785
7786         if (!new_stream)
7787                 return;
7788
7789         /*
7790          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7791          * For now it's sufficient to just guard against these conditions.
7792          */
7793         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7794                 return;
7795
7796         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7797         vrr_params = acrtc->dm_irq_params.vrr_params;
7798
7799         if (new_crtc_state->vrr_supported &&
7800             config.min_refresh_in_uhz &&
7801             config.max_refresh_in_uhz) {
7802                 /*
7803                  * if freesync compatible mode was set, config.state will be set
7804                  * in atomic check
7805                  */
7806                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7807                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7808                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7809                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7810                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7811                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7812                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7813                 } else {
7814                         config.state = new_crtc_state->base.vrr_enabled ?
7815                                                      VRR_STATE_ACTIVE_VARIABLE :
7816                                                      VRR_STATE_INACTIVE;
7817                 }
7818         } else {
7819                 config.state = VRR_STATE_UNSUPPORTED;
7820         }
7821
7822         mod_freesync_build_vrr_params(dm->freesync_module,
7823                                       new_stream,
7824                                       &config, &vrr_params);
7825
7826         new_crtc_state->freesync_config = config;
7827         /* Copy state for access from DM IRQ handler */
7828         acrtc->dm_irq_params.freesync_config = config;
7829         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7830         acrtc->dm_irq_params.vrr_params = vrr_params;
7831         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7832 }
7833
7834 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7835                                             struct dm_crtc_state *new_state)
7836 {
7837         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7838         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7839
7840         if (!old_vrr_active && new_vrr_active) {
7841                 /* Transition VRR inactive -> active:
7842                  * While VRR is active, we must not disable vblank irq, as a
7843                  * reenable after a disable would compute bogus vblank/pflip
7844                  * timestamps if the reenable happened inside the display front-porch.
7845                  *
7846                  * We also need vupdate irq for the actual core vblank handling
7847                  * at end of vblank.
7848                  */
7849                 WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
7850                 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
7851                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7852                                  __func__, new_state->base.crtc->base.id);
7853         } else if (old_vrr_active && !new_vrr_active) {
7854                 /* Transition VRR active -> inactive:
7855                  * Allow vblank irq disable again for fixed refresh rate.
7856                  */
7857                 WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
7858                 drm_crtc_vblank_put(new_state->base.crtc);
7859                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7860                                  __func__, new_state->base.crtc->base.id);
7861         }
7862 }
7863
7864 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7865 {
7866         struct drm_plane *plane;
7867         struct drm_plane_state *old_plane_state;
7868         int i;
7869
7870         /*
7871          * TODO: Make this per-stream so we don't issue redundant updates for
7872          * commits with multiple streams.
7873          */
7874         for_each_old_plane_in_state(state, plane, old_plane_state, i)
7875                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7876                         handle_cursor_update(plane, old_plane_state);
7877 }
7878
7879 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7880                                     struct dc_state *dc_state,
7881                                     struct drm_device *dev,
7882                                     struct amdgpu_display_manager *dm,
7883                                     struct drm_crtc *pcrtc,
7884                                     bool wait_for_vblank)
7885 {
7886         u32 i;
7887         u64 timestamp_ns = ktime_get_ns();
7888         struct drm_plane *plane;
7889         struct drm_plane_state *old_plane_state, *new_plane_state;
7890         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7891         struct drm_crtc_state *new_pcrtc_state =
7892                         drm_atomic_get_new_crtc_state(state, pcrtc);
7893         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7894         struct dm_crtc_state *dm_old_crtc_state =
7895                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7896         int planes_count = 0, vpos, hpos;
7897         unsigned long flags;
7898         u32 target_vblank, last_flip_vblank;
7899         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7900         bool cursor_update = false;
7901         bool pflip_present = false;
7902         bool dirty_rects_changed = false;
7903         struct {
7904                 struct dc_surface_update surface_updates[MAX_SURFACES];
7905                 struct dc_plane_info plane_infos[MAX_SURFACES];
7906                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7907                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7908                 struct dc_stream_update stream_update;
7909         } *bundle;
7910
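             /*
              * The bundle holds MAX_SURFACES-sized per-plane arrays and is
              * too large for the stack, so allocate it on the heap.
              */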
7911         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7912
7913         if (!bundle) {
7914                 dm_error("Failed to allocate update bundle\n");
7915                 goto cleanup;
7916         }
7917
7918         /*
7919          * Disable the cursor first if we're disabling all the planes.
7920          * It'll remain on the screen after the planes are re-enabled
7921          * if we don't.
7922          */
7923         if (acrtc_state->active_planes == 0)
7924                 amdgpu_dm_commit_cursors(state);
7925
7926         /* update planes when needed */
7927         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7928                 struct drm_crtc *crtc = new_plane_state->crtc;
7929                 struct drm_crtc_state *new_crtc_state;
7930                 struct drm_framebuffer *fb = new_plane_state->fb;
7931                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7932                 bool plane_needs_flip;
7933                 struct dc_plane_state *dc_plane;
7934                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7935
7936                 /* Cursor plane is handled after stream updates */
7937                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7938                         if ((fb && crtc == pcrtc) ||
7939                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
7940                                 cursor_update = true;
7941
7942                         continue;
7943                 }
7944
7945                 if (!fb || !crtc || pcrtc != crtc)
7946                         continue;
7947
7948                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7949                 if (!new_crtc_state->active)
7950                         continue;
7951
7952                 dc_plane = dm_new_plane_state->dc_state;
7953
7954                 bundle->surface_updates[planes_count].surface = dc_plane;
7955                 if (new_pcrtc_state->color_mgmt_changed) {
7956                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7957                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7958                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7959                 }
7960
7961                 fill_dc_scaling_info(dm->adev, new_plane_state,
7962                                      &bundle->scaling_infos[planes_count]);
7963
7964                 bundle->surface_updates[planes_count].scaling_info =
7965                         &bundle->scaling_infos[planes_count];
7966
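                /*
                 * A flip needs both an old and a new framebuffer; otherwise
                 * the plane is being enabled or disabled, not flipped.
                 */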
7967                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7968
7969                 pflip_present = pflip_present || plane_needs_flip;
7970
7971                 if (!plane_needs_flip) {
7972                         planes_count += 1;
7973                         continue;
7974                 }
7975
7976                 fill_dc_plane_info_and_addr(
7977                         dm->adev, new_plane_state,
7978                         afb->tiling_flags,
7979                         &bundle->plane_infos[planes_count],
7980                         &bundle->flip_addrs[planes_count].address,
7981                         afb->tmz_surface, false);
7982
7983                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
7984                                  new_plane_state->plane->index,
7985                                  bundle->plane_infos[planes_count].dcc.enable);
7986
7987                 bundle->surface_updates[planes_count].plane_info =
7988                         &bundle->plane_infos[planes_count];
7989
7990                 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
7991                         fill_dc_dirty_rects(plane, old_plane_state,
7992                                             new_plane_state, new_crtc_state,
7993                                             &bundle->flip_addrs[planes_count],
7994                                             &dirty_rects_changed);
7995
7996                         /*
7997                          * If the dirty regions changed, PSR-SU needs to be disabled temporarily
7998                          * and then re-enabled once the dirty regions are stable, to avoid video
7999                          * glitches. PSR-SU will be re-enabled in vblank_control_worker() if the
8000                          * user pauses the video while PSR-SU is disabled.
8001                          */
8002                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8003                             acrtc_attach->dm_irq_params.allow_psr_entry &&
8004 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8005                             !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8006 #endif
8007                             dirty_rects_changed) {
8008                                 mutex_lock(&dm->dc_lock);
8009                                 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
8010                                 timestamp_ns;
8011                                 if (acrtc_state->stream->link->psr_settings.psr_allow_active)
8012                                         amdgpu_dm_psr_disable(acrtc_state->stream);
8013                                 mutex_unlock(&dm->dc_lock);
8014                         }
8015                 }
8016
8017                 /*
8018                  * Only allow immediate flips for fast updates that don't
8019                  * change FB pitch, DCC state, rotation or mirroring.
8020                  */
8021                 bundle->flip_addrs[planes_count].flip_immediate =
8022                         crtc->state->async_flip &&
8023                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8024
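                /* Refresh the timestamp; DC wants the flip time in microseconds. */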
8025                 timestamp_ns = ktime_get_ns();
8026                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8027                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8028                 bundle->surface_updates[planes_count].surface = dc_plane;
8029
8030                 if (!bundle->surface_updates[planes_count].surface) {
8031                         DRM_ERROR("No surface for CRTC: id=%d\n",
8032                                         acrtc_attach->crtc_id);
8033                         continue;
8034                 }
8035
8036                 if (plane == pcrtc->primary)
8037                         update_freesync_state_on_stream(
8038                                 dm,
8039                                 acrtc_state,
8040                                 acrtc_state->stream,
8041                                 dc_plane,
8042                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8043
8044                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
8045                                  __func__,
8046                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8047                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8048
8049                 planes_count += 1;
8050
8051         }
8052
8053         if (pflip_present) {
8054                 if (!vrr_active) {
8055                         /* Use old throttling in non-vrr fixed refresh rate mode
8056                          * to keep flip scheduling based on target vblank counts
8057                          * working in a backwards compatible way, e.g., for
8058                          * clients using the GLX_OML_sync_control extension or
8059                          * DRI3/Present extension with defined target_msc.
8060                          */
8061                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8062                 } else {
8064                         /* For variable refresh rate mode only:
8065                          * Get vblank of last completed flip to avoid > 1 vrr
8066                          * flips per video frame by use of throttling, but allow
8067                          * flip programming anywhere in the possibly large
8068                          * variable vrr vblank interval for fine-grained flip
8069                          * timing control and more opportunity to avoid stutter
8070                          * on late submission of flips.
8071                          */
8072                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8073                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8074                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8075                 }
8076
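                /*
                 * wait_for_vblank acts as a 0/1 increment here: target the
                 * vblank after the last completed flip when throttling,
                 * otherwise the same one.
                 */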
8077                 target_vblank = last_flip_vblank + wait_for_vblank;
8078
8079                 /*
8080                  * Wait until we're out of the vertical blank period before the one
8081                  * targeted by the flip
8082                  */
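                /*
                 * Poll at roughly 1 ms intervals while the scanout position
                 * is still inside vblank and the target vblank count has not
                 * yet been reached.
                 */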
8083                 while ((acrtc_attach->enabled &&
8084                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8085                                                             0, &vpos, &hpos, NULL,
8086                                                             NULL, &pcrtc->hwmode)
8087                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8088                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8089                         (int)(target_vblank -
8090                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8091                         usleep_range(1000, 1100);
8092                 }
8093
8094                 /**
8095                  * Prepare the flip event for the pageflip interrupt to handle.
8096                  *
8097                  * This only works in the case where we've already turned on the
8098                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
8099                  * from 0 -> n planes we have to skip a hardware generated event
8100                  * and rely on sending it from software.
8101                  */
8102                 if (acrtc_attach->base.state->event &&
8103                     acrtc_state->active_planes > 0) {
8104                         drm_crtc_vblank_get(pcrtc);
8105
8106                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8107
8108                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8109                         prepare_flip_isr(acrtc_attach);
8110
8111                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8112                 }
8113
8114                 if (acrtc_state->stream) {
8115                         if (acrtc_state->freesync_vrr_info_changed)
8116                                 bundle->stream_update.vrr_infopacket =
8117                                         &acrtc_state->stream->vrr_infopacket;
8118                 }
8119         } else if (cursor_update && acrtc_state->active_planes > 0 &&
8120                    acrtc_attach->base.state->event) {
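                /*
                 * Cursor-only commit: no pageflip interrupt will fire, so
                 * stash the event on the acrtc here; it is sent from the
                 * vblank interrupt handler instead.
                 */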
8121                 drm_crtc_vblank_get(pcrtc);
8122
8123                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8124
8125                 acrtc_attach->event = acrtc_attach->base.state->event;
8126                 acrtc_attach->base.state->event = NULL;
8127
8128                 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8129         }
8130
8131         /* Update the planes if changed or disable if we don't have any. */
8132         if ((planes_count || acrtc_state->active_planes == 0) &&
8133                 acrtc_state->stream) {
8134                 /*
8135                  * If PSR or idle optimizations are enabled then flush out
8136                  * any pending work before hardware programming.
8137                  */
8138                 if (dm->vblank_control_workqueue)
8139                         flush_workqueue(dm->vblank_control_workqueue);
8140
8141                 bundle->stream_update.stream = acrtc_state->stream;
8142                 if (new_pcrtc_state->mode_changed) {
8143                         bundle->stream_update.src = acrtc_state->stream->src;
8144                         bundle->stream_update.dst = acrtc_state->stream->dst;
8145                 }
8146
8147                 if (new_pcrtc_state->color_mgmt_changed) {
8148                         /*
8149                          * TODO: This isn't fully correct since we've actually
8150                          * already modified the stream in place.
8151                          */
8152                         bundle->stream_update.gamut_remap =
8153                                 &acrtc_state->stream->gamut_remap_matrix;
8154                         bundle->stream_update.output_csc_transform =
8155                                 &acrtc_state->stream->csc_color_matrix;
8156                         bundle->stream_update.out_transfer_func =
8157                                 acrtc_state->stream->out_transfer_func;
8158                 }
8159
8160                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8161                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8162                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8163
8164                 /*
8165                  * If FreeSync state on the stream has changed then we need to
8166                  * re-adjust the min/max bounds now that DC doesn't handle this
8167                  * as part of commit.
8168                  */
8169                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8170                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8171                         dc_stream_adjust_vmin_vmax(
8172                                 dm->dc, acrtc_state->stream,
8173                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8174                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8175                 }
8176                 mutex_lock(&dm->dc_lock);
8177                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8178                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8179                         amdgpu_dm_psr_disable(acrtc_state->stream);
8180
8181                 dc_commit_updates_for_stream(dm->dc,
8182                                                      bundle->surface_updates,
8183                                                      planes_count,
8184                                                      acrtc_state->stream,
8185                                                      &bundle->stream_update,
8186                                                      dc_state);
8187
8188                 /**
8189                  * Enable or disable the interrupts on the backend.
8190                  *
8191                  * Most pipes are put into power gating when unused.
8192                  *
8193                  * When power gating is enabled on a pipe we lose the
8194                  * interrupt enablement state when power gating is disabled.
8195                  *
8196                  * So we need to update the IRQ control state in hardware
8197                  * whenever the pipe turns on (since it could be previously
8198                  * power gated) or off (since some pipes can't be power gated
8199                  * on some ASICs).
8200                  */
8201                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8202                         dm_update_pflip_irq_state(drm_to_adev(dev),
8203                                                   acrtc_attach);
8204
8205                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8206                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8207                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8208                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8209
8210                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
8211                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8212                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8213                         struct amdgpu_dm_connector *aconn =
8214                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8215
8216                         if (aconn->psr_skip_count > 0)
8217                                 aconn->psr_skip_count--;
8218
8219                         /* Allow PSR when skip count is 0. */
8220                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8221
8222                         /*
8223                          * If sink supports PSR SU, there is no need to rely on
8224                          * a vblank event disable request to enable PSR. PSR SU
8225                          * can be enabled immediately once OS demonstrates an
8226                          * adequate number of fast atomic commits to notify KMD
8227                          * of update events. See `vblank_control_worker()`.
8228                          */
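                        /*
                         * The 500000000 ns constant below is a 500 ms window:
                         * only re-enable PSR once the dirty rects have been
                         * stable for at least that long.
                         */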
8229                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8230                             acrtc_attach->dm_irq_params.allow_psr_entry &&
8231 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8232                             !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8233 #endif
8234                             !acrtc_state->stream->link->psr_settings.psr_allow_active &&
8235                             (timestamp_ns -
8236                             acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
8237                             500000000)
8238                                 amdgpu_dm_psr_enable(acrtc_state->stream);
8239                 } else {
8240                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
8241                 }
8242
8243                 mutex_unlock(&dm->dc_lock);
8244         }
8245
8246         /*
8247          * Update cursor state *after* programming all the planes.
8248          * This avoids redundant programming in the case where we're going
8249          * to be disabling a single plane - those pipes are being disabled.
8250          */
8251         if (acrtc_state->active_planes)
8252                 amdgpu_dm_commit_cursors(state);
8253
8254 cleanup:
8255         kfree(bundle);
8256 }
8257
8258 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8259                                    struct drm_atomic_state *state)
8260 {
8261         struct amdgpu_device *adev = drm_to_adev(dev);
8262         struct amdgpu_dm_connector *aconnector;
8263         struct drm_connector *connector;
8264         struct drm_connector_state *old_con_state, *new_con_state;
8265         struct drm_crtc_state *new_crtc_state;
8266         struct dm_crtc_state *new_dm_crtc_state;
8267         const struct dc_stream_status *status;
8268         int i, inst;
8269
8270         /* Notify audio device removals. */
8271         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8272                 if (old_con_state->crtc != new_con_state->crtc) {
8273                         /* CRTC changes require notification. */
8274                         goto notify;
8275                 }
8276
8277                 if (!new_con_state->crtc)
8278                         continue;
8279
8280                 new_crtc_state = drm_atomic_get_new_crtc_state(
8281                         state, new_con_state->crtc);
8282
8283                 if (!new_crtc_state)
8284                         continue;
8285
8286                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8287                         continue;
8288
8289         notify:
8290                 aconnector = to_amdgpu_dm_connector(connector);
8291
8292                 mutex_lock(&adev->dm.audio_lock);
8293                 inst = aconnector->audio_inst;
8294                 aconnector->audio_inst = -1;
8295                 mutex_unlock(&adev->dm.audio_lock);
8296
8297                 amdgpu_dm_audio_eld_notify(adev, inst);
8298         }
8299
8300         /* Notify audio device additions. */
8301         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8302                 if (!new_con_state->crtc)
8303                         continue;
8304
8305                 new_crtc_state = drm_atomic_get_new_crtc_state(
8306                         state, new_con_state->crtc);
8307
8308                 if (!new_crtc_state)
8309                         continue;
8310
8311                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8312                         continue;
8313
8314                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8315                 if (!new_dm_crtc_state->stream)
8316                         continue;
8317
8318                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8319                 if (!status)
8320                         continue;
8321
8322                 aconnector = to_amdgpu_dm_connector(connector);
8323
8324                 mutex_lock(&adev->dm.audio_lock);
8325                 inst = status->audio_inst;
8326                 aconnector->audio_inst = inst;
8327                 mutex_unlock(&adev->dm.audio_lock);
8328
8329                 amdgpu_dm_audio_eld_notify(adev, inst);
8330         }
8331 }
8332
8333 /*
8334  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8335  * @crtc_state: the DRM CRTC state
8336  * @stream_state: the DC stream state.
8337  *
8338  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8339  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8340  */
8341 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8342                                                 struct dc_stream_state *stream_state)
8343 {
8344         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8345 }
8346
8347 /**
8348  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8349  * @state: The atomic state to commit
8350  *
8351  * This will tell DC to commit the constructed DC state from atomic_check,
8352  * programming the hardware. Any failure here implies a hardware failure, since
8353  * atomic check should have filtered anything non-kosher.
8354  */
8355 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8356 {
8357         struct drm_device *dev = state->dev;
8358         struct amdgpu_device *adev = drm_to_adev(dev);
8359         struct amdgpu_display_manager *dm = &adev->dm;
8360         struct dm_atomic_state *dm_state;
8361         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8362         u32 i, j;
8363         struct drm_crtc *crtc;
8364         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8365         unsigned long flags;
8366         bool wait_for_vblank = true;
8367         struct drm_connector *connector;
8368         struct drm_connector_state *old_con_state, *new_con_state;
8369         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8370         int crtc_disable_count = 0;
8371         bool mode_set_reset_required = false;
8372         int r;
8373
8374         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8375
8376         r = drm_atomic_helper_wait_for_fences(dev, state, false);
8377         if (unlikely(r))
8378                 DRM_ERROR("Waiting for fences timed out!\n");
8379
8380         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8381         drm_dp_mst_atomic_wait_for_dependencies(state);
8382
8383         dm_state = dm_atomic_get_new_state(state);
8384         if (dm_state && dm_state->context) {
8385                 dc_state = dm_state->context;
8386         } else {
8387                 /* No state changes, retain current state. */
8388                 dc_state_temp = dc_create_state(dm->dc);
8389                 ASSERT(dc_state_temp);
8390                 dc_state = dc_state_temp;
8391                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8392         }
8393
8394         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8395                                        new_crtc_state, i) {
8396                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8397
8398                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8399
8400                 if (old_crtc_state->active &&
8401                     (!new_crtc_state->active ||
8402                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8403                         manage_dm_interrupts(adev, acrtc, false);
8404                         dc_stream_release(dm_old_crtc_state->stream);
8405                 }
8406         }
8407
8408         drm_atomic_helper_calc_timestamping_constants(state);
8409
8410         /* update changed items */
8411         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8412                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8413
8414                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8415                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8416
8417                 drm_dbg_state(state->dev,
8418                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8419                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8420                         "connectors_changed:%d\n",
8421                         acrtc->crtc_id,
8422                         new_crtc_state->enable,
8423                         new_crtc_state->active,
8424                         new_crtc_state->planes_changed,
8425                         new_crtc_state->mode_changed,
8426                         new_crtc_state->active_changed,
8427                         new_crtc_state->connectors_changed);
8428
8429                 /* Disable cursor if disabling crtc */
8430                 if (old_crtc_state->active && !new_crtc_state->active) {
8431                         struct dc_cursor_position position;
8432
8433                         memset(&position, 0, sizeof(position));
8434                         mutex_lock(&dm->dc_lock);
8435                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8436                         mutex_unlock(&dm->dc_lock);
8437                 }
8438
8439                 /* Copy all transient state flags into dc state */
8440                 if (dm_new_crtc_state->stream) {
8441                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8442                                                             dm_new_crtc_state->stream);
8443                 }
8444
8445                 /* handles headless hotplug case, updating new_state and
8446                  * aconnector as needed
8447                  */
8448
8449                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8450
8451                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8452
8453                         if (!dm_new_crtc_state->stream) {
8454                                 /*
8455                                  * This could happen because of issues with
8456                                  * userspace notification delivery. In this
8457                                  * case userspace tries to set a mode on a
8458                                  * display which is in fact disconnected, so
8459                                  * dc_sink is NULL on the aconnector. We
8460                                  * expect a mode reset to come soon.
8461                                  *
8462                                  * This can also happen when an unplug occurs
8463                                  * during the resume sequence.
8464                                  *
8465                                  * In this case, we want to pretend we still
8466                                  * have a sink to keep the pipe running so that
8467                                  * hw state is consistent with the sw state.
8468                                  */
8469                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8470                                                 __func__, acrtc->base.base.id);
8471                                 continue;
8472                         }
8473
8474                         if (dm_old_crtc_state->stream)
8475                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8476
8477                         pm_runtime_get_noresume(dev->dev);
8478
8479                         acrtc->enabled = true;
8480                         acrtc->hw_mode = new_crtc_state->mode;
8481                         crtc->hwmode = new_crtc_state->mode;
8482                         mode_set_reset_required = true;
8483                 } else if (modereset_required(new_crtc_state)) {
8484                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8485                         /* i.e. reset mode */
8486                         if (dm_old_crtc_state->stream)
8487                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8488
8489                         mode_set_reset_required = true;
8490                 }
8491         } /* for_each_crtc_in_state() */
8492
8493         if (dc_state) {
8494                 /* If there was a mode set or reset, disable eDP PSR */
8495                 if (mode_set_reset_required) {
8496                         if (dm->vblank_control_workqueue)
8497                                 flush_workqueue(dm->vblank_control_workqueue);
8498
8499                         amdgpu_dm_psr_disable_all(dm);
8500                 }
8501
8502                 dm_enable_per_frame_crtc_master_sync(dc_state);
8503                 mutex_lock(&dm->dc_lock);
8504                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8505
8506                 /* Allow idle optimization when vblank count is 0 for display off */
8507                 if (dm->active_vblank_irq_count == 0)
8508                         dc_allow_idle_optimizations(dm->dc, true);
8509                 mutex_unlock(&dm->dc_lock);
8510         }
8511
8512         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8513                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8514
8515                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8516
8517                 if (dm_new_crtc_state->stream != NULL) {
8518                         const struct dc_stream_status *status =
8519                                         dc_stream_get_status(dm_new_crtc_state->stream);
8520
8521                         if (!status)
8522                                 status = dc_stream_get_status_from_state(dc_state,
8523                                                                          dm_new_crtc_state->stream);
8524                         if (!status)
8525                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
8526                         else
8527                                 acrtc->otg_inst = status->primary_otg_inst;
8528                 }
8529         }
8530 #ifdef CONFIG_DRM_AMD_DC_HDCP
8531         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8532                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8533                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8534                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8535
8536                 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
8537
8538                 if (!connector)
8539                         continue;
8540
8541                 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
8542                         connector->index, connector->status, connector->dpms);
8543                 pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
8544                         old_con_state->content_protection, new_con_state->content_protection);
8545
8546                 if (aconnector->dc_sink) {
8547                         if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
8548                                 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
8549                                 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
8550                                 aconnector->dc_sink->edid_caps.display_name);
8551                         }
8552                 }
8553
8554                 new_crtc_state = NULL;
8555                 old_crtc_state = NULL;
8556
8557                 if (acrtc) {
8558                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8559                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8560                 }
8561
8562                 if (old_crtc_state)
8563                         pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8564                         old_crtc_state->enable,
8565                         old_crtc_state->active,
8566                         old_crtc_state->mode_changed,
8567                         old_crtc_state->active_changed,
8568                         old_crtc_state->connectors_changed);
8569
8570                 if (new_crtc_state)
8571                         pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8572                         new_crtc_state->enable,
8573                         new_crtc_state->active,
8574                         new_crtc_state->mode_changed,
8575                         new_crtc_state->active_changed,
8576                         new_crtc_state->connectors_changed);
8577         }
8578
8579         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8580                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8581                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8582                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8583
8584                 new_crtc_state = NULL;
8585                 old_crtc_state = NULL;
8586
8587                 if (acrtc) {
8588                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8589                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8590                 }
8591
8592                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8593
8594                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8595                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8596                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8597                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8598                         dm_new_con_state->update_hdcp = true;
8599                         continue;
8600                 }
8601
8602                 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
8603                                                                                         old_con_state, connector, adev->dm.hdcp_workqueue)) {
8604                         /* When a display is unplugged from an MST hub, the connector
8605                          * will be destroyed within dm_dp_mst_connector_destroy. Its
8606                          * hdcp properties, like type, undesired, desired, enabled,
8607                          * will be lost. So, save the hdcp properties into hdcp_work within
8608                          * amdgpu_dm_atomic_commit_tail. If the same display is
8609                          * plugged back with the same display index, its hdcp properties
8610                          * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
8611                          */
8612
8613                         bool enable_encryption = false;
8614
8615                         if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
8616                                 enable_encryption = true;
8617
8618                         if (aconnector->dc_link && aconnector->dc_sink &&
8619                                 aconnector->dc_link->type == dc_connection_mst_branch) {
8620                                 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
8621                                 struct hdcp_workqueue *hdcp_w =
8622                                         &hdcp_work[aconnector->dc_link->link_index];
8623
8624                                 hdcp_w->hdcp_content_type[connector->index] =
8625                                         new_con_state->hdcp_content_type;
8626                                 hdcp_w->content_protection[connector->index] =
8627                                         new_con_state->content_protection;
8628                         }
8629
8630                         if (new_crtc_state && new_crtc_state->mode_changed &&
8631                                 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
8632                                 enable_encryption = true;
8633
8634                         DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
8635
8636                         hdcp_update_display(
8637                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8638                                 new_con_state->hdcp_content_type, enable_encryption);
8639                 }
8640         }
8641 #endif
8642
8643         /* Handle connector state changes */
8644         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8645                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8646                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8647                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8648                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8649                 struct dc_stream_update stream_update;
8650                 struct dc_info_packet hdr_packet;
8651                 struct dc_stream_status *status = NULL;
8652                 bool abm_changed, hdr_changed, scaling_changed;
8653
8654                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8655                 memset(&stream_update, 0, sizeof(stream_update));
8656
8657                 if (acrtc) {
8658                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8659                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8660                 }
8661
8662                 /* Skip any modesets/resets */
8663                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8664                         continue;
8665
8666                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8667                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8668
8669                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8670                                                              dm_old_con_state);
8671
8672                 abm_changed = dm_new_crtc_state->abm_level !=
8673                               dm_old_crtc_state->abm_level;
8674
8675                 hdr_changed =
8676                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8677
8678                 if (!scaling_changed && !abm_changed && !hdr_changed)
8679                         continue;
8680
8681                 stream_update.stream = dm_new_crtc_state->stream;
8682                 if (scaling_changed) {
8683                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8684                                         dm_new_con_state, dm_new_crtc_state->stream);
8685
8686                         stream_update.src = dm_new_crtc_state->stream->src;
8687                         stream_update.dst = dm_new_crtc_state->stream->dst;
8688                 }
8689
8690                 if (abm_changed) {
8691                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8692
8693                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8694                 }
8695
8696                 if (hdr_changed) {
8697                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8698                         stream_update.hdr_static_metadata = &hdr_packet;
8699                 }
8700
8701                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8702
8703                 if (WARN_ON(!status))
8704                         continue;
8705
8706                 WARN_ON(!status->plane_count);
8707
8708                 /*
8709                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8710                  * Here we create an empty update on each plane.
8711                  * To fix this, DC should permit updating only stream properties.
8712                  */
8713                 for (j = 0; j < status->plane_count; j++)
8714                         dummy_updates[j].surface = status->plane_states[0];
8715
8716
8717                 mutex_lock(&dm->dc_lock);
8718                 dc_commit_updates_for_stream(dm->dc,
8719                                                      dummy_updates,
8720                                                      status->plane_count,
8721                                                      dm_new_crtc_state->stream,
8722                                                      &stream_update,
8723                                                      dc_state);
8724                 mutex_unlock(&dm->dc_lock);
8725         }
8726
8727         /**
8728          * Enable interrupts for CRTCs that are newly enabled or went through
8729          * a modeset. This is intentionally deferred until after the front end
8730          * state has been modified, so that the OTG is on before the IRQ
8731          * handlers run and they never access stale or invalid state.
8732          */
8733         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8734                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8735 #ifdef CONFIG_DEBUG_FS
8736                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8737 #endif
8738                 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8739                 if (old_crtc_state->active && !new_crtc_state->active)
8740                         crtc_disable_count++;
8741
8742                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8743                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8744
8745                 /* For freesync config update on crtc state and params for irq */
8746                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8747
8748 #ifdef CONFIG_DEBUG_FS
8749                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8750                 cur_crc_src = acrtc->dm_irq_params.crc_src;
8751                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8752 #endif
8753
8754                 if (new_crtc_state->active &&
8755                     (!old_crtc_state->active ||
8756                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8757                         dc_stream_retain(dm_new_crtc_state->stream);
8758                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8759                         manage_dm_interrupts(adev, acrtc, true);
8760                 }
8761                 /* Handle vrr on->off / off->on transitions */
8762                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
8763
8764 #ifdef CONFIG_DEBUG_FS
8765                 if (new_crtc_state->active &&
8766                     (!old_crtc_state->active ||
8767                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8768                         /**
8769                          * Frontend may have changed so reapply the CRC capture
8770                          * settings for the stream.
8771                          */
8772                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8773 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8774                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
8775                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8776                                         acrtc->dm_irq_params.window_param.update_win = true;
8777
8778                                         /**
8779                                          * It takes 2 frames for HW to stably generate CRC when
8780                                          * resuming from suspend, so we set skip_frame_cnt to 2.
8781                                          */
8782                                         acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
8783                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8784                                 }
8785 #endif
8786                                 if (amdgpu_dm_crtc_configure_crc_source(
8787                                         crtc, dm_new_crtc_state, cur_crc_src))
8788                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
8789                         }
8790                 }
8791 #endif
8792         }
8793
8794         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8795                 if (new_crtc_state->async_flip)
8796                         wait_for_vblank = false;
8797
8798         /* update planes when needed per crtc */
8799         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8800                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8801
8802                 if (dm_new_crtc_state->stream)
8803                         amdgpu_dm_commit_planes(state, dc_state, dev,
8804                                                 dm, crtc, wait_for_vblank);
8805         }
8806
8807         /* Update audio instances for each connector. */
8808         amdgpu_dm_commit_audio(dev, state);
8809
8810         /* restore the backlight level */
8811         for (i = 0; i < dm->num_of_edps; i++) {
8812                 if (dm->backlight_dev[i] &&
8813                     (dm->actual_brightness[i] != dm->brightness[i]))
8814                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8815         }
8816
8817         /*
8818          * send vblank event on all events not handled in flip and
8819          * mark consumed event for drm_atomic_helper_commit_hw_done
8820          */
8821         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8822         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8823
8824                 if (new_crtc_state->event)
8825                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8826
8827                 new_crtc_state->event = NULL;
8828         }
8829         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8830
8831         /* Signal HW programming completion */
8832         drm_atomic_helper_commit_hw_done(state);
8833
8834         if (wait_for_vblank)
8835                 drm_atomic_helper_wait_for_flip_done(dev, state);
8836
8837         drm_atomic_helper_cleanup_planes(dev, state);
8838
8839         /* return the stolen vga memory back to VRAM */
8840         if (!adev->mman.keep_stolen_vga_memory)
8841                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8842         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8843
8844         /*
8845          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8846          * so we can put the GPU into runtime suspend if we're not driving any
8847          * displays anymore
8848          */
8849         for (i = 0; i < crtc_disable_count; i++)
8850                 pm_runtime_put_autosuspend(dev->dev);
8851         pm_runtime_mark_last_busy(dev->dev);
8852
8853         if (dc_state_temp)
8854                 dc_release_state(dc_state_temp);
8855 }
8856
8857 static int dm_force_atomic_commit(struct drm_connector *connector)
8858 {
8859         int ret = 0;
8860         struct drm_device *ddev = connector->dev;
8861         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8862         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8863         struct drm_plane *plane = disconnected_acrtc->base.primary;
8864         struct drm_connector_state *conn_state;
8865         struct drm_crtc_state *crtc_state;
8866         struct drm_plane_state *plane_state;
8867
8868         if (!state)
8869                 return -ENOMEM;
8870
8871         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8872
8873         /* Construct an atomic state to restore the previous display setting */
8874
8875         /*
8876          * Attach connectors to drm_atomic_state
8877          */
8878         conn_state = drm_atomic_get_connector_state(state, connector);
8879
8880         ret = PTR_ERR_OR_ZERO(conn_state);
8881         if (ret)
8882                 goto out;
8883
8884         /* Attach crtc to drm_atomic_state */
8885         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8886
8887         ret = PTR_ERR_OR_ZERO(crtc_state);
8888         if (ret)
8889                 goto out;
8890
8891         /* force a restore */
8892         crtc_state->mode_changed = true;
8893
8894         /* Attach plane to drm_atomic_state */
8895         plane_state = drm_atomic_get_plane_state(state, plane);
8896
8897         ret = PTR_ERR_OR_ZERO(plane_state);
8898         if (ret)
8899                 goto out;
8900
8901         /* Call commit internally with the state we just constructed */
8902         ret = drm_atomic_commit(state);
8903
8904 out:
8905         drm_atomic_state_put(state);
8906         if (ret)
8907                 DRM_ERROR("Restoring old state failed with %i\n", ret);
8908
8909         return ret;
8910 }
8911
8912 /*
8913  * This function handles all cases where a set mode does not arrive upon hotplug.
8914  * This includes when a display is unplugged and then plugged back into the
8915  * same port, and when running without usermode desktop manager support.
8916  */
8917 void dm_restore_drm_connector_state(struct drm_device *dev,
8918                                     struct drm_connector *connector)
8919 {
8920         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8921         struct amdgpu_crtc *disconnected_acrtc;
8922         struct dm_crtc_state *acrtc_state;
8923
8924         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8925                 return;
8926
8927         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8928         if (!disconnected_acrtc)
8929                 return;
8930
8931         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8932         if (!acrtc_state->stream)
8933                 return;
8934
8935         /*
8936          * If the previous sink has not been released and differs from the
8937          * current one, we deduce that we cannot rely on a usermode call to
8938          * turn on the display, so we do it here.
8939          */
8940         if (acrtc_state->stream->sink != aconnector->dc_sink)
8941                 dm_force_atomic_commit(&aconnector->base);
8942 }
8943
8944 /*
8945  * Grabs all modesetting locks to serialize against any blocking commits and
8946  * waits for completion of all non-blocking commits.
8947  */
8948 static int do_aquire_global_lock(struct drm_device *dev,
8949                                  struct drm_atomic_state *state)
8950 {
8951         struct drm_crtc *crtc;
8952         struct drm_crtc_commit *commit;
8953         long ret;
8954
8955         /*
8956          * Adding all modeset locks to acquire_ctx will
8957          * ensure that when the framework releases it, the
8958          * extra locks we are taking here will get released too.
8959          */
8960         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8961         if (ret)
8962                 return ret;
8963
8964         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8965                 spin_lock(&crtc->commit_lock);
8966                 commit = list_first_entry_or_null(&crtc->commit_list,
8967                                 struct drm_crtc_commit, commit_entry);
8968                 if (commit)
8969                         drm_crtc_commit_get(commit);
8970                 spin_unlock(&crtc->commit_lock);
8971
8972                 if (!commit)
8973                         continue;
8974
8975                 /*
8976                  * Make sure all pending HW programming completed and
8977                  * page flips done
8978                  */
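                /*
                 * Both waits use a 10 second timeout (10*HZ jiffies); a return
                 * of 0 means the wait timed out, a negative value means it was
                 * interrupted by a signal.
                 */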
8979                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8980
8981                 if (ret > 0)
8982                         ret = wait_for_completion_interruptible_timeout(
8983                                         &commit->flip_done, 10*HZ);
8984
8985                 if (ret == 0)
8986                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
8987                                   crtc->base.id, crtc->name);
8988
8989                 drm_crtc_commit_put(commit);
8990         }
8991
8992         return ret < 0 ? ret : 0;
8993 }
8994
8995 static void get_freesync_config_for_crtc(
8996         struct dm_crtc_state *new_crtc_state,
8997         struct dm_connector_state *new_con_state)
8998 {
8999         struct mod_freesync_config config = {0};
9000         struct amdgpu_dm_connector *aconnector =
9001                         to_amdgpu_dm_connector(new_con_state->base.connector);
9002         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9003         int vrefresh = drm_mode_vrefresh(mode);
9004         bool fs_vid_mode = false;
9005
9006         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9007                                         vrefresh >= aconnector->min_vfreq &&
9008                                         vrefresh <= aconnector->max_vfreq;
9009
9010         if (new_crtc_state->vrr_supported) {
9011                 new_crtc_state->stream->ignore_msa_timing_param = true;
9012                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9013
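                /* min/max_vfreq are in Hz; the freesync module wants micro-Hz. */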
9014                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9015                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9016                 config.vsif_supported = true;
9017                 config.btr = true;
9018
9019                 if (fs_vid_mode) {
9020                         config.state = VRR_STATE_ACTIVE_FIXED;
9021                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9022                         goto out;
9023                 } else if (new_crtc_state->base.vrr_enabled) {
9024                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9025                 } else {
9026                         config.state = VRR_STATE_INACTIVE;
9027                 }
9028         }
9029 out:
9030         new_crtc_state->freesync_config = config;
9031 }
9032
9033 static void reset_freesync_config_for_crtc(
9034         struct dm_crtc_state *new_crtc_state)
9035 {
9036         new_crtc_state->vrr_supported = false;
9037
9038         memset(&new_crtc_state->vrr_infopacket, 0,
9039                sizeof(new_crtc_state->vrr_infopacket));
9040 }
9041
9042 static bool
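/*
 * "Unchanged" here means the same base timing retimed only in the vertical
 * blanking: vtotal and the vsync position differ (i.e. what amounts to a
 * stretched or shrunk vertical front porch) while the vsync pulse width and
 * all horizontal timing match. Hence the deliberate != checks below.
 */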
9043 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9044                                  struct drm_crtc_state *new_crtc_state)
9045 {
9046         const struct drm_display_mode *old_mode, *new_mode;
9047
9048         if (!old_crtc_state || !new_crtc_state)
9049                 return false;
9050
9051         old_mode = &old_crtc_state->mode;
9052         new_mode = &new_crtc_state->mode;
9053
9054         if (old_mode->clock       == new_mode->clock &&
9055             old_mode->hdisplay    == new_mode->hdisplay &&
9056             old_mode->vdisplay    == new_mode->vdisplay &&
9057             old_mode->htotal      == new_mode->htotal &&
9058             old_mode->vtotal      != new_mode->vtotal &&
9059             old_mode->hsync_start == new_mode->hsync_start &&
9060             old_mode->vsync_start != new_mode->vsync_start &&
9061             old_mode->hsync_end   == new_mode->hsync_end &&
9062             old_mode->vsync_end   != new_mode->vsync_end &&
9063             old_mode->hskew       == new_mode->hskew &&
9064             old_mode->vscan       == new_mode->vscan &&
9065             (old_mode->vsync_end - old_mode->vsync_start) ==
9066             (new_mode->vsync_end - new_mode->vsync_start))
9067                 return true;
9068
9069         return false;
9070 }
9071
9072 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9073         u64 num, den, res;
9074         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9075
9076         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9077
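        /*
         * Fixed refresh rate in uHz: mode.clock is in kHz, so * 1000 gives
         * Hz and the extra * 1000000 scales to uHz before dividing by the
         * pixel count per frame (htotal * vtotal).
         */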
9078         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9079         den = (unsigned long long)new_crtc_state->mode.htotal *
9080               (unsigned long long)new_crtc_state->mode.vtotal;
9081
9082         res = div_u64(num, den);
9083         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9084 }
9085
9086 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9087                          struct drm_atomic_state *state,
9088                          struct drm_crtc *crtc,
9089                          struct drm_crtc_state *old_crtc_state,
9090                          struct drm_crtc_state *new_crtc_state,
9091                          bool enable,
9092                          bool *lock_and_validation_needed)
9093 {
9094         struct dm_atomic_state *dm_state = NULL;
9095         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9096         struct dc_stream_state *new_stream;
9097         int ret = 0;
9098
9099         /*
9100          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9101          * update changed items
9102          */
9103         struct amdgpu_crtc *acrtc = NULL;
9104         struct amdgpu_dm_connector *aconnector = NULL;
9105         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9106         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9107
9108         new_stream = NULL;
9109
9110         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9111         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9112         acrtc = to_amdgpu_crtc(crtc);
9113         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9114
9115         /* TODO This hack should go away */
9116         if (aconnector && enable) {
9117                 /* Make sure fake sink is created in plug-in scenario */
9118                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9119                                                             &aconnector->base);
9120                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9121                                                             &aconnector->base);
9122
9123                 if (IS_ERR(drm_new_conn_state)) {
9124                         ret = PTR_ERR(drm_new_conn_state);
9125                         goto fail;
9126                 }
9127
9128                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9129                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9130
9131                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9132                         goto skip_modeset;
9133
9134                 new_stream = create_validate_stream_for_sink(aconnector,
9135                                                              &new_crtc_state->mode,
9136                                                              dm_new_conn_state,
9137                                                              dm_old_crtc_state->stream);
9138
9139                 /*
9140                  * We can have no stream on ACTION_SET if a display
9141                  * was disconnected during S3; in this case it is not an
9142                  * error, the OS will be updated after detection and
9143                  * will do the right thing on the next atomic commit.
9144                  */
9145
9146                 if (!new_stream) {
9147                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9148                                         __func__, acrtc->base.base.id);
9149                         ret = -ENOMEM;
9150                         goto fail;
9151                 }
9152
9153                 /*
9154                  * TODO: Check VSDB bits to decide whether this should
9155                  * be enabled or not.
9156                  */
9157                 new_stream->triggered_crtc_reset.enabled =
9158                         dm->force_timing_sync;
9159
9160                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9161
9162                 ret = fill_hdr_info_packet(drm_new_conn_state,
9163                                            &new_stream->hdr_static_metadata);
9164                 if (ret)
9165                         goto fail;
9166
9167                 /*
9168                  * If we already removed the old stream from the context
9169                  * (and set the new stream to NULL) then we can't reuse
9170                  * the old stream even if the stream and scaling are unchanged.
9171                  * We'll hit the BUG_ON and black screen.
9172                  *
9173                  * TODO: Refactor this function to allow this check to work
9174                  * in all conditions.
9175                  */
9176                 if (amdgpu_freesync_vid_mode &&
9177                     dm_new_crtc_state->stream &&
9178                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9179                         goto skip_modeset;
9180
9181                 if (dm_new_crtc_state->stream &&
9182                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9183                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9184                         new_crtc_state->mode_changed = false;
9185                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9186                                          new_crtc_state->mode_changed);
9187                 }
9188         }
9189
9190         /* mode_changed flag may get updated above, need to check again */
9191         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9192                 goto skip_modeset;
9193
9194         drm_dbg_state(state->dev,
9195                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
9198                 acrtc->crtc_id,
9199                 new_crtc_state->enable,
9200                 new_crtc_state->active,
9201                 new_crtc_state->planes_changed,
9202                 new_crtc_state->mode_changed,
9203                 new_crtc_state->active_changed,
9204                 new_crtc_state->connectors_changed);
9205
9206         /* Remove stream for any changed/disabled CRTC */
9207         if (!enable) {
9209                 if (!dm_old_crtc_state->stream)
9210                         goto skip_modeset;
9211
9212                 /* Unset freesync video if it was active before */
9213                 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
9214                         dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
9215                         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
9216                 }
9217
9218                 /* Now check if we should set freesync video mode */
9219                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9220                     is_timing_unchanged_for_freesync(new_crtc_state,
9221                                                      old_crtc_state)) {
9222                         new_crtc_state->mode_changed = false;
9223                         DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
9226                                 new_crtc_state->mode_changed);
9227
9228                         set_freesync_fixed_config(dm_new_crtc_state);
9229
9230                         goto skip_modeset;
9231                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9232                            is_freesync_video_mode(&new_crtc_state->mode,
9233                                                   aconnector)) {
9234                         struct drm_display_mode *high_mode;
9235
9236                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
9237                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
9238                                 set_freesync_fixed_config(dm_new_crtc_state);
9240                 }
9241
9242                 ret = dm_atomic_get_state(state, &dm_state);
9243                 if (ret)
9244                         goto fail;
9245
9246                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9247                                 crtc->base.id);
9248
9249                 /* i.e. reset mode */
9250                 if (dc_remove_stream_from_ctx(
9251                                 dm->dc,
9252                                 dm_state->context,
9253                                 dm_old_crtc_state->stream) != DC_OK) {
9254                         ret = -EINVAL;
9255                         goto fail;
9256                 }
9257
9258                 dc_stream_release(dm_old_crtc_state->stream);
9259                 dm_new_crtc_state->stream = NULL;
9260
9261                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9262
9263                 *lock_and_validation_needed = true;
9264
9265         } else { /* Add stream for any updated/enabled CRTC */
9266                 /*
9267                  * Quick fix to prevent a NULL pointer dereference on new_stream
9268                  * when newly added MST connectors are not found in the existing crtc_state in chained mode.
9269                  * TODO: need to dig out the root cause of that
9270                  */
9271                 if (!aconnector)
9272                         goto skip_modeset;
9273
9274                 if (modereset_required(new_crtc_state))
9275                         goto skip_modeset;
9276
9277                 if (modeset_required(new_crtc_state, new_stream,
9278                                      dm_old_crtc_state->stream)) {
9280                         WARN_ON(dm_new_crtc_state->stream);
9281
9282                         ret = dm_atomic_get_state(state, &dm_state);
9283                         if (ret)
9284                                 goto fail;
9285
9286                         dm_new_crtc_state->stream = new_stream;
9287
9288                         dc_stream_retain(new_stream);
9289
9290                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9291                                          crtc->base.id);
9292
9293                         if (dc_add_stream_to_ctx(
9294                                         dm->dc,
9295                                         dm_state->context,
9296                                         dm_new_crtc_state->stream) != DC_OK) {
9297                                 ret = -EINVAL;
9298                                 goto fail;
9299                         }
9300
9301                         *lock_and_validation_needed = true;
9302                 }
9303         }
9304
9305 skip_modeset:
9306         /* Release extra reference */
9307         if (new_stream)
9308                 dc_stream_release(new_stream);
9309
9310         /*
9311          * We want to do dc stream updates that do not require a
9312          * full modeset below.
9313          */
9314         if (!(enable && aconnector && new_crtc_state->active))
9315                 return 0;
9316         /*
9317          * Given above conditions, the dc state cannot be NULL because:
9318          * 1. We're in the process of enabling CRTCs (the stream has just
9319          *    been added to the dc context, or is already in the context),
9320          * 2. Has a valid connector attached, and
9321          * 3. Is currently active and enabled.
9322          * => The dc stream state currently exists.
9323          */
9324         BUG_ON(dm_new_crtc_state->stream == NULL);
9325
9326         /* Scaling or underscan settings */
9327         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9328                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
9329                 update_stream_scaling_settings(
9330                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9331
9332         /* ABM settings */
9333         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9334
9335         /*
9336          * Color management settings. We also update color properties
9337          * when a modeset is needed, to ensure it gets reprogrammed.
9338          */
9339         if (dm_new_crtc_state->base.color_mgmt_changed ||
9340             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9341                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9342                 if (ret)
9343                         goto fail;
9344         }
9345
9346         /* Update Freesync settings. */
9347         get_freesync_config_for_crtc(dm_new_crtc_state,
9348                                      dm_new_conn_state);
9349
9350         return ret;
9351
9352 fail:
9353         if (new_stream)
9354                 dc_stream_release(new_stream);
9355         return ret;
9356 }
9357
9358 static bool should_reset_plane(struct drm_atomic_state *state,
9359                                struct drm_plane *plane,
9360                                struct drm_plane_state *old_plane_state,
9361                                struct drm_plane_state *new_plane_state)
9362 {
9363         struct drm_plane *other;
9364         struct drm_plane_state *old_other_state, *new_other_state;
9365         struct drm_crtc_state *new_crtc_state;
9366         int i;
9367
9368         /*
9369          * TODO: Remove this hack once the checks below are sufficient
9370          * to determine when we need to reset all the planes on
9371          * the stream.
9372          */
9373         if (state->allow_modeset)
9374                 return true;
9375
9376         /* Exit early if we know that we're adding or removing the plane. */
9377         if (old_plane_state->crtc != new_plane_state->crtc)
9378                 return true;
9379
9380         /* old crtc == new_crtc == NULL, plane not in context. */
9381         if (!new_plane_state->crtc)
9382                 return false;
9383
9384         new_crtc_state =
9385                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9386
9387         if (!new_crtc_state)
9388                 return true;
9389
9390         /* CRTC Degamma changes currently require us to recreate planes. */
9391         if (new_crtc_state->color_mgmt_changed)
9392                 return true;
9393
9394         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9395                 return true;
9396
9397         /*
9398          * If there are any new primary or overlay planes being added or
9399          * removed then the z-order can potentially change. To ensure
9400          * correct z-order and pipe acquisition the current DC architecture
9401          * requires us to remove and recreate all existing planes.
9402          *
9403          * TODO: Come up with a more elegant solution for this.
9404          */
9405         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9406                 struct amdgpu_framebuffer *old_afb, *new_afb;

9407                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9408                         continue;
9409
9410                 if (old_other_state->crtc != new_plane_state->crtc &&
9411                     new_other_state->crtc != new_plane_state->crtc)
9412                         continue;
9413
9414                 if (old_other_state->crtc != new_other_state->crtc)
9415                         return true;
9416
9417                 /* Src/dst size and scaling updates. */
9418                 if (old_other_state->src_w != new_other_state->src_w ||
9419                     old_other_state->src_h != new_other_state->src_h ||
9420                     old_other_state->crtc_w != new_other_state->crtc_w ||
9421                     old_other_state->crtc_h != new_other_state->crtc_h)
9422                         return true;
9423
9424                 /* Rotation / mirroring updates. */
9425                 if (old_other_state->rotation != new_other_state->rotation)
9426                         return true;
9427
9428                 /* Blending updates. */
9429                 if (old_other_state->pixel_blend_mode !=
9430                     new_other_state->pixel_blend_mode)
9431                         return true;
9432
9433                 /* Alpha updates. */
9434                 if (old_other_state->alpha != new_other_state->alpha)
9435                         return true;
9436
9437                 /* Colorspace changes. */
9438                 if (old_other_state->color_range != new_other_state->color_range ||
9439                     old_other_state->color_encoding != new_other_state->color_encoding)
9440                         return true;
9441
9442                 /* Framebuffer checks fall at the end. */
9443                 if (!old_other_state->fb || !new_other_state->fb)
9444                         continue;
9445
9446                 /* Pixel format changes can require bandwidth updates. */
9447                 if (old_other_state->fb->format != new_other_state->fb->format)
9448                         return true;
9449
9450                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9451                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9452
9453                 /* Tiling and DCC changes also require bandwidth updates. */
9454                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9455                     old_afb->base.modifier != new_afb->base.modifier)
9456                         return true;
9457         }
9458
9459         return false;
9460 }
9461
9462 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9463                               struct drm_plane_state *new_plane_state,
9464                               struct drm_framebuffer *fb)
9465 {
9466         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9467         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9468         unsigned int pitch;
9469         bool linear;
9470
9471         if (fb->width > new_acrtc->max_cursor_width ||
9472             fb->height > new_acrtc->max_cursor_height) {
9473                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9474                                  new_plane_state->fb->width,
9475                                  new_plane_state->fb->height);
9476                 return -EINVAL;
9477         }
9478         if (new_plane_state->src_w != fb->width << 16 ||
9479             new_plane_state->src_h != fb->height << 16) {
9480                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9481                 return -EINVAL;
9482         }
9483
9484         /* Pitch in pixels */
9485         pitch = fb->pitches[0] / fb->format->cpp[0];
9486
9487         if (fb->width != pitch) {
9488                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9489                                  fb->width, pitch);
9490                 return -EINVAL;
9491         }
9492
9493         switch (pitch) {
9494         case 64:
9495         case 128:
9496         case 256:
9497                 /* FB pitch is supported by cursor plane */
9498                 break;
9499         default:
9500                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9501                 return -EINVAL;
9502         }
9503
9504         /* Core DRM takes care of checking FB modifiers, so we only need to
9505          * check tiling flags when the FB doesn't have a modifier. */
9506         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
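                /* GFX9+ encodes tiling in SWIZZLE_MODE; older ASICs use ARRAY_MODE/MICRO_TILE_MODE */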
9507                 if (adev->family < AMDGPU_FAMILY_AI) {
9508                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9509                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9510                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9511                 } else {
9512                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9513                 }
9514                 if (!linear) {
9515                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9516                         return -EINVAL;
9517                 }
9518         }
9519
9520         return 0;
9521 }
9522
9523 static int dm_update_plane_state(struct dc *dc,
9524                                  struct drm_atomic_state *state,
9525                                  struct drm_plane *plane,
9526                                  struct drm_plane_state *old_plane_state,
9527                                  struct drm_plane_state *new_plane_state,
9528                                  bool enable,
9529                                  bool *lock_and_validation_needed,
9530                                  bool *is_top_most_overlay)
9531 {
9533         struct dm_atomic_state *dm_state = NULL;
9534         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9535         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9536         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9537         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9538         struct amdgpu_crtc *new_acrtc;
9539         bool needs_reset;
9540         int ret = 0;
9541
9543         new_plane_crtc = new_plane_state->crtc;
9544         old_plane_crtc = old_plane_state->crtc;
9545         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9546         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9547
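        /*
         * Cursor planes are not backed by a dc_plane_state: DM programs the
         * cursor through DC stream cursor attributes, so only basic FB and
         * position checks are needed here.
         */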
9548         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9549                 if (!enable || !new_plane_crtc ||
9550                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9551                         return 0;
9552
9553                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9554
9555                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9556                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9557                         return -EINVAL;
9558                 }
9559
9560                 if (new_plane_state->fb) {
9561                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9562                                                  new_plane_state->fb);
9563                         if (ret)
9564                                 return ret;
9565                 }
9566
9567                 return 0;
9568         }
9569
9570         needs_reset = should_reset_plane(state, plane, old_plane_state,
9571                                          new_plane_state);
9572
9573         /* Remove any changed/removed planes */
9574         if (!enable) {
9575                 if (!needs_reset)
9576                         return 0;
9577
9578                 if (!old_plane_crtc)
9579                         return 0;
9580
9581                 old_crtc_state = drm_atomic_get_old_crtc_state(
9582                                 state, old_plane_crtc);
9583                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9584
9585                 if (!dm_old_crtc_state->stream)
9586                         return 0;
9587
9588                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9589                                 plane->base.id, old_plane_crtc->base.id);
9590
9591                 ret = dm_atomic_get_state(state, &dm_state);
9592                 if (ret)
9593                         return ret;
9594
9595                 if (!dc_remove_plane_from_context(
9596                                 dc,
9597                                 dm_old_crtc_state->stream,
9598                                 dm_old_plane_state->dc_state,
9599                                 dm_state->context)) {
9601                         return -EINVAL;
9602                 }
9603
9605                 dc_plane_state_release(dm_old_plane_state->dc_state);
9606                 dm_new_plane_state->dc_state = NULL;
9607
9608                 *lock_and_validation_needed = true;
9609
9610         } else { /* Add new planes */
9611                 struct dc_plane_state *dc_new_plane_state;
9612
9613                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9614                         return 0;
9615
9616                 if (!new_plane_crtc)
9617                         return 0;
9618
9619                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9620                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9621
9622                 if (!dm_new_crtc_state->stream)
9623                         return 0;
9624
9625                 if (!needs_reset)
9626                         return 0;
9627
9628                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9629                 if (ret)
9630                         return ret;
9631
9632                 WARN_ON(dm_new_plane_state->dc_state);
9633
9634                 dc_new_plane_state = dc_create_plane_state(dc);
9635                 if (!dc_new_plane_state)
9636                         return -ENOMEM;
9637
9638                 /* Block top most plane from being a video plane */
9639                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9640                         if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) {
                                dc_plane_state_release(dc_new_plane_state);
9641                                 return -EINVAL;
9642                         }
9643                         *is_top_most_overlay = false;
9644                 }
9645
9646                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9647                                  plane->base.id, new_plane_crtc->base.id);
9648
9649                 ret = fill_dc_plane_attributes(
9650                         drm_to_adev(new_plane_crtc->dev),
9651                         dc_new_plane_state,
9652                         new_plane_state,
9653                         new_crtc_state);
9654                 if (ret) {
9655                         dc_plane_state_release(dc_new_plane_state);
9656                         return ret;
9657                 }
9658
9659                 ret = dm_atomic_get_state(state, &dm_state);
9660                 if (ret) {
9661                         dc_plane_state_release(dc_new_plane_state);
9662                         return ret;
9663                 }
9664
9665                 /*
9666                  * Any atomic check errors that occur after this will
9667                  * not need a release. The plane state will be attached
9668                  * to the stream, and therefore part of the atomic
9669                  * state. It'll be released when the atomic state is
9670                  * cleaned.
9671                  */
9672                 if (!dc_add_plane_to_context(
9673                                 dc,
9674                                 dm_new_crtc_state->stream,
9675                                 dc_new_plane_state,
9676                                 dm_state->context)) {
9678                         dc_plane_state_release(dc_new_plane_state);
9679                         return -EINVAL;
9680                 }
9681
9682                 dm_new_plane_state->dc_state = dc_new_plane_state;
9683
9684                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9685
9686                 /* Tell DC to do a full surface update every time there
9687                  * is a plane change. Inefficient, but works for now.
9688                  */
9689                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9690
9691                 *lock_and_validation_needed = true;
9692         }
9693
9695         return ret;
9696 }
9697
9698 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9699                                        int *src_w, int *src_h)
9700 {
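        /* plane_state->src_* are 16.16 fixed point; shift to whole pixels */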
9701         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9702         case DRM_MODE_ROTATE_90:
9703         case DRM_MODE_ROTATE_270:
9704                 *src_w = plane_state->src_h >> 16;
9705                 *src_h = plane_state->src_w >> 16;
9706                 break;
9707         case DRM_MODE_ROTATE_0:
9708         case DRM_MODE_ROTATE_180:
9709         default:
9710                 *src_w = plane_state->src_w >> 16;
9711                 *src_h = plane_state->src_h >> 16;
9712                 break;
9713         }
9714 }
9715
9716 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9717                                 struct drm_crtc *crtc,
9718                                 struct drm_crtc_state *new_crtc_state)
9719 {
9720         struct drm_plane *cursor = crtc->cursor, *underlying;
9721         struct drm_plane_state *new_cursor_state, *new_underlying_state;
9722         int i;
9723         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9724         int cursor_src_w, cursor_src_h;
9725         int underlying_src_w, underlying_src_h;
9726
9727         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9728          * cursor per pipe but it's going to inherit the scaling and
9729          * positioning from the underlying pipe. Check that the cursor plane's
9730          * scaling matches the underlying planes'. */
9731
9732         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9733         if (!new_cursor_state || !new_cursor_state->fb)
9734                 return 0;
9736
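        /* Scale ratios in 1/1000 units so plain integer compares catch mismatches */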
9737         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9738         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9739         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
9740
9741         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9742                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
9743                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9744                         continue;
9745
9746                 /* Ignore disabled planes */
9747                 if (!new_underlying_state->fb)
9748                         continue;
9749
9750                 dm_get_oriented_plane_size(new_underlying_state,
9751                                            &underlying_src_w, &underlying_src_h);
9752                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9753                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9754
9755                 if (cursor_scale_w != underlying_scale_w ||
9756                     cursor_scale_h != underlying_scale_h) {
9757                         drm_dbg_atomic(crtc->dev,
9758                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9759                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9760                         return -EINVAL;
9761                 }
9762
9763                 /* If this plane covers the whole CRTC, no need to check planes underneath */
9764                 if (new_underlying_state->crtc_x <= 0 &&
9765                     new_underlying_state->crtc_y <= 0 &&
9766                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9767                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9768                         break;
9769         }
9770
9771         return 0;
9772 }
9773
9774 #if defined(CONFIG_DRM_AMD_DC_DCN)
9775 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9776 {
9777         struct drm_connector *connector;
9778         struct drm_connector_state *conn_state, *old_conn_state;
9779         struct amdgpu_dm_connector *aconnector = NULL;
9780         int i;

9781         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9782                 if (!conn_state->crtc)
9783                         conn_state = old_conn_state;
9784
9785                 if (conn_state->crtc != crtc)
9786                         continue;
9787
9788                 aconnector = to_amdgpu_dm_connector(connector);
9789                 if (!aconnector->mst_output_port || !aconnector->mst_root)
9790                         aconnector = NULL;
9791                 else
9792                         break;
9793         }
9794
9795         if (!aconnector)
9796                 return 0;
9797
9798         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
9799 }
9800 #endif
9801
9802 /**
9803  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9804  *
9805  * @dev: The DRM device
9806  * @state: The atomic state to commit
9807  *
9808  * Validate that the given atomic state is programmable by DC into hardware.
9809  * This involves constructing a &struct dc_state reflecting the new hardware
9810  * state we wish to commit, then querying DC to see if it is programmable. It's
9811  * important not to modify the existing DC state. Otherwise, atomic_check
9812  * may unexpectedly commit hardware changes.
9813  *
9814  * When validating the DC state, it's important that the right locks are
9815  * acquired. For full updates case which removes/adds/updates streams on one
9816  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9817  * that any such full update commit will wait for completion of any outstanding
9818  * flip using DRMs synchronization events.
9819  *
9820  * Note that DM adds the affected connectors for all CRTCs in state, when that
9821  * might not seem necessary. This is because DC stream creation requires the
9822  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9823  * be possible but non-trivial - a possible TODO item.
9824  *
9825  * Return: 0 on success, or a negative error code if validation failed.
9826  */
9827 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9828                                   struct drm_atomic_state *state)
9829 {
9830         struct amdgpu_device *adev = drm_to_adev(dev);
9831         struct dm_atomic_state *dm_state = NULL;
9832         struct dc *dc = adev->dm.dc;
9833         struct drm_connector *connector;
9834         struct drm_connector_state *old_con_state, *new_con_state;
9835         struct drm_crtc *crtc;
9836         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9837         struct drm_plane *plane;
9838         struct drm_plane_state *old_plane_state, *new_plane_state;
9839         enum dc_status status;
9840         int ret, i;
9841         bool lock_and_validation_needed = false;
9842         bool is_top_most_overlay = true;
9843         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9844 #if defined(CONFIG_DRM_AMD_DC_DCN)
9845         struct drm_dp_mst_topology_mgr *mgr;
9846         struct drm_dp_mst_topology_state *mst_state;
9847         struct dsc_mst_fairness_vars vars[MAX_PIPES];
9848 #endif
9849
9850         trace_amdgpu_dm_atomic_check_begin(state);
9851
9852         ret = drm_atomic_helper_check_modeset(dev, state);
9853         if (ret) {
9854                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
9855                 goto fail;
9856         }
9857
9858         /* Check connector changes */
9859         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9860                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9861                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9862
9863                 /* Skip connectors that are disabled or part of modeset already. */
9864                 if (!new_con_state->crtc)
9865                         continue;
9866
9867                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9868                 if (IS_ERR(new_crtc_state)) {
9869                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
9870                         ret = PTR_ERR(new_crtc_state);
9871                         goto fail;
9872                 }
9873
9874                 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
9875                     dm_old_con_state->scaling != dm_new_con_state->scaling)
9876                         new_crtc_state->connectors_changed = true;
9877         }
9878
9879 #if defined(CONFIG_DRM_AMD_DC_DCN)
9880         if (dc_resource_is_dsc_encoding_supported(dc)) {
9881                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9882                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9883                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9884                                 if (ret) {
9885                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
9886                                         goto fail;
9887                                 }
9888                         }
9889                 }
9890         }
9891 #endif
9892         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9893                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9894
9895                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9896                     !new_crtc_state->color_mgmt_changed &&
9897                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9898                     !dm_old_crtc_state->dsc_force_changed)
9899                         continue;
9900
9901                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
9902                 if (ret) {
9903                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
9904                         goto fail;
9905                 }
9906
9907                 if (!new_crtc_state->enable)
9908                         continue;
9909
9910                 ret = drm_atomic_add_affected_connectors(state, crtc);
9911                 if (ret) {
9912                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
9913                         goto fail;
9914                 }
9915
9916                 ret = drm_atomic_add_affected_planes(state, crtc);
9917                 if (ret) {
9918                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
9919                         goto fail;
9920                 }
9921
9922                 if (dm_old_crtc_state->dsc_force_changed)
9923                         new_crtc_state->mode_changed = true;
9924         }
9925
9926         /*
9927          * Add all primary and overlay planes on the CRTC to the state
9928          * whenever a plane is enabled to maintain correct z-ordering
9929          * and to enable fast surface updates.
9930          */
9931         drm_for_each_crtc(crtc, dev) {
9932                 bool modified = false;
9933
9934                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9935                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9936                                 continue;
9937
9938                         if (new_plane_state->crtc == crtc ||
9939                             old_plane_state->crtc == crtc) {
9940                                 modified = true;
9941                                 break;
9942                         }
9943                 }
9944
9945                 if (!modified)
9946                         continue;
9947
9948                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9949                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9950                                 continue;
9951
9952                         new_plane_state =
9953                                 drm_atomic_get_plane_state(state, plane);
9954
9955                         if (IS_ERR(new_plane_state)) {
9956                                 ret = PTR_ERR(new_plane_state);
9957                                 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
9958                                 goto fail;
9959                         }
9960                 }
9961         }
9962
9963         /*
9964          * DC consults the zpos (layer_index in DC terminology) to determine the
9965          * hw plane on which to enable the hw cursor (see
9966          * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
9967          * atomic state, so call drm helper to normalize zpos.
9968          */
9969         ret = drm_atomic_normalize_zpos(dev, state);
9970         if (ret) {
9971                 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
9972                 goto fail;
9973         }
9974
9975         /* Remove existing planes if they are modified */
9976         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9977                 ret = dm_update_plane_state(dc, state, plane,
9978                                             old_plane_state,
9979                                             new_plane_state,
9980                                             false,
9981                                             &lock_and_validation_needed,
9982                                             &is_top_most_overlay);
9983                 if (ret) {
9984                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
9985                         goto fail;
9986                 }
9987         }
9988
9989         /* Disable all crtcs which require disable */
9990         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9991                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9992                                            old_crtc_state,
9993                                            new_crtc_state,
9994                                            false,
9995                                            &lock_and_validation_needed);
9996                 if (ret) {
9997                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
9998                         goto fail;
9999                 }
10000         }
10001
10002         /* Enable all crtcs which require enable */
10003         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10004                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10005                                            old_crtc_state,
10006                                            new_crtc_state,
10007                                            true,
10008                                            &lock_and_validation_needed);
10009                 if (ret) {
10010                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10011                         goto fail;
10012                 }
10013         }
10014
10015         /* Add new/modified planes */
10016         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10017                 ret = dm_update_plane_state(dc, state, plane,
10018                                             old_plane_state,
10019                                             new_plane_state,
10020                                             true,
10021                                             &lock_and_validation_needed,
10022                                             &is_top_most_overlay);
10023                 if (ret) {
10024                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10025                         goto fail;
10026                 }
10027         }
10028
10029 #if defined(CONFIG_DRM_AMD_DC_DCN)
10030         if (dc_resource_is_dsc_encoding_supported(dc)) {
10031                 ret = pre_validate_dsc(state, &dm_state, vars);
10032                 if (ret != 0)
10033                         goto fail;
10034         }
10035 #endif
10036
10037         /* Run this here since we want to validate the streams we created */
10038         ret = drm_atomic_helper_check_planes(dev, state);
10039         if (ret) {
10040                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
10041                 goto fail;
10042         }
10043
10044         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10045                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10046                 if (dm_new_crtc_state->mpo_requested)
10047                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
10048         }
10049
10050         /* Check cursor planes scaling */
10051         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10052                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10053                 if (ret) {
10054                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
10055                         goto fail;
10056                 }
10057         }
10058
10059         if (state->legacy_cursor_update) {
10060                 /*
10061                  * This is a fast cursor update coming from the plane update
10062                  * helper, check if it can be done asynchronously for better
10063                  * performance.
10064                  */
10065                 state->async_update =
10066                         !drm_atomic_helper_async_check(dev, state);
10067
10068                 /*
10069                  * Skip the remaining global validation if this is an async
10070                  * update. Cursor updates can be done without affecting
10071                  * state or bandwidth calcs and this avoids the performance
10072                  * penalty of locking the private state object and
10073                  * allocating a new dc_state.
10074                  */
10075                 if (state->async_update)
10076                         return 0;
10077         }
10078
10079         /* Check scaling and underscan changes */
10080         /* TODO: Scaling-changes validation was removed because a new stream
10081          * cannot be committed into the context w/o causing a full reset.
10082          * Need to decide how to handle.
10083          */
10084         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10085                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10086                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10087                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10088
10089                 /* Skip any modesets/resets */
10090                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10091                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10092                         continue;
10093
10094                 /* Skip anything that is not a scaling or underscan change */
10095                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10096                         continue;
10097
10098                 lock_and_validation_needed = true;
10099         }
10100
10101 #if defined(CONFIG_DRM_AMD_DC_DCN)
10102         /* set the slot info for each mst_state based on the link encoding format */
10103         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10104                 struct amdgpu_dm_connector *aconnector;
10105                 struct drm_connector *connector;
10106                 struct drm_connector_list_iter iter;
10107                 u8 link_coding_cap;
10108
10109                 drm_connector_list_iter_begin(dev, &iter);
10110                 drm_for_each_connector_iter(connector, &iter) {
10111                         if (connector->index == mst_state->mgr->conn_base_id) {
10112                                 aconnector = to_amdgpu_dm_connector(connector);
10113                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10114                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10115
10116                                 break;
10117                         }
10118                 }
10119                 drm_connector_list_iter_end(&iter);
10120         }
10121 #endif
10122
10123         /*
10124          * Streams and planes are reset when there are changes that affect
10125          * bandwidth. Anything that affects bandwidth needs to go through
10126          * DC global validation to ensure that the configuration can be applied
10127          * to hardware.
10128          *
10129          * We have to currently stall out here in atomic_check for outstanding
10130          * commits to finish in this case because our IRQ handlers reference
10131          * DRM state directly - we can end up disabling interrupts too early
10132          * if we don't.
10133          *
10134          * TODO: Remove this stall and drop DM state private objects.
10135          */
10136         if (lock_and_validation_needed) {
10137                 ret = dm_atomic_get_state(state, &dm_state);
10138                 if (ret) {
10139                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
10140                         goto fail;
10141                 }
10142
10143                 ret = do_aquire_global_lock(dev, state);
10144                 if (ret) {
10145                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
10146                         goto fail;
10147                 }
10148
10149 #if defined(CONFIG_DRM_AMD_DC_DCN)
10150                 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
10151                 if (ret) {
10152                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
10153                         goto fail;
10154                 }
10155
10156                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10157                 if (ret) {
10158                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
10159                         goto fail;
10160                 }
10161 #endif
10162
10163                 /*
10164                  * Perform validation of MST topology in the state:
10165                  * We need to perform MST atomic check before calling
10166                  * dc_validate_global_state(), or there is a chance
10167                  * to get stuck in an infinite loop and hang eventually.
10168                  */
10169                 ret = drm_dp_mst_atomic_check(state);
10170                 if (ret) {
10171                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
10172                         goto fail;
10173                 }
10174                 status = dc_validate_global_state(dc, dm_state->context, true);
10175                 if (status != DC_OK) {
10176                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
10177                                        dc_status_to_str(status), status);
10178                         ret = -EINVAL;
10179                         goto fail;
10180                 }
10181         } else {
10182                 /*
10183                  * The commit is a fast update. Fast updates shouldn't change
10184                  * the DC context, affect global validation, and can have their
10185                  * commit work done in parallel with other commits not touching
10186                  * the same resource. If we have a new DC context as part of
10187                  * the DM atomic state from validation we need to free it and
10188                  * retain the existing one instead.
10189                  *
10190                  * Furthermore, since the DM atomic state only contains the DC
10191                  * context and can safely be annulled, we can free the state
10192                  * and clear the associated private object now to free
10193                  * some memory and avoid a possible use-after-free later.
10194                  */
10195
10196                 for (i = 0; i < state->num_private_objs; i++) {
10197                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10198
10199                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10200                                 int j = state->num_private_objs-1;
10201
10202                                 dm_atomic_destroy_state(obj,
10203                                                 state->private_objs[i].state);
10204
10205                                 /* If i is not at the end of the array then the
10206                                  * last element needs to be moved to where i was
10207                                  * before the array can safely be truncated.
10208                                  */
10209                                 if (i != j)
10210                                         state->private_objs[i] =
10211                                                 state->private_objs[j];
10212
10213                                 state->private_objs[j].ptr = NULL;
10214                                 state->private_objs[j].state = NULL;
10215                                 state->private_objs[j].old_state = NULL;
10216                                 state->private_objs[j].new_state = NULL;
10217
10218                                 state->num_private_objs = j;
10219                                 break;
10220                         }
10221                 }
10222         }
10223
10224         /* Store the overall update type for use later in atomic check. */
10225         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10226                 struct dm_crtc_state *dm_new_crtc_state =
10227                         to_dm_crtc_state(new_crtc_state);
10228
10229                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10230                                                          UPDATE_TYPE_FULL :
10231                                                          UPDATE_TYPE_FAST;
10232         }
10233
10234         /* Must be success */
10235         WARN_ON(ret);
10236
10237         trace_amdgpu_dm_atomic_check_finish(state, ret);
10238
10239         return ret;
10240
10241 fail:
10242         if (ret == -EDEADLK)
10243                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10244         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10245                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10246         else
10247                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10248
10249         trace_amdgpu_dm_atomic_check_finish(state, ret);
10250
10251         return ret;
10252 }
10253
10254 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10255                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10256 {
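        /*
         * DP_MSA_TIMING_PAR_IGNORED means the sink follows the actual video
         * timing rather than the MSA fields, a prerequisite for the variable
         * timings used by freesync.
         */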
10257         u8 dpcd_data;
10258         bool capable = false;
10259
10260         if (amdgpu_dm_connector->dc_link &&
10261                 dm_helpers_dp_read_dpcd(
10262                                 NULL,
10263                                 amdgpu_dm_connector->dc_link,
10264                                 DP_DOWN_STREAM_PORT_COUNT,
10265                                 &dpcd_data,
10266                                 sizeof(dpcd_data))) {
10267                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10268         }
10269
10270         return capable;
10271 }
10272
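/*
 * Hand one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to DMUB for parsing via a DMUB_CMD__EDID_CEA command.
 * Intermediate chunks are acknowledged with DMUB_CMD__EDID_CEA_ACK;
 * once the whole block has been received, DMUB returns the parsed AMD
 * VSDB fields (FreeSync support plus min/max frame rate), which are
 * copied into @vsdb.
 */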
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
                unsigned int offset,
                unsigned int total_length,
                u8 *data,
                unsigned int length,
                struct amdgpu_hdmi_vsdb_info *vsdb)
{
        bool res;
        union dmub_rb_cmd cmd;
        struct dmub_cmd_send_edid_cea *input;
        struct dmub_cmd_edid_cea_output *output;

        if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
                return false;

        memset(&cmd, 0, sizeof(cmd));

        input = &cmd.edid_cea.data.input;

        cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
        cmd.edid_cea.header.sub_type = 0;
        cmd.edid_cea.header.payload_bytes =
                sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
        input->offset = offset;
        input->length = length;
        input->cea_total_length = total_length;
        memcpy(input->payload, data, length);

        res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
        if (!res) {
                DRM_ERROR("EDID CEA parser failed\n");
                return false;
        }

        output = &cmd.edid_cea.data.output;

        if (output->type == DMUB_CMD__EDID_CEA_ACK) {
                if (!output->ack.success) {
                        DRM_ERROR("EDID CEA ack failed at offset %d\n",
                                        output->ack.offset);
                }
        } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
                if (!output->amd_vsdb.vsdb_found)
                        return false;

                vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
                vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
                vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
                vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
        } else {
                DRM_WARN("Unknown EDID CEA parser results\n");
                return false;
        }

        return true;
}

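/*
 * Stream a CEA extension block to the DMCU firmware in 8-byte chunks
 * and collect the AMD VSDB parse result. This is the path used when no
 * DMUB service is available (see parse_edid_cea() below).
 */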
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
                u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
        int i;

        /* send extension block to DMCU for parsing */
        for (i = 0; i < len; i += 8) {
                bool res;
                int offset;

                /* send 8 bytes at a time */
                if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
                        return false;

                if (i + 8 == len) {
                        /* EDID block has been sent in full; expect the result */
                        int version, min_rate, max_rate;

                        res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
                        if (res) {
                                /* amd vsdb found */
                                vsdb_info->freesync_supported = 1;
                                vsdb_info->amd_vsdb_version = version;
                                vsdb_info->min_refresh_rate_hz = min_rate;
                                vsdb_info->max_refresh_rate_hz = max_rate;
                                return true;
                        }
                        /* not amd vsdb */
                        return false;
                }

                /* check for ack */
                res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
                if (!res)
                        return false;
        }

        return false;
}

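/*
 * DMUB flavor of the CEA parser: stream the extension block to DMUB in
 * 8-byte chunks and report whether the parsed AMD VSDB advertises
 * FreeSync support.
 */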
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
                u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
        int i;

        /* send extension block to DMUB for parsing */
        for (i = 0; i < len; i += 8) {
                /* send 8 bytes at a time */
                if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
                        return false;
        }

        return vsdb_info->freesync_supported;
}

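/*
 * Parse a CEA extension block with whichever display firmware is
 * present: DMUB when a DMUB service exists, DMCU otherwise. dc_lock is
 * held across the firmware round trip.
 */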
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
                u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
        struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
        bool ret;

        mutex_lock(&adev->dm.dc_lock);
        if (adev->dm.dmub_srv)
                ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
        else
                ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
        mutex_unlock(&adev->dm.dc_lock);
        return ret;
}

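/*
 * Find the first CEA extension block in @edid (mirroring
 * drm_find_cea_extension()) and let the display firmware parse it for
 * the HDMI AMD VSDB. Returns the index of that extension block on
 * success, or -ENODEV if there is no EDID, no CEA extension or no
 * valid VSDB.
 */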
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
                struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
        u8 *edid_ext = NULL;
        int i;
        bool valid_vsdb_found = false;

        /*----- drm_find_cea_extension() -----*/
        /* No EDID or EDID extensions */
        if (edid == NULL || edid->extensions == 0)
                return -ENODEV;

        /* Find CEA extension */
        for (i = 0; i < edid->extensions; i++) {
                edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
                if (edid_ext[0] == CEA_EXT)
                        break;
        }

        if (i == edid->extensions)
                return -ENODEV;

        /*----- cea_db_offsets() -----*/
        if (edid_ext[0] != CEA_EXT)
                return -ENODEV;

        valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

        return valid_vsdb_found ? i : -ENODEV;
}

/**
 * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities
 *
 * @connector: Connector to query.
 * @edid: EDID from monitor
 *
 * amdgpu supports FreeSync on both DP and HDMI displays, and it needs to
 * keep track of some display information in the internal data structures
 * used by amdgpu_dm. This function checks which type of connector is in
 * use and sets the FreeSync parameters accordingly.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
                                    struct edid *edid)
{
        int i = 0;
        struct detailed_timing *timing;
        struct detailed_non_pixel *data;
        struct detailed_data_monitor_range *range;
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                        to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = NULL;
        struct dc_sink *sink;

        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
        bool freesync_capable = false;
        enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

        if (!connector->state) {
                DRM_ERROR("%s - Connector has no state\n", __func__);
                goto update;
        }

        sink = amdgpu_dm_connector->dc_sink ?
                amdgpu_dm_connector->dc_sink :
                amdgpu_dm_connector->dc_em_sink;

        if (!edid || !sink) {
                dm_con_state = to_dm_connector_state(connector->state);

                amdgpu_dm_connector->min_vfreq = 0;
                amdgpu_dm_connector->max_vfreq = 0;
                amdgpu_dm_connector->pixel_clock_mhz = 0;
                connector->display_info.monitor_range.min_vfreq = 0;
                connector->display_info.monitor_range.max_vfreq = 0;
                freesync_capable = false;

                goto update;
        }

        dm_con_state = to_dm_connector_state(connector->state);

        if (!adev->dm.freesync_module)
                goto update;

        if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
                || sink->sink_signal == SIGNAL_TYPE_EDP) {
                bool edid_check_required = false;

                if (edid) {
                        edid_check_required = is_dp_capable_without_timing_msa(
                                                adev->dm.dc,
                                                amdgpu_dm_connector);
                }

                if (edid_check_required && (edid->version > 1 ||
                   (edid->version == 1 && edid->revision > 1))) {
                        for (i = 0; i < 4; i++) {
                                timing  = &edid->detailed_timings[i];
                                data    = &timing->data.other_data;
                                range   = &data->data.range;
                                /*
                                 * Check if monitor has continuous frequency mode
                                 */
                                if (data->type != EDID_DETAIL_MONITOR_RANGE)
                                        continue;
                                /*
                                 * Check for flag range limits only. If flag == 1 then
                                 * no additional timing information is provided.
                                 * Default GTF, GTF Secondary curve and CVT are not
                                 * supported.
                                 */
                                if (range->flags != 1)
                                        continue;

                                amdgpu_dm_connector->min_vfreq = range->min_vfreq;
                                amdgpu_dm_connector->max_vfreq = range->max_vfreq;
                                amdgpu_dm_connector->pixel_clock_mhz =
                                        range->pixel_clock_mhz * 10;

                                connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
                                connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

                                break;
                        }

                        if (amdgpu_dm_connector->max_vfreq -
                            amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;
                }
        } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
                i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
                if (i >= 0 && vsdb_info.freesync_supported) {
                        timing  = &edid->detailed_timings[i];
                        data    = &timing->data.other_data;

                        amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
                        amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
                        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;

                        connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
                        connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
                }
        }

        as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);

        if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
                i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
                if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
                        amdgpu_dm_connector->pack_sdp_v1_3 = true;
                        amdgpu_dm_connector->as_type = as_type;
                        amdgpu_dm_connector->vsdb_info = vsdb_info;

                        amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
                        amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
                        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;

                        connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
                        connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
                }
        }

update:
        if (dm_con_state)
                dm_con_state->freesync_capable = freesync_capable;

        if (connector->vrr_capable_property)
                drm_connector_set_vrr_capable_property(connector,
                                                       freesync_capable);
}

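/**
 * amdgpu_dm_trigger_timing_sync - Re-evaluate forced CRTC timing sync
 * @dev: DRM device
 *
 * Propagate the current adev->dm.force_timing_sync setting to the
 * triggered_crtc_reset of every stream in the active DC state, then
 * re-enable per-frame CRTC master sync and ask DC to trigger the
 * synchronization, all under dc_lock.
 */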
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dc *dc = adev->dm.dc;
        int i;

        mutex_lock(&adev->dm.dc_lock);
        if (dc->current_state) {
                for (i = 0; i < dc->current_state->stream_count; ++i)
                        dc->current_state->streams[i]
                                ->triggered_crtc_reset.enabled =
                                adev->dm.force_timing_sync;

                dm_enable_per_frame_crtc_master_sync(dc->current_state);
                dc_trigger_sync(dc, dc->current_state);
        }
        mutex_unlock(&adev->dm.dc_lock);
}

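/**
 * dm_write_reg_func - DM's register-write hook for DC
 * @ctx: DC context, which provides the CGS device handle
 * @address: register offset to write
 * @value: value to write
 * @func_name: name of the calling function (unused here)
 *
 * Thin wrapper around cgs_write_register() that also emits an
 * amdgpu_dc_wreg trace event. When DM_CHECK_ADDR_0 is defined, writes
 * to address 0 are rejected as invalid.
 */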
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
                       u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register write. address = 0\n");
                return;
        }
#endif
        cgs_write_register(ctx->cgs_device, address, value);
        trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

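/**
 * dm_read_reg_func - DM's register-read hook for DC
 * @ctx: DC context, which provides the CGS device handle
 * @address: register offset to read
 * @func_name: name of the calling function (unused here)
 *
 * Counterpart to dm_write_reg_func(). A read performed while the DMUB
 * register-offload helper is gathering commands (and not expecting
 * burst writes) cannot return a meaningful value, so that case asserts
 * and returns 0.
 *
 * Return: the register value, or 0 when the read is rejected.
 */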
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
                          const char *func_name)
{
        u32 value;
#ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register read; address = 0\n");
                return 0;
        }
#endif

        if (ctx->dmub_srv &&
            ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
            !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
                ASSERT(false);
                return 0;
        }

        value = cgs_read_register(ctx->cgs_device, address);

        trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

        return value;
}

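/**
 * amdgpu_dm_process_dmub_aux_transfer_sync - Perform a DPIA AUX transfer via DMUB
 * @ctx: DC context
 * @link_index: index of the DPIA link doing the transfer
 * @payload: AUX request; for an ACKed read the reply data is copied
 *           back into @payload->data
 * @operation_result: detailed AUX status for the caller
 *
 * Issue the AUX request to DMUB asynchronously, then block for up to 10
 * seconds on dmub_aux_transfer_done, which is completed by the DMUB
 * notification handler. dpia_aux_lock serializes requests because they
 * all share the single adev->dm.dmub_notify buffer.
 *
 * Return: the AUX reply length on success, or -1 on failure (with the
 * reason in @operation_result).
 */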
int amdgpu_dm_process_dmub_aux_transfer_sync(
                struct dc_context *ctx,
                unsigned int link_index,
                struct aux_payload *payload,
                enum aux_return_code_type *operation_result)
{
        struct amdgpu_device *adev = ctx->driver_context;
        struct dmub_notification *p_notify = adev->dm.dmub_notify;
        int ret = -1;

        mutex_lock(&adev->dm.dpia_aux_lock);
        if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
                *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
                goto out;
        }

        if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
                DRM_ERROR("wait_for_completion_timeout() timed out!\n");
                *operation_result = AUX_RET_ERROR_TIMEOUT;
                goto out;
        }

        if (p_notify->result != AUX_RET_SUCCESS) {
                /*
                 * Transient states before tunneling is enabled could
                 * lead to this error. We can ignore this for now.
                 */
                if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
                        DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
                                 payload->address, payload->length,
                                 p_notify->result);
                }
                *operation_result = AUX_RET_ERROR_INVALID_REPLY;
                goto out;
        }

        payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
        if (!payload->write && p_notify->aux_reply.length &&
                        (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
                if (payload->length != p_notify->aux_reply.length) {
                        DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
                                 p_notify->aux_reply.length,
                                 payload->address, payload->length);
                        *operation_result = AUX_RET_ERROR_INVALID_REPLY;
                        goto out;
                }

                memcpy(payload->data, p_notify->aux_reply.data,
                                p_notify->aux_reply.length);
        }

        /* success */
        ret = p_notify->aux_reply.length;
        *operation_result = p_notify->result;
out:
        reinit_completion(&adev->dm.dmub_aux_transfer_done);
        mutex_unlock(&adev->dm.dpia_aux_lock);
        return ret;
}

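/**
 * amdgpu_dm_process_dmub_set_config_sync - Perform a DPIA SET_CONFIG request via DMUB
 * @ctx: DC context
 * @link_index: index of the DPIA link to configure
 * @payload: SET_CONFIG command payload
 * @operation_result: SET_CONFIG status reported back by DMUB
 *
 * Like the AUX path above, except that dc_process_dmub_set_config_async()
 * may complete the command immediately, in which case the 10 second wait
 * on dmub_aux_transfer_done is skipped and the completion is not
 * re-armed.
 *
 * Return: 0 on success, -1 on timeout.
 */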
int amdgpu_dm_process_dmub_set_config_sync(
                struct dc_context *ctx,
                unsigned int link_index,
                struct set_config_cmd_payload *payload,
                enum set_config_status *operation_result)
{
        struct amdgpu_device *adev = ctx->driver_context;
        bool is_cmd_complete;
        int ret;

        mutex_lock(&adev->dm.dpia_aux_lock);
        is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
                        link_index, payload, adev->dm.dmub_notify);

        if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
                ret = 0;
                *operation_result = adev->dm.dmub_notify->sc_status;
        } else {
                DRM_ERROR("wait_for_completion_timeout() timed out!\n");
                ret = -1;
                *operation_result = SET_CONFIG_UNKNOWN_ERROR;
        }

        if (!is_cmd_complete)
                reinit_completion(&adev->dm.dmub_aux_transfer_done);
        mutex_unlock(&adev->dm.dpia_aux_lock);
        return ret;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCN 3.0.1 (CHIP_VANGOGH), and
 * only when the stolen VGA memory is not being kept. If everything goes
 * well, we may consider expanding seamless boot support to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 0, 1):
                if (!adev->mman.keep_stolen_vga_memory)
                        return true;
                break;
        default:
                break;
        }

        return false;
}