drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * Initializes drm_device display-related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                                   struct drm_atomic_state *state,
                                   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

/**
 * dm_vblank_get_counter() - Get the vertical blank counter
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: the number of vertical blanks, or 0 if @crtc is out of range or
 * has no stream.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc_state->stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
                struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
                                                acrtc->base.state);

                if (acrtc_state->stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc_state->stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

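                /*
                 * Pack the values the way the legacy registers lay them out:
                 * v_position / v_blank_start in the low 16 bits, h_position /
                 * v_blank_end in the high 16 bits.
                 */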
                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

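        /*
         * An OTG instance of -1 should not happen in practice; warn about it
         * and fall back to the first CRTC so callers always get a usable
         * pointer.
         */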
        if (otg_inst == -1) {
                WARN_ON(1);
                return adev->mode_info.crtcs[0];
        }

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        struct dm_crtc_state *acrtc_state;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

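        /*
         * Pageflip IRQ sources are laid out per OTG, so the OTG instance can
         * be recovered from the offset of this source from IRQ_TYPE_PFLIP.
         */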
        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev->ddev->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
        vrr_active = amdgpu_dm_vrr_active(acrtc_state);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
                e = NULL;
        }

        /* Keep track of the vblank of this flip for flip throttling. We use
         * the cooked hw counter, as that one is incremented at the start of
         * this vblank of pageflip completion, so last_flip_vblank is the
         * forbidden count for queueing new pageflips if vsync + VRR is
         * enabled.
         */
        amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
                                                        amdgpu_crtc->crtc_id);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                         amdgpu_crtc->crtc_id, amdgpu_crtc,
                         vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                                 amdgpu_dm_vrr_active(acrtc_state));

                /* In VRR mode, core vblank handling is done here, after the
                 * end of the front-porch, since vblank timestamping only
                 * gives valid results once scanout is past the front-porch.
                 * This also delivers any page-flip completion events that
                 * were queued to us if a pageflip happened inside the
                 * front-porch.
                 */
                if (amdgpu_dm_vrr_active(acrtc_state))
                        drm_crtc_handle_vblank(&acrtc->base);
        }
}

static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct dm_crtc_state *acrtc_state;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

        if (acrtc) {
                acrtc_state = to_dm_crtc_state(acrtc->base.state);

                DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                                 amdgpu_dm_vrr_active(acrtc_state));

                /* Core vblank handling at the start of the front-porch is
                 * only possible in non-VRR mode, since that is the only case
                 * where vblank timestamping yields valid results while inside
                 * the front-porch. Otherwise defer it to dm_vupdate_high_irq
                 * after the end of the front-porch.
                 */
                if (!amdgpu_dm_vrr_active(acrtc_state))
                        drm_crtc_handle_vblank(&acrtc->base);

                /* The following must happen at the start of vblank, for CRC
                 * computation and below-the-range (BTR) support in VRR mode.
                 */
                amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

                if (acrtc_state->stream &&
                    acrtc_state->vrr_params.supported &&
                    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
                        mod_freesync_handle_v_update(
                                adev->dm.freesync_module,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params);

                        dc_stream_adjust_vmin_vmax(
                                adev->dm.dc,
                                acrtc_state->stream,
                                &acrtc_state->vrr_params.adjust);
                }
        }
}

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct dm_comressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

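        /*
         * Worst-case allocation: four bytes per pixel of the largest listed
         * mode (presumably 32bpp scanout), rounded up to page granularity.
         */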
        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
        struct dc_init_data init_data;

        adev->dm.ddev = adev->ddev;
        adev->dm.adev = adev;

        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));

        mutex_init(&adev->dm.dc_lock);

        if (amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
        }

        init_data.asic_id.chip_family = adev->family;

        init_data.asic_id.pci_revision_id = adev->rev_id;
        init_data.asic_id.hw_internal_rev = adev->external_rev_id;

        init_data.asic_id.vram_width = adev->gmc.vram_width;
        /* TODO: initialize init_data.asic_id.vram_type here!!!! */
        init_data.asic_id.atombios_base_address =
                adev->mode_info.atom_context->bios;

        init_data.driver = adev;

        adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

        if (!adev->dm.cgs_device) {
                DRM_ERROR("amdgpu: failed to create cgs device.\n");
                goto error;
        }

        init_data.cgs_device = adev->dm.cgs_device;

        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

        /*
         * TODO debug why this doesn't work on Raven
         */
        if (adev->flags & AMD_IS_APU &&
            adev->asic_type >= CHIP_CARRIZO &&
            adev->asic_type < CHIP_RAVEN)
                init_data.flags.gpu_vm_support = true;

        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;

        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);

        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }

        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR(
                "amdgpu: failed to initialize freesync_module.\n");
        } else
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);

        amdgpu_dm_init_color_mod();

        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }

        /* Update the actual number of CRTCs in use */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;

        /* TODO: Add_display_info? */

        /* TODO use dynamic cursor width */
        adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
        adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

        if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
                DRM_ERROR(
                "amdgpu: failed to initialize vblank support.\n");
                goto error;
        }

#if defined(CONFIG_DEBUG_FS)
        if (dtn_debugfs_init(adev))
                DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
#endif

        DRM_DEBUG_DRIVER("KMS initialized.\n");

        return 0;
error:
        amdgpu_dm_fini(adev);

        return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
        amdgpu_dm_destroy_drm_device(&adev->dm);
        /*
         * TODO: pageflip, vblank interrupt
         *
         * amdgpu_dm_irq_fini(adev);
         */

        if (adev->dm.cgs_device) {
                amdgpu_cgs_destroy_device(adev->dm.cgs_device);
                adev->dm.cgs_device = NULL;
        }
        if (adev->dm.freesync_module) {
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }
        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);

        mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
        const char *fw_name_dmcu;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGAM:
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                return 0;
        case CHIP_RAVEN:
                fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
                return 0;
        }

        r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
        if (r == -ENOENT) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
                        fw_name_dmcu);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
                return r;
        }

        hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
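
        /*
         * The DMCU image carries two ucode regions: the ERAM program (header
         * ucode size minus the interrupt-vector size) and the interrupt
         * vectors themselves. Both entries point at the same blob; the sizes
         * from the header tell the PSP loader how to slice it.
         */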
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
        adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
        adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

        adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

        DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

        return 0;
}

static int dm_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->dm.fw_dmcu) {
                release_firmware(adev->dm.fw_dmcu);
                adev->dm.fw_dmcu = NULL;
        }

        return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        int ret = 0;

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type == dc_connection_mst_branch &&
                    aconnector->mst_mgr.aux) {
                        DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
                                        aconnector, aconnector->base.base.id);

                        ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
                        if (ret < 0) {
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                ((struct dc_link *)aconnector->dc_link)->type =
                                        dc_connection_single;
                                /* Break instead of returning so the lock is dropped. */
                                break;
                        }
                }
        }

        drm_modeset_unlock(&dev->mode_config.connection_mutex);
        return ret;
}


static int dm_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        bool ret;

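        /* Build a 16-entry linear (identity) backlight transfer LUT over 0..0xFFFF. */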
        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;

        params.set = 0;
        params.backlight_ramping_start = 0xCCCC;
        params.backlight_ramping_reduction = 0xCCCCCCCC;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;

        ret = dmcu_load_iram(dmcu, params);

        if (!ret)
                return -EINVAL;

        return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_dp_mst_topology_mgr *mgr;
        int ret;
        bool need_hotplug = false;

        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

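        /*
         * Walk all MST root connectors: suspend simply quiesces the topology
         * manager, while resume re-probes it; if re-probing fails, the
         * topology is torn down and a hotplug event is sent so userspace can
         * reconfigure.
         */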
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            head) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_port)
                        continue;

                mgr = &aconnector->mst_mgr;

                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
                        ret = drm_dp_mst_topology_mgr_resume(mgr);
                        if (ret < 0) {
                                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                                need_hotplug = true;
                        }
                }
        }

        drm_modeset_unlock(&dev->mode_config.connection_mutex);

        if (need_hotplug)
                drm_kms_helper_hotplug_event(dev);
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Create DAL display manager */
        amdgpu_dm_init(adev);
        amdgpu_dm_hpd_init(adev);

        return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_dm_hpd_fini(adev);

        amdgpu_dm_irq_fini(adev);
        amdgpu_dm_fini(adev);

        return 0;
}

static int dm_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;

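        /*
         * Order matters here: cache and disable the DRM atomic state first,
         * then quiesce MST and the DM interrupt handlers, and only then ask
         * DC to drop the hardware into D3.
         */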
        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

        s3_handle_mst(adev->ddev, true);

        amdgpu_dm_irq_suspend(adev);

        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

        return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
                                             struct drm_crtc *crtc)
{
        uint32_t i;
        struct drm_connector_state *new_con_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;

        for_each_new_connector_in_state(state, connector, new_con_state, i) {
                crtc_from_state = new_con_state->crtc;

                if (crtc_from_state == crtc)
                        return to_amdgpu_dm_connector(connector);
        }

        return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
        struct dc_sink_init_data sink_init_data = { 0 };
        struct display_sink_capability sink_caps = { 0 };
        enum dc_edid_status edid_status;
        struct dc_context *dc_ctx = link->ctx;
        struct dc_sink *sink = NULL;
        struct dc_sink *prev_sink = NULL;

        link->type = dc_connection_none;
        prev_sink = link->local_sink;

        if (prev_sink != NULL)
                dc_sink_retain(prev_sink);

        switch (link->connector_signal) {
        case SIGNAL_TYPE_HDMI_TYPE_A: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
                break;
        }

        case SIGNAL_TYPE_DVI_SINGLE_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
                break;
        }

        case SIGNAL_TYPE_DVI_DUAL_LINK: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                break;
        }

        case SIGNAL_TYPE_LVDS: {
                sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
                sink_caps.signal = SIGNAL_TYPE_LVDS;
                break;
        }

        case SIGNAL_TYPE_EDP: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                sink_caps.signal = SIGNAL_TYPE_EDP;
                break;
        }

        case SIGNAL_TYPE_DISPLAY_PORT: {
                sink_caps.transaction_type =
                        DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
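                /*
                 * Note: emulated DP deliberately reports SIGNAL_TYPE_VIRTUAL
                 * rather than SIGNAL_TYPE_DISPLAY_PORT, presumably so that no
                 * real link training is attempted on the forced connector.
                 */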
                sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
                break;
        }

        default:
                DC_ERROR("Invalid connector type! signal:%d\n",
                        link->connector_signal);
                return;
        }

        sink_init_data.link = link;
        sink_init_data.sink_signal = sink_caps.signal;

        sink = dc_sink_create(&sink_init_data);
        if (!sink) {
                DC_ERROR("Failed to create sink!\n");
                return;
        }

        /* dc_sink_create returns a new reference */
        link->local_sink = sink;

        edid_status = dm_helpers_read_local_edid(
                        link->ctx,
                        link,
                        sink);

        if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID");
}

static int dm_resume(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct drm_device *ddev = adev->ddev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
        struct drm_crtc *crtc;
        struct drm_crtc_state *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state;
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
        enum dc_connection_type new_connection_type = dc_connection_none;
        int i;

        /* Recreate dc_state - DC invalidates it when setting power state to S3. */
        dc_release_state(dm_state->context);
        dm_state->context = dc_create_state(dm->dc);
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);

        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

        /* program HPD filter */
        dc_resume(dm->dc);

        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /*
         * Early enable HPD Rx IRQ; this should be done before set mode, as
         * short-pulse interrupts are used for MST.
         */
        amdgpu_dm_irq_resume_early(adev);

        /* Do detection */
        list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
                aconnector = to_amdgpu_dm_connector(connector);

                /*
                 * This is the case of traversing through already-created MST
                 * connectors; they should be skipped.
                 */
                if (aconnector->mst_port)
                        continue;

                mutex_lock(&aconnector->hpd_lock);
                if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                if (aconnector->base.force && new_connection_type == dc_connection_none)
                        emulated_link_detect(aconnector->dc_link);
                else
                        dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;

                if (aconnector->dc_sink)
                        dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                amdgpu_dm_update_connector_after_detect(aconnector);
                mutex_unlock(&aconnector->hpd_lock);
        }

        /* Force mode set in atomic commit */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
                new_crtc_state->active_changed = true;

        /*
         * atomic_check is expected to create the dc states. We need to release
         * them here, since they were duplicated as part of the suspend
         * procedure.
         */
        for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                if (dm_new_crtc_state->stream) {
                        WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
                        dc_stream_release(dm_new_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;
                }
        }

        for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
                dm_new_plane_state = to_dm_plane_state(new_plane_state);
                if (dm_new_plane_state->dc_state) {
                        WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
                        dc_plane_state_release(dm_new_plane_state->dc_state);
                        dm_new_plane_state->dc_state = NULL;
                }
        }

        drm_atomic_helper_resume(ddev, dm->cached_state);

        dm->cached_state = NULL;

        amdgpu_dm_irq_resume_late(adev);

        return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .name = "dm",
        .early_init = dm_early_init,
        .late_init = dm_late_init,
        .sw_init = dm_sw_init,
        .sw_fini = dm_sw_fini,
        .hw_init = dm_hw_init,
        .hw_fini = dm_hw_fini,
        .suspend = dm_suspend,
        .resume = dm_resume,
        .is_idle = dm_is_idle,
        .wait_for_idle = dm_wait_for_idle,
        .check_soft_reset = dm_check_soft_reset,
        .soft_reset = dm_soft_reset,
        .set_clockgating_state = dm_set_clockgating_state,
        .set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &amdgpu_dm_funcs,
};

/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_sink *sink;

        /* MST handled by drm_mst framework */
        if (aconnector->mst_mgr.mst_state)
                return;

        sink = aconnector->dc_link->local_sink;
        if (sink)
                dc_sink_retain(sink);

        /*
         * EDID-managed connectors get their first update only in the
         * mode_valid hook, and the connector sink is then set to either the
         * fake or the physical sink, depending on the link status.
         * Skip if this was already done during boot.
         */
        if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
                        && aconnector->dc_em_sink) {

                /*
                 * For headless S3 resume, use the emulated sink to fake a
                 * stream, because connector->sink is set to NULL on resume.
                 */
                mutex_lock(&dev->mode_config.mutex);

                if (sink) {
                        if (aconnector->dc_sink) {
                                amdgpu_dm_update_freesync_caps(connector, NULL);
                                /*
                                 * The retain and release here bump up the
                                 * sink's refcount, because the link no longer
                                 * points to it after a disconnect; otherwise
                                 * the next crtc-to-connector reshuffle by UMD
                                 * would trigger an unwanted dc_sink release.
                                 */
                                dc_sink_release(aconnector->dc_sink);
                        }
                        aconnector->dc_sink = sink;
                        dc_sink_retain(aconnector->dc_sink);
                        amdgpu_dm_update_freesync_caps(connector,
                                        aconnector->edid);
                } else {
                        amdgpu_dm_update_freesync_caps(connector, NULL);
                        if (!aconnector->dc_sink) {
                                aconnector->dc_sink = aconnector->dc_em_sink;
                                dc_sink_retain(aconnector->dc_sink);
                        }
                }

                mutex_unlock(&dev->mode_config.mutex);

                if (sink)
                        dc_sink_release(sink);
                return;
        }

        /*
         * TODO: temporary guard until a proper fix is found; if this sink is
         * an MST sink, we should not do anything here.
         */
        if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                dc_sink_release(sink);
                return;
        }

        if (aconnector->dc_sink == sink) {
                /*
                 * We got a DP short pulse (Link Loss, DP CTS, etc...).
                 * Do nothing!!
                 */
                DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
                                aconnector->connector_id);
                if (sink)
                        dc_sink_release(sink);
                return;
        }

        DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
                aconnector->connector_id, aconnector->dc_sink, sink);

        mutex_lock(&dev->mode_config.mutex);

        /*
         * 1. Update status of the drm connector
         * 2. Send an event and let userspace tell us what to do
         */
        if (sink) {
                /*
                 * TODO: check if we still need the S3 mode update workaround.
                 * If yes, put it here.
                 */
                if (aconnector->dc_sink)
                        amdgpu_dm_update_freesync_caps(connector, NULL);

                aconnector->dc_sink = sink;
                dc_sink_retain(aconnector->dc_sink);
                if (sink->dc_edid.length == 0) {
                        aconnector->edid = NULL;
                        drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
                } else {
                        aconnector->edid =
                                (struct edid *) sink->dc_edid.raw_edid;

                        drm_connector_update_edid_property(connector,
                                        aconnector->edid);
                        drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
                                            aconnector->edid);
                }
                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
        } else {
                drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
                amdgpu_dm_update_freesync_caps(connector, NULL);
                drm_connector_update_edid_property(connector, NULL);
                aconnector->num_modes = 0;
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
        }

        mutex_unlock(&dev->mode_config.mutex);

        if (sink)
                dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        enum dc_connection_type new_connection_type = dc_connection_none;

        /*
         * In case of failure or MST, there is no need to update the connector
         * status or notify the OS, since (in the MST case) MST does this in
         * its own context.
         */
        mutex_lock(&aconnector->hpd_lock);

        if (aconnector->fake_enable)
                aconnector->fake_enable = false;

        if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");

        if (aconnector->base.force && new_connection_type == dc_connection_none) {
                emulated_link_detect(aconnector->dc_link);

                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);
        } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                amdgpu_dm_update_connector_after_detect(aconnector);

                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);
        }
        mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
        uint8_t dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;

        const int max_process_count = 30;
        int process_count = 0;

        const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

        if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
                dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
                /* DPCD 0x200 - 0x201 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT;
        } else {
                dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
                /* DPCD 0x2002 - 0x2005 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT_ESI;
        }

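        /*
         * Read the sink-count/ESI block and keep servicing it until the sink
         * stops raising new IRQs, bounded by max_process_count so a noisy
         * sink cannot wedge the handler.
         */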
        dret = drm_dp_dpcd_read(
                &aconnector->dm_dp_aux.aux,
                dpcd_addr,
                esi,
                dpcd_bytes_to_read);

        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
                uint8_t retry;

                dret = 0;
                process_count++;

                DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                /* handle HPD short pulse irq */
                if (aconnector->mst_mgr.mst_state)
                        drm_dp_mst_hpd_irq(
                                &aconnector->mst_mgr,
                                esi,
                                &new_irq_handled);

                if (new_irq_handled) {
                        /* ACK at DPCD to notify downstream */
                        const int ack_dpcd_bytes_to_write =
                                dpcd_bytes_to_read - 1;

                        for (retry = 0; retry < 3; retry++) {
                                uint8_t wret;

                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
                                        dpcd_addr + 1,
                                        &esi[1],
                                        ack_dpcd_bytes_to_write);
                                if (wret == ack_dpcd_bytes_to_write)
                                        break;
                        }

                        /* check if there is a new irq to be handled */
                        dret = drm_dp_dpcd_read(
                                &aconnector->dm_dp_aux.aux,
                                dpcd_addr,
                                esi,
                                dpcd_bytes_to_read);

                        new_irq_handled = false;
                } else {
                        break;
                }
        }

        if (process_count == max_process_count)
                DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void handle_hpd_rx_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
        enum dc_connection_type new_connection_type = dc_connection_none;

        /*
         * TODO: Temporarily take this mutex so the HPD interrupt does not
         * race on the GPIO; once an i2c helper is implemented, this mutex
         * should be retired.
         */
        if (dc_link->type != dc_connection_mst_branch)
                mutex_lock(&aconnector->hpd_lock);

        if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (!dc_link_detect_sink(dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                if (aconnector->base.force && new_connection_type == dc_connection_none) {
                        emulated_link_detect(dc_link);

                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;

                        amdgpu_dm_update_connector_after_detect(aconnector);

                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;

                        amdgpu_dm_update_connector_after_detect(aconnector);

                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                }
        }
        if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
            (dc_link->type == dc_connection_mst_branch))
                dm_handle_hpd_rx_irq(aconnector);

        if (dc_link->type != dc_connection_mst_branch) {
                drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
                mutex_unlock(&aconnector->hpd_lock);
        }
}
1448
1449 static void register_hpd_handlers(struct amdgpu_device *adev)
1450 {
1451         struct drm_device *dev = adev->ddev;
1452         struct drm_connector *connector;
1453         struct amdgpu_dm_connector *aconnector;
1454         const struct dc_link *dc_link;
1455         struct dc_interrupt_params int_params = {0};
1456
1457         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1458         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1459
1460         list_for_each_entry(connector,
1461                         &dev->mode_config.connector_list, head) {
1462
1463                 aconnector = to_amdgpu_dm_connector(connector);
1464                 dc_link = aconnector->dc_link;
1465
1466                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1467                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1468                         int_params.irq_source = dc_link->irq_source_hpd;
1469
1470                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1471                                         handle_hpd_irq,
1472                                         (void *) aconnector);
1473                 }
1474
1475                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1476
1477                         /* Also register for DP short pulse (hpd_rx). */
1478                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1479                         int_params.irq_source = dc_link->irq_source_hpd_rx;
1480
1481                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1482                                         handle_hpd_rx_irq,
1483                                         (void *) aconnector);
1484                 }
1485         }
1486 }
1487
1488 /* Register IRQ sources and initialize IRQ callbacks */
1489 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1490 {
1491         struct dc *dc = adev->dm.dc;
1492         struct common_irq_params *c_irq_params;
1493         struct dc_interrupt_params int_params = {0};
1494         int r;
1495         int i;
1496         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
1497
1498         if (adev->asic_type == CHIP_VEGA10 ||
1499             adev->asic_type == CHIP_VEGA12 ||
1500             adev->asic_type == CHIP_VEGA20 ||
1501             adev->asic_type == CHIP_RAVEN)
1502                 client_id = SOC15_IH_CLIENTID_DCE;
1503
1504         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1505         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1506
1507         /*
1508          * Actions of amdgpu_irq_add_id():
1509          * 1. Register a set() function with base driver.
1510          *    Base driver will call set() function to enable/disable an
1511          *    interrupt in DC hardware.
1512          * 2. Register amdgpu_dm_irq_handler().
1513          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1514          *    coming from DC hardware.
1515          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1516          *    for acknowledging and handling. */
1517
1518         /* Use VBLANK interrupt */
1519         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1520                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1521                 if (r) {
1522                         DRM_ERROR("Failed to add crtc irq id!\n");
1523                         return r;
1524                 }
1525
1526                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1527                 int_params.irq_source =
1528                         dc_interrupt_to_irq_source(dc, i, 0);
1529
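		/*
		 * The DC_IRQ_SOURCE_VBLANKn values are contiguous, one per
		 * CRTC, so the offset from DC_IRQ_SOURCE_VBLANK1 picks the
		 * per-CRTC parameter block matching this interrupt source.
		 */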
1530                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1531
1532                 c_irq_params->adev = adev;
1533                 c_irq_params->irq_src = int_params.irq_source;
1534
1535                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1536                                 dm_crtc_high_irq, c_irq_params);
1537         }
1538
1539         /* Use VUPDATE interrupt */
1540         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
1541                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
1542                 if (r) {
1543                         DRM_ERROR("Failed to add vupdate irq id!\n");
1544                         return r;
1545                 }
1546
1547                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1548                 int_params.irq_source =
1549                         dc_interrupt_to_irq_source(dc, i, 0);
1550
1551                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
1552
1553                 c_irq_params->adev = adev;
1554                 c_irq_params->irq_src = int_params.irq_source;
1555
1556                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1557                                 dm_vupdate_high_irq, c_irq_params);
1558         }
1559
1560         /* Use GRPH_PFLIP interrupt */
1561         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1562                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1563                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1564                 if (r) {
1565                         DRM_ERROR("Failed to add page flip irq id!\n");
1566                         return r;
1567                 }
1568
1569                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1570                 int_params.irq_source =
1571                         dc_interrupt_to_irq_source(dc, i, 0);
1572
1573                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1574
1575                 c_irq_params->adev = adev;
1576                 c_irq_params->irq_src = int_params.irq_source;
1577
1578                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1579                                 dm_pflip_high_irq, c_irq_params);
1580
1581         }
1582
1583         /* HPD */
1584         r = amdgpu_irq_add_id(adev, client_id,
1585                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1586         if (r) {
1587                 DRM_ERROR("Failed to add hpd irq id!\n");
1588                 return r;
1589         }
1590
1591         register_hpd_handlers(adev);
1592
1593         return 0;
1594 }
1595
1596 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1597 /* Register IRQ sources and initialize IRQ callbacks */
1598 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1599 {
1600         struct dc *dc = adev->dm.dc;
1601         struct common_irq_params *c_irq_params;
1602         struct dc_interrupt_params int_params = {0};
1603         int r;
1604         int i;
1605
1606         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1607         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1608
1609         /*
1610          * Actions of amdgpu_irq_add_id():
1611          * 1. Register a set() function with base driver.
1612          *    Base driver will call set() function to enable/disable an
1613          *    interrupt in DC hardware.
1614          * 2. Register amdgpu_dm_irq_handler().
1615          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1616          *    coming from DC hardware.
1617          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1618          *    for acknowledging and handling.
1619          */
1620
1621         /* Use VSTARTUP interrupt */
1622         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1623                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1624                         i++) {
1625                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1626
1627                 if (r) {
1628                         DRM_ERROR("Failed to add crtc irq id!\n");
1629                         return r;
1630                 }
1631
1632                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1633                 int_params.irq_source =
1634                         dc_interrupt_to_irq_source(dc, i, 0);
1635
1636                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1637
1638                 c_irq_params->adev = adev;
1639                 c_irq_params->irq_src = int_params.irq_source;
1640
1641                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1642                                 dm_crtc_high_irq, c_irq_params);
1643         }
1644
1645         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
1646          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
1647          * to trigger at the end of each vblank, regardless of the lock state,
1648          * matching DCE behaviour.
1649          */
1650         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
1651              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
1652              i++) {
1653                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
1654
1655                 if (r) {
1656                         DRM_ERROR("Failed to add vupdate irq id!\n");
1657                         return r;
1658                 }
1659
1660                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1661                 int_params.irq_source =
1662                         dc_interrupt_to_irq_source(dc, i, 0);
1663
1664                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
1665
1666                 c_irq_params->adev = adev;
1667                 c_irq_params->irq_src = int_params.irq_source;
1668
1669                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1670                                 dm_vupdate_high_irq, c_irq_params);
1671         }
1672
1673         /* Use GRPH_PFLIP interrupt */
1674         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1675                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1676                         i++) {
1677                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1678                 if (r) {
1679                         DRM_ERROR("Failed to add page flip irq id!\n");
1680                         return r;
1681                 }
1682
1683                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1684                 int_params.irq_source =
1685                         dc_interrupt_to_irq_source(dc, i, 0);
1686
1687                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1688
1689                 c_irq_params->adev = adev;
1690                 c_irq_params->irq_src = int_params.irq_source;
1691
1692                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1693                                 dm_pflip_high_irq, c_irq_params);
1694
1695         }
1696
1697         /* HPD */
1698         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1699                         &adev->hpd_irq);
1700         if (r) {
1701                 DRM_ERROR("Failed to add hpd irq id!\n");
1702                 return r;
1703         }
1704
1705         register_hpd_handlers(adev);
1706
1707         return 0;
1708 }
1709 #endif
1710
1711 /*
1712  * Acquires the lock for the atomic state object and returns
1713  * the new atomic state.
1714  *
1715  * This should only be called during atomic check.
1716  */
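/*
 * A minimal usage sketch from atomic check (locals are hypothetical):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * On success, dm_state points at the private state tracked in this commit
 * and can be modified under the acquired lock.
 */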
1717 static int dm_atomic_get_state(struct drm_atomic_state *state,
1718                                struct dm_atomic_state **dm_state)
1719 {
1720         struct drm_device *dev = state->dev;
1721         struct amdgpu_device *adev = dev->dev_private;
1722         struct amdgpu_display_manager *dm = &adev->dm;
1723         struct drm_private_state *priv_state;
1724
1725         if (*dm_state)
1726                 return 0;
1727
1728         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
1729         if (IS_ERR(priv_state))
1730                 return PTR_ERR(priv_state);
1731
1732         *dm_state = to_dm_atomic_state(priv_state);
1733
1734         return 0;
1735 }
1736
1737 struct dm_atomic_state *
1738 dm_atomic_get_new_state(struct drm_atomic_state *state)
1739 {
1740         struct drm_device *dev = state->dev;
1741         struct amdgpu_device *adev = dev->dev_private;
1742         struct amdgpu_display_manager *dm = &adev->dm;
1743         struct drm_private_obj *obj;
1744         struct drm_private_state *new_obj_state;
1745         int i;
1746
1747         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
1748                 if (obj->funcs == dm->atomic_obj.funcs)
1749                         return to_dm_atomic_state(new_obj_state);
1750         }
1751
1752         return NULL;
1753 }
1754
1755 struct dm_atomic_state *
1756 dm_atomic_get_old_state(struct drm_atomic_state *state)
1757 {
1758         struct drm_device *dev = state->dev;
1759         struct amdgpu_device *adev = dev->dev_private;
1760         struct amdgpu_display_manager *dm = &adev->dm;
1761         struct drm_private_obj *obj;
1762         struct drm_private_state *old_obj_state;
1763         int i;
1764
1765         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
1766                 if (obj->funcs == dm->atomic_obj.funcs)
1767                         return to_dm_atomic_state(old_obj_state);
1768         }
1769
1770         return NULL;
1771 }
1772
1773 static struct drm_private_state *
1774 dm_atomic_duplicate_state(struct drm_private_obj *obj)
1775 {
1776         struct dm_atomic_state *old_state, *new_state;
1777
1778         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1779         if (!new_state)
1780                 return NULL;
1781
1782         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1783
1784         old_state = to_dm_atomic_state(obj->state);
1785
1786         if (old_state && old_state->context)
1787                 new_state->context = dc_copy_state(old_state->context);
1788
1789         if (!new_state->context) {
1790                 kfree(new_state);
1791                 return NULL;
1792         }
1793
1794         return &new_state->base;
1795 }
1796
1797 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
1798                                     struct drm_private_state *state)
1799 {
1800         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
1801
1802         if (dm_state && dm_state->context)
1803                 dc_release_state(dm_state->context);
1804
1805         kfree(dm_state);
1806 }
1807
1808 static struct drm_private_state_funcs dm_atomic_state_funcs = {
1809         .atomic_duplicate_state = dm_atomic_duplicate_state,
1810         .atomic_destroy_state = dm_atomic_destroy_state,
1811 };
1812
1813 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1814 {
1815         struct dm_atomic_state *state;
1816         int r;
1817
1818         adev->mode_info.mode_config_initialized = true;
1819
1820         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1821         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1822
1823         adev->ddev->mode_config.max_width = 16384;
1824         adev->ddev->mode_config.max_height = 16384;
1825
1826         adev->ddev->mode_config.preferred_depth = 24;
1827         adev->ddev->mode_config.prefer_shadow = 1;
1828         /* indicates support for immediate flip */
1829         adev->ddev->mode_config.async_page_flip = true;
1830
1831         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
1832
1833         state = kzalloc(sizeof(*state), GFP_KERNEL);
1834         if (!state)
1835                 return -ENOMEM;
1836
1837         state->context = dc_create_state(adev->dm.dc);
1838         if (!state->context) {
1839                 kfree(state);
1840                 return -ENOMEM;
1841         }
1842
1843         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
1844
1845         drm_atomic_private_obj_init(adev->ddev,
1846                                     &adev->dm.atomic_obj,
1847                                     &state->base,
1848                                     &dm_atomic_state_funcs);
1849
1850         r = amdgpu_display_modeset_create_props(adev);
1851         if (r)
1852                 return r;
1853
1854         return 0;
1855 }
1856
1857 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
1858 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
1859
1860 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1861         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1862
1863 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
1864 {
1865 #if defined(CONFIG_ACPI)
1866         struct amdgpu_dm_backlight_caps caps;
1867
1868         if (dm->backlight_caps.caps_valid)
1869                 return;
1870
1871         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
1872         if (caps.caps_valid) {
1873                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
1874                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
1875                 dm->backlight_caps.caps_valid = true;
1876         } else {
1877                 dm->backlight_caps.min_input_signal =
1878                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1879                 dm->backlight_caps.max_input_signal =
1880                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1881         }
1882 #else
1883         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
1884         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
1885 #endif
1886 }
1887
1888 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1889 {
1890         struct amdgpu_display_manager *dm = bl_get_data(bd);
1891         struct amdgpu_dm_backlight_caps caps;
1892         uint32_t brightness = bd->props.brightness;
1893
1894         amdgpu_dm_update_backlight_caps(dm);
1895         caps = dm->backlight_caps;
1896         /*
1897          * The brightness input is in the range 0-255.
1898          * It needs to be rescaled to lie between the
1899          * requested min and max input signal.
1900          *
1901          * It also needs to be scaled up by 0x101 to
1902          * match the DC interface, which has a range of
1903          * 0 to 0xffff.
1904          */
1905         brightness =
1906                 brightness
1907                 * 0x101
1908                 * (caps.max_input_signal - caps.min_input_signal)
1909                 / AMDGPU_MAX_BL_LEVEL
1910                 + caps.min_input_signal * 0x101;
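
	/*
	 * Worked example, assuming the caps fall back to the defaults
	 * (min_input_signal = 12, max_input_signal = 255) and
	 * AMDGPU_MAX_BL_LEVEL = 255:
	 *   brightness 255 -> 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101
	 *                   = 0xffff, the DC maximum;
	 *   brightness 0   -> 12 * 0x101 = 0x0c0c, the scaled minimum.
	 */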
1911
1912         if (dc_link_set_backlight_level(dm->backlight_link,
1913                         brightness, 0))
1914                 return 0;
1915         else
1916                 return 1;
1917 }
1918
1919 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1920 {
1921         struct amdgpu_display_manager *dm = bl_get_data(bd);
1922         int ret = dc_link_get_backlight_level(dm->backlight_link);
1923
1924         if (ret == DC_ERROR_UNEXPECTED)
1925                 return bd->props.brightness;
1926         return ret;
1927 }
1928
1929 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1930         .get_brightness = amdgpu_dm_backlight_get_brightness,
1931         .update_status  = amdgpu_dm_backlight_update_status,
1932 };
1933
1934 static void
1935 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1936 {
1937         char bl_name[16];
1938         struct backlight_properties props = { 0 };
1939
1940         amdgpu_dm_update_backlight_caps(dm);
1941
1942         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1943         props.brightness = AMDGPU_MAX_BL_LEVEL;
1944         props.type = BACKLIGHT_RAW;
1945
1946         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1947                         dm->adev->ddev->primary->index);
1948
1949         dm->backlight_dev = backlight_device_register(bl_name,
1950                         dm->adev->ddev->dev,
1951                         dm,
1952                         &amdgpu_dm_backlight_ops,
1953                         &props);
1954
1955         if (IS_ERR(dm->backlight_dev))
1956                 DRM_ERROR("DM: Backlight registration failed!\n");
1957         else
1958                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1959 }
1960
1961 #endif
1962
1963 static int initialize_plane(struct amdgpu_display_manager *dm,
1964                             struct amdgpu_mode_info *mode_info, int plane_id,
1965                             enum drm_plane_type plane_type,
1966                             const struct dc_plane_cap *plane_cap)
1967 {
1968         struct drm_plane *plane;
1969         unsigned long possible_crtcs;
1970         int ret = 0;
1971
1972         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
1973         if (!plane) {
1974                 DRM_ERROR("KMS: Failed to allocate plane\n");
1975                 return -ENOMEM;
1976         }
1977         plane->type = plane_type;
1978
1979         /*
1980          * HACK: IGT tests expect that the primary plane for a CRTC
1981          * can only have one possible CRTC. Only expose support for
1982          * any CRTC on planes that will not be used as a primary
1983          * plane for a CRTC - i.e. overlay or underlay planes.
1984          */
1985         possible_crtcs = 1 << plane_id;
1986         if (plane_id >= dm->dc->caps.max_streams)
1987                 possible_crtcs = 0xff;
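	/*
	 * For illustration: with dc->caps.max_streams == 3, plane_id 1 gets
	 * possible_crtcs = 0x2 (a primary plane bound to CRTC 1 only), while
	 * plane_id 4, an overlay, gets 0xff (usable on any CRTC).
	 */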
1988
1989         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
1990
1991         if (ret) {
1992                 DRM_ERROR("KMS: Failed to initialize plane\n");
1993                 kfree(plane);
1994                 return ret;
1995         }
1996
1997         if (mode_info)
1998                 mode_info->planes[plane_id] = plane;
1999
2000         return ret;
2001 }
2002
2003
2004 static void register_backlight_device(struct amdgpu_display_manager *dm,
2005                                       struct dc_link *link)
2006 {
2007 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2008         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2009
2010         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2011             link->type != dc_connection_none) {
2012                 /*
2013                  * Even if registration fails, we should continue with
2014                  * DM initialization, because not having backlight control
2015                  * is better than a black screen.
2016                  */
2017                 amdgpu_dm_register_backlight_device(dm);
2018
2019                 if (dm->backlight_dev)
2020                         dm->backlight_link = link;
2021         }
2022 #endif
2023 }
2024
2025
2026 /*
2027  * In this architecture, the association
2028  * connector -> encoder -> crtc
2029  * is not really required. The crtc and connector will hold the
2030  * display_index as an abstraction to use with the DAL component.
2031  *
2032  * Returns 0 on success
2033  */
2034 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2035 {
2036         struct amdgpu_display_manager *dm = &adev->dm;
2037         int32_t i;
2038         struct amdgpu_dm_connector *aconnector = NULL;
2039         struct amdgpu_encoder *aencoder = NULL;
2040         struct amdgpu_mode_info *mode_info = &adev->mode_info;
2041         uint32_t link_cnt;
2042         int32_t primary_planes;
2043         enum dc_connection_type new_connection_type = dc_connection_none;
2044         const struct dc_plane_cap *plane;
2045
2046         link_cnt = dm->dc->caps.max_links;
2047         if (amdgpu_dm_mode_config_init(dm->adev)) {
2048                 DRM_ERROR("DM: Failed to initialize mode config\n");
2049                 return -EINVAL;
2050         }
2051
2052         /* There is one primary plane per CRTC */
2053         primary_planes = dm->dc->caps.max_streams;
2054         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2055
2056         /*
2057          * Initialize primary planes, implicit planes for legacy IOCTLs.
2058          * Order is reversed to match iteration order in atomic check.
2059          */
2060         for (i = (primary_planes - 1); i >= 0; i--) {
2061                 plane = &dm->dc->caps.planes[i];
2062
2063                 if (initialize_plane(dm, mode_info, i,
2064                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
2065                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
2066                         goto fail;
2067                 }
2068         }
2069
2070         /*
2071          * Initialize overlay planes, index starting after primary planes.
2072          * These planes have a higher DRM index than the primary planes since
2073          * they should be considered as having a higher z-order.
2074          * Order is reversed to match iteration order in atomic check.
2075          *
2076          * Only support DCN for now, and only expose one so we don't encourage
2077          * userspace to use up all the pipes.
2078          */
2079         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2080                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2081
2082                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2083                         continue;
2084
2085                 if (!plane->blends_with_above || !plane->blends_with_below)
2086                         continue;
2087
2088                 if (!plane->supports_argb8888)
2089                         continue;
2090
2091                 if (initialize_plane(dm, NULL, primary_planes + i,
2092                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
2093                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2094                         goto fail;
2095                 }
2096
2097                 /* Only create one overlay plane. */
2098                 break;
2099         }
2100
2101         for (i = 0; i < dm->dc->caps.max_streams; i++)
2102                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2103                         DRM_ERROR("KMS: Failed to initialize crtc\n");
2104                         goto fail;
2105                 }
2106
2107         dm->display_indexes_num = dm->dc->caps.max_streams;
2108
2109         /* Loop over all connectors on the board. */
2110         for (i = 0; i < link_cnt; i++) {
2111                 struct dc_link *link = NULL;
2112
2113                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2114                         DRM_ERROR(
2115                                 "KMS: Cannot support more than %d display indexes\n",
2116                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
2117                         continue;
2118                 }
2119
2120                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2121                 if (!aconnector)
2122                         goto fail;
2123
2124                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2125                 if (!aencoder)
2126                         goto fail;
2127
2128                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2129                         DRM_ERROR("KMS: Failed to initialize encoder\n");
2130                         goto fail;
2131                 }
2132
2133                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2134                         DRM_ERROR("KMS: Failed to initialize connector\n");
2135                         goto fail;
2136                 }
2137
2138                 link = dc_get_link_at_index(dm->dc, i);
2139
2140                 if (!dc_link_detect_sink(link, &new_connection_type))
2141                         DRM_ERROR("KMS: Failed to detect connector\n");
2142
2143                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2144                         emulated_link_detect(link);
2145                         amdgpu_dm_update_connector_after_detect(aconnector);
2146
2147                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2148                         amdgpu_dm_update_connector_after_detect(aconnector);
2149                         register_backlight_device(dm, link);
2150                 }
2151
2152
2153         }
2154
2155         /* Software is initialized. Now we can register interrupt handlers. */
2156         switch (adev->asic_type) {
2157         case CHIP_BONAIRE:
2158         case CHIP_HAWAII:
2159         case CHIP_KAVERI:
2160         case CHIP_KABINI:
2161         case CHIP_MULLINS:
2162         case CHIP_TONGA:
2163         case CHIP_FIJI:
2164         case CHIP_CARRIZO:
2165         case CHIP_STONEY:
2166         case CHIP_POLARIS11:
2167         case CHIP_POLARIS10:
2168         case CHIP_POLARIS12:
2169         case CHIP_VEGAM:
2170         case CHIP_VEGA10:
2171         case CHIP_VEGA12:
2172         case CHIP_VEGA20:
2173                 if (dce110_register_irq_handlers(dm->adev)) {
2174                         DRM_ERROR("DM: Failed to initialize IRQ\n");
2175                         goto fail;
2176                 }
2177                 break;
2178 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2179         case CHIP_RAVEN:
2180                 if (dcn10_register_irq_handlers(dm->adev)) {
2181                         DRM_ERROR("DM: Failed to initialize IRQ\n");
2182                         goto fail;
2183                 }
2184                 break;
2185 #endif
2186         default:
2187                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2188                 goto fail;
2189         }
2190
2191         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2192                 dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
2193
2194         return 0;
2195 fail:
2196         kfree(aencoder);
2197         kfree(aconnector);
2198
2199         return -EINVAL;
2200 }
2201
2202 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2203 {
2204         drm_mode_config_cleanup(dm->ddev);
2205         drm_atomic_private_obj_fini(&dm->atomic_obj);
2206         return;
2207 }
2208
2209 /******************************************************************************
2210  * amdgpu_display_funcs functions
2211  *****************************************************************************/
2212
2213 /*
2214  * dm_bandwidth_update - program display watermarks
2215  *
2216  * @adev: amdgpu_device pointer
2217  *
2218  * Calculate and program the display watermarks and line buffer allocation.
2219  */
2220 static void dm_bandwidth_update(struct amdgpu_device *adev)
2221 {
2222         /* TODO: implement later */
2223 }
2224
2225 static const struct amdgpu_display_funcs dm_display_funcs = {
2226         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2227         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
2228         .backlight_set_level = NULL, /* never called for DC */
2229         .backlight_get_level = NULL, /* never called for DC */
2230         .hpd_sense = NULL,/* called unconditionally */
2231         .hpd_set_polarity = NULL, /* called unconditionally */
2232         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2233         .page_flip_get_scanoutpos =
2234                 dm_crtc_get_scanoutpos,/* called unconditionally */
2235         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2236         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
2237 };
2238
2239 #if defined(CONFIG_DEBUG_KERNEL_DC)
2240
2241 static ssize_t s3_debug_store(struct device *device,
2242                               struct device_attribute *attr,
2243                               const char *buf,
2244                               size_t count)
2245 {
2246         int ret;
2247         int s3_state;
2248         struct pci_dev *pdev = to_pci_dev(device);
2249         struct drm_device *drm_dev = pci_get_drvdata(pdev);
2250         struct amdgpu_device *adev = drm_dev->dev_private;
2251
2252         ret = kstrtoint(buf, 0, &s3_state);
2253
2254         if (ret == 0) {
2255                 if (s3_state) {
2256                         dm_resume(adev);
2257                         drm_kms_helper_hotplug_event(adev->ddev);
2258                 } else
2259                         dm_suspend(adev);
2260         }
2261
2262         return ret == 0 ? count : 0;
2263 }
2264
2265 DEVICE_ATTR_WO(s3_debug);
2266
2267 #endif
2268
2269 static int dm_early_init(void *handle)
2270 {
2271         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2272
2273         switch (adev->asic_type) {
2274         case CHIP_BONAIRE:
2275         case CHIP_HAWAII:
2276                 adev->mode_info.num_crtc = 6;
2277                 adev->mode_info.num_hpd = 6;
2278                 adev->mode_info.num_dig = 6;
2279                 break;
2280         case CHIP_KAVERI:
2281                 adev->mode_info.num_crtc = 4;
2282                 adev->mode_info.num_hpd = 6;
2283                 adev->mode_info.num_dig = 7;
2284                 break;
2285         case CHIP_KABINI:
2286         case CHIP_MULLINS:
2287                 adev->mode_info.num_crtc = 2;
2288                 adev->mode_info.num_hpd = 6;
2289                 adev->mode_info.num_dig = 6;
2290                 break;
2291         case CHIP_FIJI:
2292         case CHIP_TONGA:
2293                 adev->mode_info.num_crtc = 6;
2294                 adev->mode_info.num_hpd = 6;
2295                 adev->mode_info.num_dig = 7;
2296                 break;
2297         case CHIP_CARRIZO:
2298                 adev->mode_info.num_crtc = 3;
2299                 adev->mode_info.num_hpd = 6;
2300                 adev->mode_info.num_dig = 9;
2301                 break;
2302         case CHIP_STONEY:
2303                 adev->mode_info.num_crtc = 2;
2304                 adev->mode_info.num_hpd = 6;
2305                 adev->mode_info.num_dig = 9;
2306                 break;
2307         case CHIP_POLARIS11:
2308         case CHIP_POLARIS12:
2309                 adev->mode_info.num_crtc = 5;
2310                 adev->mode_info.num_hpd = 5;
2311                 adev->mode_info.num_dig = 5;
2312                 break;
2313         case CHIP_POLARIS10:
2314         case CHIP_VEGAM:
2315                 adev->mode_info.num_crtc = 6;
2316                 adev->mode_info.num_hpd = 6;
2317                 adev->mode_info.num_dig = 6;
2318                 break;
2319         case CHIP_VEGA10:
2320         case CHIP_VEGA12:
2321         case CHIP_VEGA20:
2322                 adev->mode_info.num_crtc = 6;
2323                 adev->mode_info.num_hpd = 6;
2324                 adev->mode_info.num_dig = 6;
2325                 break;
2326 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2327         case CHIP_RAVEN:
2328                 adev->mode_info.num_crtc = 4;
2329                 adev->mode_info.num_hpd = 4;
2330                 adev->mode_info.num_dig = 4;
2331                 break;
2332 #endif
2333         default:
2334                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2335                 return -EINVAL;
2336         }
2337
2338         amdgpu_dm_set_irq_funcs(adev);
2339
2340         if (adev->mode_info.funcs == NULL)
2341                 adev->mode_info.funcs = &dm_display_funcs;
2342
2343         /*
2344          * Note: Do NOT change adev->audio_endpt_rreg and
2345          * adev->audio_endpt_wreg because they are initialised in
2346          * amdgpu_device_init()
2347          */
2348 #if defined(CONFIG_DEBUG_KERNEL_DC)
2349         device_create_file(
2350                 adev->ddev->dev,
2351                 &dev_attr_s3_debug);
2352 #endif
2353
2354         return 0;
2355 }
2356
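/*
 * A modeset is considered required only when the CRTC state needs a modeset
 * and leaves the CRTC enabled and active; the stream arguments are accepted
 * but not currently consulted.
 */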
2357 static bool modeset_required(struct drm_crtc_state *crtc_state,
2358                              struct dc_stream_state *new_stream,
2359                              struct dc_stream_state *old_stream)
2360 {
2361         if (!drm_atomic_crtc_needs_modeset(crtc_state))
2362                 return false;
2363
2364         if (!crtc_state->enable)
2365                 return false;
2366
2367         return crtc_state->active;
2368 }
2369
2370 static bool modereset_required(struct drm_crtc_state *crtc_state)
2371 {
2372         if (!drm_atomic_crtc_needs_modeset(crtc_state))
2373                 return false;
2374
2375         return !crtc_state->enable || !crtc_state->active;
2376 }
2377
2378 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2379 {
2380         drm_encoder_cleanup(encoder);
2381         kfree(encoder);
2382 }
2383
2384 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2385         .destroy = amdgpu_dm_encoder_destroy,
2386 };
2387
2388 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
2389                                         struct dc_plane_state *plane_state)
2390 {
2391         plane_state->src_rect.x = state->src_x >> 16;
2392         plane_state->src_rect.y = state->src_y >> 16;
2393         /* we ignore the fractional part for now and do not deal with floating pixels :( */
2394         plane_state->src_rect.width = state->src_w >> 16;
2395
2396         if (plane_state->src_rect.width == 0)
2397                 return false;
2398
2399         plane_state->src_rect.height = state->src_h >> 16;
2400         if (plane_state->src_rect.height == 0)
2401                 return false;
2402
2403         plane_state->dst_rect.x = state->crtc_x;
2404         plane_state->dst_rect.y = state->crtc_y;
2405
2406         if (state->crtc_w == 0)
2407                 return false;
2408
2409         plane_state->dst_rect.width = state->crtc_w;
2410
2411         if (state->crtc_h == 0)
2412                 return false;
2413
2414         plane_state->dst_rect.height = state->crtc_h;
2415
2416         plane_state->clip_rect = plane_state->dst_rect;
2417
2418         switch (state->rotation & DRM_MODE_ROTATE_MASK) {
2419         case DRM_MODE_ROTATE_0:
2420                 plane_state->rotation = ROTATION_ANGLE_0;
2421                 break;
2422         case DRM_MODE_ROTATE_90:
2423                 plane_state->rotation = ROTATION_ANGLE_90;
2424                 break;
2425         case DRM_MODE_ROTATE_180:
2426                 plane_state->rotation = ROTATION_ANGLE_180;
2427                 break;
2428         case DRM_MODE_ROTATE_270:
2429                 plane_state->rotation = ROTATION_ANGLE_270;
2430                 break;
2431         default:
2432                 plane_state->rotation = ROTATION_ANGLE_0;
2433                 break;
2434         }
2435
2436         return true;
2437 }
2438 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2439                        uint64_t *tiling_flags)
2440 {
2441         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2442         int r = amdgpu_bo_reserve(rbo, false);
2443
2444         if (unlikely(r)) {
2445                 /* Don't show error message when returning -ERESTARTSYS */
2446                 if (r != -ERESTARTSYS)
2447                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
2448                 return r;
2449         }
2450
2451         if (tiling_flags)
2452                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2453
2454         amdgpu_bo_unreserve(rbo);
2455
2456         return r;
2457 }
2458
2459 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2460 {
2461         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
2462
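	/*
	 * DCC_OFFSET_256B is stored in units of 256 bytes; an offset of 0
	 * means the buffer carries no DCC metadata.
	 */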
2463         return offset ? (address + offset * 256) : 0;
2464 }
2465
2466 static int fill_plane_dcc_attributes(struct amdgpu_device *adev,
2467                                       const struct amdgpu_framebuffer *afb,
2468                                       const struct dc_plane_state *plane_state,
2469                                       struct dc_plane_dcc_param *dcc,
2470                                       struct dc_plane_address *address,
2471                                       uint64_t info)
2472 {
2473         struct dc *dc = adev->dm.dc;
2474         struct dc_dcc_surface_param input;
2475         struct dc_surface_dcc_cap output;
2476         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
2477         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
2478         uint64_t dcc_address;
2479
2480         memset(&input, 0, sizeof(input));
2481         memset(&output, 0, sizeof(output));
2482
2483         if (!offset)
2484                 return 0;
2485
2486         if (plane_state->address.type != PLN_ADDR_TYPE_GRAPHICS)
2487                 return 0;
2488
2489         if (!dc->cap_funcs.get_dcc_compression_cap)
2490                 return -EINVAL;
2491
2492         input.format = plane_state->format;
2493         input.surface_size.width =
2494                 plane_state->plane_size.grph.surface_size.width;
2495         input.surface_size.height =
2496                 plane_state->plane_size.grph.surface_size.height;
2497         input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;
2498
2499         if (plane_state->rotation == ROTATION_ANGLE_0 ||
2500             plane_state->rotation == ROTATION_ANGLE_180)
2501                 input.scan = SCAN_DIRECTION_HORIZONTAL;
2502         else if (plane_state->rotation == ROTATION_ANGLE_90 ||
2503                  plane_state->rotation == ROTATION_ANGLE_270)
2504                 input.scan = SCAN_DIRECTION_VERTICAL;
2505
2506         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
2507                 return -EINVAL;
2508
2509         if (!output.capable)
2510                 return -EINVAL;
2511
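	/*
	 * Reject DCC when the hardware reports that it needs independent
	 * 64B blocks but the buffer was not allocated with
	 * DCC_INDEPENDENT_64B set in its tiling flags.
	 */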
2512         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
2513                 return -EINVAL;
2514
2515         dcc->enable = 1;
2516         dcc->grph.meta_pitch =
2517                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
2518         dcc->grph.independent_64b_blks = i64b;
2519
2520         dcc_address = get_dcc_address(afb->address, info);
2521         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
2522         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
2523
2524         return 0;
2525 }
2526
2527 static int
2528 fill_plane_tiling_attributes(struct amdgpu_device *adev,
2529                              const struct amdgpu_framebuffer *afb,
2530                              const struct dc_plane_state *plane_state,
2531                              union dc_tiling_info *tiling_info,
2532                              struct dc_plane_dcc_param *dcc,
2533                              struct dc_plane_address *address,
2534                              uint64_t tiling_flags)
2535 {
2536         int ret;
2537
2538         memset(tiling_info, 0, sizeof(*tiling_info));
2539         memset(dcc, 0, sizeof(*dcc));
2540         memset(address, 0, sizeof(*address));
2541
2542         if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2543                 address->type = PLN_ADDR_TYPE_GRAPHICS;
2544                 address->grph.addr.low_part = lower_32_bits(afb->address);
2545                 address->grph.addr.high_part = upper_32_bits(afb->address);
2546         } else {
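		/*
		 * Semi-planar formats such as NV12/NV21 keep luma and chroma
		 * in one buffer; fb->offsets[1] is the chroma plane's byte
		 * offset from the start of the BO.
		 */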
2547                 const struct drm_framebuffer *fb = &afb->base;
2548                 uint64_t chroma_addr = afb->address + fb->offsets[1];
2549
2550                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2551                 address->video_progressive.luma_addr.low_part =
2552                         lower_32_bits(afb->address);
2553                 address->video_progressive.luma_addr.high_part =
2554                         upper_32_bits(afb->address);
2555                 address->video_progressive.chroma_addr.low_part =
2556                         lower_32_bits(chroma_addr);
2557                 address->video_progressive.chroma_addr.high_part =
2558                         upper_32_bits(chroma_addr);
2559         }
2560
2561         /* Fill GFX8 params */
2562         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2563                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2564
2565                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2566                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2567                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2568                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2569                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2570
2571                 /* XXX fix me for VI */
2572                 tiling_info->gfx8.num_banks = num_banks;
2573                 tiling_info->gfx8.array_mode =
2574                                 DC_ARRAY_2D_TILED_THIN1;
2575                 tiling_info->gfx8.tile_split = tile_split;
2576                 tiling_info->gfx8.bank_width = bankw;
2577                 tiling_info->gfx8.bank_height = bankh;
2578                 tiling_info->gfx8.tile_aspect = mtaspect;
2579                 tiling_info->gfx8.tile_mode =
2580                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2581         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2582                         == DC_ARRAY_1D_TILED_THIN1) {
2583                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2584         }
2585
2586         tiling_info->gfx8.pipe_config =
2587                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2588
2589         if (adev->asic_type == CHIP_VEGA10 ||
2590             adev->asic_type == CHIP_VEGA12 ||
2591             adev->asic_type == CHIP_VEGA20 ||
2592             adev->asic_type == CHIP_RAVEN) {
2593                 /* Fill GFX9 params */
2594                 tiling_info->gfx9.num_pipes =
2595                         adev->gfx.config.gb_addr_config_fields.num_pipes;
2596                 tiling_info->gfx9.num_banks =
2597                         adev->gfx.config.gb_addr_config_fields.num_banks;
2598                 tiling_info->gfx9.pipe_interleave =
2599                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2600                 tiling_info->gfx9.num_shader_engines =
2601                         adev->gfx.config.gb_addr_config_fields.num_se;
2602                 tiling_info->gfx9.max_compressed_frags =
2603                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2604                 tiling_info->gfx9.num_rb_per_se =
2605                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2606                 tiling_info->gfx9.swizzle =
2607                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2608                 tiling_info->gfx9.shaderEnable = 1;
2609
2610                 ret = fill_plane_dcc_attributes(adev, afb, plane_state, dcc,
2611                                                 address, tiling_flags);
2612                 if (ret)
2613                         return ret;
2614         }
2615
2616         return 0;
2617 }
2618
2619 static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
2620                                          struct dc_plane_state *plane_state,
2621                                          const struct amdgpu_framebuffer *amdgpu_fb)
2622 {
2623         uint64_t tiling_flags;
2624         const struct drm_framebuffer *fb = &amdgpu_fb->base;
2625         int ret = 0;
2626         struct drm_format_name_buf format_name;
2627
2628         ret = get_fb_info(
2629                 amdgpu_fb,
2630                 &tiling_flags);
2631
2632         if (ret)
2633                 return ret;
2634
2635         switch (fb->format->format) {
2636         case DRM_FORMAT_C8:
2637                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2638                 break;
2639         case DRM_FORMAT_RGB565:
2640                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2641                 break;
2642         case DRM_FORMAT_XRGB8888:
2643         case DRM_FORMAT_ARGB8888:
2644                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2645                 break;
2646         case DRM_FORMAT_XRGB2101010:
2647         case DRM_FORMAT_ARGB2101010:
2648                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2649                 break;
2650         case DRM_FORMAT_XBGR2101010:
2651         case DRM_FORMAT_ABGR2101010:
2652                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2653                 break;
2654         case DRM_FORMAT_XBGR8888:
2655         case DRM_FORMAT_ABGR8888:
2656                 plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
2657                 break;
2658         case DRM_FORMAT_NV21:
2659                 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2660                 break;
2661         case DRM_FORMAT_NV12:
2662                 plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
2663                 break;
2664         default:
2665                 DRM_ERROR("Unsupported screen format %s\n",
2666                           drm_get_format_name(fb->format->format, &format_name));
2667                 return -EINVAL;
2668         }
2669
2670         memset(&plane_state->address, 0, sizeof(plane_state->address));
2671
2672         if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2673                 plane_state->plane_size.grph.surface_size.x = 0;
2674                 plane_state->plane_size.grph.surface_size.y = 0;
2675                 plane_state->plane_size.grph.surface_size.width = fb->width;
2676                 plane_state->plane_size.grph.surface_size.height = fb->height;
2677                 plane_state->plane_size.grph.surface_pitch =
2678                                 fb->pitches[0] / fb->format->cpp[0];
2679                 /* TODO: unhardcode */
2680                 plane_state->color_space = COLOR_SPACE_SRGB;
2681
2682         } else {
2683                 plane_state->plane_size.video.luma_size.x = 0;
2684                 plane_state->plane_size.video.luma_size.y = 0;
2685                 plane_state->plane_size.video.luma_size.width = fb->width;
2686                 plane_state->plane_size.video.luma_size.height = fb->height;
2687                 plane_state->plane_size.video.luma_pitch =
2688                         fb->pitches[0] / fb->format->cpp[0];
2689
2690                 plane_state->plane_size.video.chroma_size.x = 0;
2691                 plane_state->plane_size.video.chroma_size.y = 0;
2692                 /* TODO: set these based on surface format */
2693                 plane_state->plane_size.video.chroma_size.width = fb->width / 2;
2694                 plane_state->plane_size.video.chroma_size.height = fb->height / 2;
2695
2696                 plane_state->plane_size.video.chroma_pitch =
2697                         fb->pitches[1] / fb->format->cpp[1];
2698
2699                 /* TODO: unhardcode */
2700                 plane_state->color_space = COLOR_SPACE_YCBCR709;
2701         }
2702
2703         fill_plane_tiling_attributes(adev, amdgpu_fb, plane_state,
2704                                      &plane_state->tiling_info,
2705                                      &plane_state->dcc,
2706                                      &plane_state->address,
2707                                      tiling_flags);
2708
2709         plane_state->visible = true;
2710         plane_state->scaling_quality.h_taps_c = 0;
2711         plane_state->scaling_quality.v_taps_c = 0;
2712
2713         /* is this needed? is plane_state zeroed at allocation? */
2714         plane_state->scaling_quality.h_taps = 0;
2715         plane_state->scaling_quality.v_taps = 0;
2716         plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
2717
2718         return ret;
2719
2720 }
2721
2722 static void
2723 fill_blending_from_plane_state(struct drm_plane_state *plane_state,
2724                                const struct dc_plane_state *dc_plane_state,
2725                                bool *per_pixel_alpha, bool *global_alpha,
2726                                int *global_alpha_value)
2727 {
2728         *per_pixel_alpha = false;
2729         *global_alpha = false;
2730         *global_alpha_value = 0xff;
2731
2732         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
2733                 return;
2734
2735         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
2736                 static const uint32_t alpha_formats[] = {
2737                         DRM_FORMAT_ARGB8888,
2738                         DRM_FORMAT_RGBA8888,
2739                         DRM_FORMAT_ABGR8888,
2740                 };
2741                 uint32_t format = plane_state->fb->format->format;
2742                 unsigned int i;
2743
2744                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
2745                         if (format == alpha_formats[i]) {
2746                                 *per_pixel_alpha = true;
2747                                 break;
2748                         }
2749                 }
2750         }
2751
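	/*
	 * DRM stores plane alpha as a 16-bit value (0xffff is fully
	 * opaque); DC takes an 8-bit global alpha, hence the >> 8 below.
	 */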
2752         if (plane_state->alpha < 0xffff) {
2753                 *global_alpha = true;
2754                 *global_alpha_value = plane_state->alpha >> 8;
2755         }
2756 }
2757
2758 static int
2759 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
2760                             const struct dc_plane_state *dc_plane_state,
2761                             enum dc_color_space *color_space)
2762 {
2763         bool full_range;
2764
2765         *color_space = COLOR_SPACE_SRGB;
2766
2767         /* DRM color properties only affect non-RGB formats. */
2768         if (dc_plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2769                 return 0;
2770
2771         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
2772
2773         switch (plane_state->color_encoding) {
2774         case DRM_COLOR_YCBCR_BT601:
2775                 if (full_range)
2776                         *color_space = COLOR_SPACE_YCBCR601;
2777                 else
2778                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
2779                 break;
2780
2781         case DRM_COLOR_YCBCR_BT709:
2782                 if (full_range)
2783                         *color_space = COLOR_SPACE_YCBCR709;
2784                 else
2785                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
2786                 break;
2787
2788         case DRM_COLOR_YCBCR_BT2020:
2789                 if (full_range)
2790                         *color_space = COLOR_SPACE_2020_YCBCR;
2791                 else
2792                         return -EINVAL;
2793                 break;
2794
2795         default:
2796                 return -EINVAL;
2797         }
2798
2799         return 0;
2800 }
2801
2802 static int fill_plane_attributes(struct amdgpu_device *adev,
2803                                  struct dc_plane_state *dc_plane_state,
2804                                  struct drm_plane_state *plane_state,
2805                                  struct drm_crtc_state *crtc_state)
2806 {
2807         const struct amdgpu_framebuffer *amdgpu_fb =
2808                 to_amdgpu_framebuffer(plane_state->fb);
2809         const struct drm_crtc *crtc = plane_state->crtc;
2810         int ret = 0;
2811
2812         if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
2813                 return -EINVAL;
2814
2815         ret = fill_plane_attributes_from_fb(
2816                 crtc->dev->dev_private,
2817                 dc_plane_state,
2818                 amdgpu_fb);
2819
2820         if (ret)
2821                 return ret;
2822
2823         ret = fill_plane_color_attributes(plane_state, dc_plane_state,
2824                                           &dc_plane_state->color_space);
2825         if (ret)
2826                 return ret;
2827
2828         /*
2829          * Always set input transfer function, since plane state is refreshed
2830          * every time.
2831          */
2832         ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
2833         if (ret) {
2834                 dc_transfer_func_release(dc_plane_state->in_transfer_func);
2835                 dc_plane_state->in_transfer_func = NULL;
2836         }
2837
2838         fill_blending_from_plane_state(plane_state, dc_plane_state,
2839                                        &dc_plane_state->per_pixel_alpha,
2840                                        &dc_plane_state->global_alpha,
2841                                        &dc_plane_state->global_alpha_value);
2842
2843         return ret;
2844 }
2845
2846 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2847                                            const struct dm_connector_state *dm_state,
2848                                            struct dc_stream_state *stream)
2849 {
2850         enum amdgpu_rmx_type rmx_type;
2851
2852         struct rect src = { 0 }; /* viewport in composition space */
2853         struct rect dst = { 0 }; /* stream addressable area */
2854
2855         /* no mode. nothing to be done */
2856         if (!mode)
2857                 return;
2858
2859         /* Full screen scaling by default */
2860         src.width = mode->hdisplay;
2861         src.height = mode->vdisplay;
2862         dst.width = stream->timing.h_addressable;
2863         dst.height = stream->timing.v_addressable;
2864
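        /*
         * Worked example for the RMX_ASPECT math below: fitting a 1280x720
         * source into a 1920x1200 stream gives 1280 * 1200 > 720 * 1920, so
         * the width needs less upscaling and dst becomes 1920x1080
         * (720 * 1920 / 1280), centered with 60-pixel bars top and bottom.
         */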
2865         if (dm_state) {
2866                 rmx_type = dm_state->scaling;
2867                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2868                         if (src.width * dst.height <
2869                                         src.height * dst.width) {
2870                                 /* height needs less upscaling/more downscaling */
2871                                 dst.width = src.width *
2872                                                 dst.height / src.height;
2873                         } else {
2874                                 /* width needs less upscaling/more downscaling */
2875                                 dst.height = src.height *
2876                                                 dst.width / src.width;
2877                         }
2878                 } else if (rmx_type == RMX_CENTER) {
2879                         dst = src;
2880                 }
2881
2882                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
2883                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
2884
2885                 if (dm_state->underscan_enable) {
2886                         dst.x += dm_state->underscan_hborder / 2;
2887                         dst.y += dm_state->underscan_vborder / 2;
2888                         dst.width -= dm_state->underscan_hborder;
2889                         dst.height -= dm_state->underscan_vborder;
2890                 }
2891         }
2892
2893         stream->src = src;
2894         stream->dst = dst;
2895
2896         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2897                         dst.x, dst.y, dst.width, dst.height);
2898
2899 }
2900
2901 static enum dc_color_depth
2902 convert_color_depth_from_display_info(const struct drm_connector *connector)
2903 {
2904         struct dm_connector_state *dm_conn_state =
2905                 to_dm_connector_state(connector->state);
2906         uint32_t bpc = connector->display_info.bpc;
2907
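        /*
         * Example: a 12 bpc display capped by a max_bpc of 11 is clamped
         * below to 10 bpc (11 rounded down to even).
         */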
2908         /* TODO: Remove this when there's support for max_bpc in drm */
2909         if (dm_conn_state && bpc > dm_conn_state->max_bpc)
2910                 /* Round down to nearest even number. */
2911                 bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
2912
2913         switch (bpc) {
2914         case 0:
2915                 /*
2916                  * Temporary workaround: DRM doesn't parse color depth for
2917                  * EDID revisions before 1.4.
2918                  * TODO: Fix EDID parsing.
2919                  */
2920                 return COLOR_DEPTH_888;
2921         case 6:
2922                 return COLOR_DEPTH_666;
2923         case 8:
2924                 return COLOR_DEPTH_888;
2925         case 10:
2926                 return COLOR_DEPTH_101010;
2927         case 12:
2928                 return COLOR_DEPTH_121212;
2929         case 14:
2930                 return COLOR_DEPTH_141414;
2931         case 16:
2932                 return COLOR_DEPTH_161616;
2933         default:
2934                 return COLOR_DEPTH_UNDEFINED;
2935         }
2936 }
2937
2938 static enum dc_aspect_ratio
2939 get_aspect_ratio(const struct drm_display_mode *mode_in)
2940 {
2941         /* 1-1 mapping, since both enums follow the HDMI spec. */
2942         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
2943 }
2944
2945 static enum dc_color_space
2946 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2947 {
2948         enum dc_color_space color_space = COLOR_SPACE_SRGB;
2949
2950         switch (dc_crtc_timing->pixel_encoding) {
2951         case PIXEL_ENCODING_YCBCR422:
2952         case PIXEL_ENCODING_YCBCR444:
2953         case PIXEL_ENCODING_YCBCR420:
2954         {
2955                 /*
2956                  * According to the HDMI spec, 27.03 MHz (270300 in units of
2957                  * 100 Hz) is the separation point between HDTV and SDTV, so
2958                  * use YCbCr709 above it and YCbCr601 below it.
2959                  */
2960                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
2961                         if (dc_crtc_timing->flags.Y_ONLY)
2962                                 color_space =
2963                                         COLOR_SPACE_YCBCR709_LIMITED;
2964                         else
2965                                 color_space = COLOR_SPACE_YCBCR709;
2966                 } else {
2967                         if (dc_crtc_timing->flags.Y_ONLY)
2968                                 color_space =
2969                                         COLOR_SPACE_YCBCR601_LIMITED;
2970                         else
2971                                 color_space = COLOR_SPACE_YCBCR601;
2972                 }
2973
2974         }
2975         break;
2976         case PIXEL_ENCODING_RGB:
2977                 color_space = COLOR_SPACE_SRGB;
2978                 break;
2979
2980         default:
2981                 WARN_ON(1);
2982                 break;
2983         }
2984
2985         return color_space;
2986 }
2987
2988 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2989 {
2990         if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2991                 return;
2992
2993         timing_out->display_color_depth--;
2994 }
2995
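/*
 * HDMI deep color inflates the TMDS clock: 10 bpc runs at 30/24 = 1.25x,
 * 12 bpc at 1.5x and 16 bpc at 2x the 8 bpc rate. As an illustration, a
 * 297000 kHz 4K mode at 10 bpc normalizes to 297000 * 30 / 24 = 371250,
 * which exceeds a 340000 kHz max_tmds_clock and forces the depth down to
 * 8 bpc.
 */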
2996 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2997                                                 const struct drm_display_info *info)
2998 {
2999         int normalized_clk;

3000         if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3001                 return;
3002         do {
3003                 normalized_clk = timing_out->pix_clk_100hz / 10;
3004                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3005                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3006                         normalized_clk /= 2;
3007                 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
3008                 switch (timing_out->display_color_depth) {
3009                 case COLOR_DEPTH_101010:
3010                         normalized_clk = (normalized_clk * 30) / 24;
3011                         break;
3012                 case COLOR_DEPTH_121212:
3013                         normalized_clk = (normalized_clk * 36) / 24;
3014                         break;
3015                 case COLOR_DEPTH_161616:
3016                         normalized_clk = (normalized_clk * 48) / 24;
3017                         break;
3018                 default:
3019                         return;
3020                 }
3021                 if (normalized_clk <= info->max_tmds_clock)
3022                         return;
3023                 reduce_mode_colour_depth(timing_out);
3024
3025         } while (timing_out->display_color_depth > COLOR_DEPTH_888);
3026
3027 }
3028
3029 static void
3030 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
3031                                              const struct drm_display_mode *mode_in,
3032                                              const struct drm_connector *connector,
3033                                              const struct dc_stream_state *old_stream)
3034 {
3035         struct dc_crtc_timing *timing_out = &stream->timing;
3036         const struct drm_display_info *info = &connector->display_info;
3037
3038         memset(timing_out, 0, sizeof(struct dc_crtc_timing));
3039
3040         timing_out->h_border_left = 0;
3041         timing_out->h_border_right = 0;
3042         timing_out->v_border_top = 0;
3043         timing_out->v_border_bottom = 0;
3044         /* TODO: un-hardcode */
3045         if (drm_mode_is_420_only(info, mode_in)
3046                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3047                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3048         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3049                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3050                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3051         else
3052                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3053
3054         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3055         timing_out->display_color_depth = convert_color_depth_from_display_info(
3056                         connector);
3057         timing_out->scan_type = SCANNING_TYPE_NODATA;
3058         timing_out->hdmi_vic = 0;
3059
3060         if (old_stream) {
3061                 timing_out->vic = old_stream->timing.vic;
3062                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3063                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3064         } else {
3065                 timing_out->vic = drm_match_cea_mode(mode_in);
3066                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3067                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3068                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3069                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3070         }
3071
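        /*
         * Derive the DC timing from the CRTC-adjusted mode. For CEA 1080p60
         * (htotal 2200, hsync 2008-2052, vtotal 1125, clock 148500 kHz) this
         * yields h_front_porch = 2008 - 1920 = 88, h_sync_width = 44,
         * v_front_porch = 1084 - 1080 = 4 and pix_clk_100hz = 1485000.
         */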
3072         timing_out->h_addressable = mode_in->crtc_hdisplay;
3073         timing_out->h_total = mode_in->crtc_htotal;
3074         timing_out->h_sync_width =
3075                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3076         timing_out->h_front_porch =
3077                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3078         timing_out->v_total = mode_in->crtc_vtotal;
3079         timing_out->v_addressable = mode_in->crtc_vdisplay;
3080         timing_out->v_front_porch =
3081                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3082         timing_out->v_sync_width =
3083                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
3084         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
3085         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
3086
3087         stream->output_color_space = get_output_color_space(timing_out);
3088
3089         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3090         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
3091         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3092                 adjust_colour_depth_from_display_info(timing_out, info);
3093 }
3094
3095 static void fill_audio_info(struct audio_info *audio_info,
3096                             const struct drm_connector *drm_connector,
3097                             const struct dc_sink *dc_sink)
3098 {
3099         int i = 0;
3100         int cea_revision = 0;
3101         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3102
3103         audio_info->manufacture_id = edid_caps->manufacturer_id;
3104         audio_info->product_id = edid_caps->product_id;
3105
3106         cea_revision = drm_connector->display_info.cea_rev;
3107
3108         strscpy(audio_info->display_name,
3109                 edid_caps->display_name,
3110                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3111
3112         if (cea_revision >= 3) {
3113                 audio_info->mode_count = edid_caps->audio_mode_count;
3114
3115                 for (i = 0; i < audio_info->mode_count; ++i) {
3116                         audio_info->modes[i].format_code =
3117                                         (enum audio_format_code)
3118                                         (edid_caps->audio_modes[i].format_code);
3119                         audio_info->modes[i].channel_count =
3120                                         edid_caps->audio_modes[i].channel_count;
3121                         audio_info->modes[i].sample_rates.all =
3122                                         edid_caps->audio_modes[i].sample_rate;
3123                         audio_info->modes[i].sample_size =
3124                                         edid_caps->audio_modes[i].sample_size;
3125                 }
3126         }
3127
3128         audio_info->flags.all = edid_caps->speaker_flags;
3129
3130         /* TODO: We only check the progressive mode; check the interlace mode too */
3131         if (drm_connector->latency_present[0]) {
3132                 audio_info->video_latency = drm_connector->video_latency[0];
3133                 audio_info->audio_latency = drm_connector->audio_latency[0];
3134         }
3135
3136         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3137
3138 }
3139
3140 static void
3141 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3142                                       struct drm_display_mode *dst_mode)
3143 {
3144         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3145         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3146         dst_mode->crtc_clock = src_mode->crtc_clock;
3147         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3148         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3149         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
3150         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3151         dst_mode->crtc_htotal = src_mode->crtc_htotal;
3152         dst_mode->crtc_hskew = src_mode->crtc_hskew;
3153         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3154         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3155         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3156         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3157         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3158 }
3159
3160 static void
3161 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3162                                         const struct drm_display_mode *native_mode,
3163                                         bool scale_enabled)
3164 {
3165         if (scale_enabled) {
3166                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3167         } else if (native_mode->clock == drm_mode->clock &&
3168                         native_mode->htotal == drm_mode->htotal &&
3169                         native_mode->vtotal == drm_mode->vtotal) {
3170                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3171         } else {
3172                 /* neither scaling nor an amdgpu-inserted mode; nothing to patch */
3173         }
3174 }
3175
3176 static struct dc_sink *
3177 create_fake_sink(struct amdgpu_dm_connector *aconnector)
3178 {
3179         struct dc_sink_init_data sink_init_data = { 0 };
3180         struct dc_sink *sink = NULL;

3181         sink_init_data.link = aconnector->dc_link;
3182         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3183
3184         sink = dc_sink_create(&sink_init_data);
3185         if (!sink) {
3186                 DRM_ERROR("Failed to create sink!\n");
3187                 return NULL;
3188         }
3189         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
3190
3191         return sink;
3192 }
3193
3194 static void set_multisync_trigger_params(
3195                 struct dc_stream_state *stream)
3196 {
3197         if (stream->triggered_crtc_reset.enabled) {
3198                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3199                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
3200         }
3201 }
3202
3203 static void set_master_stream(struct dc_stream_state *stream_set[],
3204                               int stream_count)
3205 {
3206         int j, highest_rfr = 0, master_stream = 0;
3207
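        /*
         * The stream with the highest refresh rate becomes the master:
         * refresh = pix_clk / (h_total * v_total), e.g. 1080p60 gives
         * 1485000 * 100 / (2200 * 1125) = 60.
         */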
3208         for (j = 0;  j < stream_count; j++) {
3209                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
3210                         int refresh_rate = 0;
3211
3212                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
3213                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
3214                         if (refresh_rate > highest_rfr) {
3215                                 highest_rfr = refresh_rate;
3216                                 master_stream = j;
3217                         }
3218                 }
3219         }
3220         for (j = 0;  j < stream_count; j++) {
3221                 if (stream_set[j])
3222                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
3223         }
3224 }
3225
3226 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3227 {
3228         int i = 0;
3229
3230         if (context->stream_count < 2)
3231                 return;
3232         for (i = 0; i < context->stream_count ; i++) {
3233                 if (!context->streams[i])
3234                         continue;
3235                 /*
3236                  * TODO: add a function to read AMD VSDB bits and set
3237                  * crtc_sync_master.multi_sync_enabled flag
3238                  * For now it's set to false
3239                  */
3240                 set_multisync_trigger_params(context->streams[i]);
3241         }
3242         set_master_stream(context->streams, context->stream_count);
3243 }
3244
3245 static struct dc_stream_state *
3246 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3247                        const struct drm_display_mode *drm_mode,
3248                        const struct dm_connector_state *dm_state,
3249                        const struct dc_stream_state *old_stream)
3250 {
3251         struct drm_display_mode *preferred_mode = NULL;
3252         struct drm_connector *drm_connector;
3253         struct dc_stream_state *stream = NULL;
3254         struct drm_display_mode mode = *drm_mode;
3255         bool native_mode_found = false;
3256         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
3257         int mode_refresh;
3258         int preferred_refresh = 0;
3259
3260         struct dc_sink *sink = NULL;

3261         if (aconnector == NULL) {
3262                 DRM_ERROR("aconnector is NULL!\n");
3263                 return stream;
3264         }
3265
3266         drm_connector = &aconnector->base;
3267
3268         if (!aconnector->dc_sink) {
3269                 sink = create_fake_sink(aconnector);
3270                 if (!sink)
3271                         return stream;
3272         } else {
3273                 sink = aconnector->dc_sink;
3274                 dc_sink_retain(sink);
3275         }
3276
3277         stream = dc_create_stream_for_sink(sink);
3278
3279         if (stream == NULL) {
3280                 DRM_ERROR("Failed to create stream for sink!\n");
3281                 goto finish;
3282         }
3283
3284         stream->dm_stream_context = aconnector;
3285
3286         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
3287                 /* Search for preferred mode */
3288                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
3289                         native_mode_found = true;
3290                         break;
3291                 }
3292         }
3293         if (!native_mode_found)
3294                 preferred_mode = list_first_entry_or_null(
3295                                 &aconnector->base.modes,
3296                                 struct drm_display_mode,
3297                                 head);
3298
3299         mode_refresh = drm_mode_vrefresh(&mode);
3300
3301         if (preferred_mode == NULL) {
3302                 /*
3303                  * This may not be an error: the use case is a hotplug with no
3304                  * usermode calls to reset and set the mode. In that case we
3305                  * call set mode ourselves to restore the previous mode, and
3306                  * the mode list may not be filled in yet.
3307                  */
3308                 DRM_DEBUG_DRIVER("No preferred mode found\n");
3309         } else {
3310                 decide_crtc_timing_for_drm_display_mode(
3311                                 &mode, preferred_mode,
3312                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
3313                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
3314         }
3315
3316         if (!dm_state)
3317                 drm_mode_set_crtcinfo(&mode, 0);
3318
3319         /*
3320          * If scaling is enabled and the refresh rate didn't change,
3321          * copy the VIC and polarities from the old timing.
3322          */
3323         if (!scale || mode_refresh != preferred_refresh)
3324                 fill_stream_properties_from_drm_display_mode(stream,
3325                         &mode, &aconnector->base, NULL);
3326         else
3327                 fill_stream_properties_from_drm_display_mode(stream,
3328                         &mode, &aconnector->base, old_stream);
3329
3330         update_stream_scaling_settings(&mode, dm_state, stream);
3331
3332         fill_audio_info(
3333                 &stream->audio_info,
3334                 drm_connector,
3335                 sink);
3336
3337         update_stream_signal(stream, sink);
3338
3339 finish:
3340         dc_sink_release(sink);
3341
3342         return stream;
3343 }
3344
3345 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
3346 {
3347         drm_crtc_cleanup(crtc);
3348         kfree(crtc);
3349 }
3350
3351 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3352                                   struct drm_crtc_state *state)
3353 {
3354         struct dm_crtc_state *cur = to_dm_crtc_state(state);
3355
3356         /* TODO: Destroy dc_stream objects once the stream object is flattened */
3357         if (cur->stream)
3358                 dc_stream_release(cur->stream);
3359
3361         __drm_atomic_helper_crtc_destroy_state(state);
3362
3364         kfree(state);
3365 }
3366
3367 static void dm_crtc_reset_state(struct drm_crtc *crtc)
3368 {
3369         struct dm_crtc_state *state;
3370
3371         if (crtc->state)
3372                 dm_crtc_destroy_state(crtc, crtc->state);
3373
3374         state = kzalloc(sizeof(*state), GFP_KERNEL);
3375         if (WARN_ON(!state))
3376                 return;
3377
3378         crtc->state = &state->base;
3379         crtc->state->crtc = crtc;
3380
3381 }
3382
3383 static struct drm_crtc_state *
3384 dm_crtc_duplicate_state(struct drm_crtc *crtc)
3385 {
3386         struct dm_crtc_state *state, *cur;
3387
3388         if (WARN_ON(!crtc->state))
3389                 return NULL;
3390
3391         cur = to_dm_crtc_state(crtc->state);
3392
3393         state = kzalloc(sizeof(*state), GFP_KERNEL);
3394         if (!state)
3395                 return NULL;
3396
3397         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3398
3399         if (cur->stream) {
3400                 state->stream = cur->stream;
3401                 dc_stream_retain(state->stream);
3402         }
3403
3404         state->vrr_params = cur->vrr_params;
3405         state->vrr_infopacket = cur->vrr_infopacket;
3406         state->abm_level = cur->abm_level;
3407         state->vrr_supported = cur->vrr_supported;
3408         state->freesync_config = cur->freesync_config;
3409         state->crc_enabled = cur->crc_enabled;
3410
3411         /* TODO: Duplicate the dc_stream once the stream object is flattened */
3412
3413         return &state->base;
3414 }
3415
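/*
 * VUPDATE fires during the OTG's vertical update period, separately from
 * VBLANK; the driver only needs it while VRR is active (see dm_set_vblank()
 * below). The IRQ_TYPE_VUPDATE + otg_inst arithmetic assumes the
 * dc_irq_source enum lays out the per-OTG VUPDATE sources contiguously.
 */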
3416 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
3417 {
3418         enum dc_irq_source irq_source;
3419         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3420         struct amdgpu_device *adev = crtc->dev->dev_private;
3421         int rc;
3422
3423         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
3424
3425         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3426
3427         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
3428                          acrtc->crtc_id, enable ? "en" : "dis", rc);
3429         return rc;
3430 }
3431
3432 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3433 {
3434         enum dc_irq_source irq_source;
3435         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3436         struct amdgpu_device *adev = crtc->dev->dev_private;
3437         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3438         int rc = 0;
3439
3440         if (enable) {
3441                 /* vblank irq on -> Only need vupdate irq in vrr mode */
3442                 if (amdgpu_dm_vrr_active(acrtc_state))
3443                         rc = dm_set_vupdate_irq(crtc, true);
3444         } else {
3445                 /* vblank irq off -> vupdate irq off */
3446                 rc = dm_set_vupdate_irq(crtc, false);
3447         }
3448
3449         if (rc)
3450                 return rc;
3451
3452         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3453         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3454 }
3455
3456 static int dm_enable_vblank(struct drm_crtc *crtc)
3457 {
3458         return dm_set_vblank(crtc, true);
3459 }
3460
3461 static void dm_disable_vblank(struct drm_crtc *crtc)
3462 {
3463         dm_set_vblank(crtc, false);
3464 }
3465
3466 /* Only the options currently available for the driver are implemented */
3467 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
3468         .reset = dm_crtc_reset_state,
3469         .destroy = amdgpu_dm_crtc_destroy,
3470         .gamma_set = drm_atomic_helper_legacy_gamma_set,
3471         .set_config = drm_atomic_helper_set_config,
3472         .page_flip = drm_atomic_helper_page_flip,
3473         .atomic_duplicate_state = dm_crtc_duplicate_state,
3474         .atomic_destroy_state = dm_crtc_destroy_state,
3475         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3476         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
3477         .enable_vblank = dm_enable_vblank,
3478         .disable_vblank = dm_disable_vblank,
3479 };
3480
3481 static enum drm_connector_status
3482 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3483 {
3484         bool connected;
3485         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3486
3487         /*
3488          * Notes:
3489          * 1. This interface is NOT called in the context of the HPD irq.
3490          * 2. This interface *is* called in the context of a user-mode ioctl,
3491          * which makes it a bad place for *any* MST-related activity.
3492          */
3493
3494         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3495             !aconnector->fake_enable)
3496                 connected = (aconnector->dc_sink != NULL);
3497         else
3498                 connected = (aconnector->base.force == DRM_FORCE_ON);
3499
3500         return (connected ? connector_status_connected :
3501                         connector_status_disconnected);
3502 }
3503
3504 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3505                                             struct drm_connector_state *connector_state,
3506                                             struct drm_property *property,
3507                                             uint64_t val)
3508 {
3509         struct drm_device *dev = connector->dev;
3510         struct amdgpu_device *adev = dev->dev_private;
3511         struct dm_connector_state *dm_old_state =
3512                 to_dm_connector_state(connector->state);
3513         struct dm_connector_state *dm_new_state =
3514                 to_dm_connector_state(connector_state);
3515
3516         int ret = -EINVAL;
3517
3518         if (property == dev->mode_config.scaling_mode_property) {
3519                 enum amdgpu_rmx_type rmx_type;
3520
3521                 switch (val) {
3522                 case DRM_MODE_SCALE_CENTER:
3523                         rmx_type = RMX_CENTER;
3524                         break;
3525                 case DRM_MODE_SCALE_ASPECT:
3526                         rmx_type = RMX_ASPECT;
3527                         break;
3528                 case DRM_MODE_SCALE_FULLSCREEN:
3529                         rmx_type = RMX_FULL;
3530                         break;
3531                 case DRM_MODE_SCALE_NONE:
3532                 default:
3533                         rmx_type = RMX_OFF;
3534                         break;
3535                 }
3536
3537                 if (dm_old_state->scaling == rmx_type)
3538                         return 0;
3539
3540                 dm_new_state->scaling = rmx_type;
3541                 ret = 0;
3542         } else if (property == adev->mode_info.underscan_hborder_property) {
3543                 dm_new_state->underscan_hborder = val;
3544                 ret = 0;
3545         } else if (property == adev->mode_info.underscan_vborder_property) {
3546                 dm_new_state->underscan_vborder = val;
3547                 ret = 0;
3548         } else if (property == adev->mode_info.underscan_property) {
3549                 dm_new_state->underscan_enable = val;
3550                 ret = 0;
3551         } else if (property == adev->mode_info.max_bpc_property) {
3552                 dm_new_state->max_bpc = val;
3553                 ret = 0;
3554         } else if (property == adev->mode_info.abm_level_property) {
3555                 dm_new_state->abm_level = val;
3556                 ret = 0;
3557         }
3558
3559         return ret;
3560 }
3561
3562 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3563                                             const struct drm_connector_state *state,
3564                                             struct drm_property *property,
3565                                             uint64_t *val)
3566 {
3567         struct drm_device *dev = connector->dev;
3568         struct amdgpu_device *adev = dev->dev_private;
3569         struct dm_connector_state *dm_state =
3570                 to_dm_connector_state(state);
3571         int ret = -EINVAL;
3572
3573         if (property == dev->mode_config.scaling_mode_property) {
3574                 switch (dm_state->scaling) {
3575                 case RMX_CENTER:
3576                         *val = DRM_MODE_SCALE_CENTER;
3577                         break;
3578                 case RMX_ASPECT:
3579                         *val = DRM_MODE_SCALE_ASPECT;
3580                         break;
3581                 case RMX_FULL:
3582                         *val = DRM_MODE_SCALE_FULLSCREEN;
3583                         break;
3584                 case RMX_OFF:
3585                 default:
3586                         *val = DRM_MODE_SCALE_NONE;
3587                         break;
3588                 }
3589                 ret = 0;
3590         } else if (property == adev->mode_info.underscan_hborder_property) {
3591                 *val = dm_state->underscan_hborder;
3592                 ret = 0;
3593         } else if (property == adev->mode_info.underscan_vborder_property) {
3594                 *val = dm_state->underscan_vborder;
3595                 ret = 0;
3596         } else if (property == adev->mode_info.underscan_property) {
3597                 *val = dm_state->underscan_enable;
3598                 ret = 0;
3599         } else if (property == adev->mode_info.max_bpc_property) {
3600                 *val = dm_state->max_bpc;
3601                 ret = 0;
3602         } else if (property == adev->mode_info.abm_level_property) {
3603                 *val = dm_state->abm_level;
3604                 ret = 0;
3605         }
3606
3607         return ret;
3608 }
3609
3610 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
3611 {
3612         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3613         const struct dc_link *link = aconnector->dc_link;
3614         struct amdgpu_device *adev = connector->dev->dev_private;
3615         struct amdgpu_display_manager *dm = &adev->dm;
3616
3617 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3618         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3619
3620         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3621             link->type != dc_connection_none &&
3622             dm->backlight_dev) {
3623                 backlight_device_unregister(dm->backlight_dev);
3624                 dm->backlight_dev = NULL;
3625         }
3626 #endif
3627
3628         if (aconnector->dc_em_sink)
3629                 dc_sink_release(aconnector->dc_em_sink);
3630         aconnector->dc_em_sink = NULL;
3631         if (aconnector->dc_sink)
3632                 dc_sink_release(aconnector->dc_sink);
3633         aconnector->dc_sink = NULL;
3634
3635         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
3636         drm_connector_unregister(connector);
3637         drm_connector_cleanup(connector);
3638         kfree(connector);
3639 }
3640
3641 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3642 {
3643         struct dm_connector_state *state =
3644                 to_dm_connector_state(connector->state);
3645
3646         if (connector->state)
3647                 __drm_atomic_helper_connector_destroy_state(connector->state);
3648
3649         kfree(state);
3650
3651         state = kzalloc(sizeof(*state), GFP_KERNEL);
3652
3653         if (state) {
3654                 state->scaling = RMX_OFF;
3655                 state->underscan_enable = false;
3656                 state->underscan_hborder = 0;
3657                 state->underscan_vborder = 0;
3658                 state->max_bpc = 8;
3659
3660                 __drm_atomic_helper_connector_reset(connector, &state->base);
3661         }
3662 }
3663
3664 struct drm_connector_state *
3665 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
3666 {
3667         struct dm_connector_state *state =
3668                 to_dm_connector_state(connector->state);
3669
3670         struct dm_connector_state *new_state =
3671                         kmemdup(state, sizeof(*state), GFP_KERNEL);
3672
3673         if (!new_state)
3674                 return NULL;
3675
3676         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3677
3678         new_state->freesync_capable = state->freesync_capable;
3679         new_state->abm_level = state->abm_level;
3680         new_state->scaling = state->scaling;
3681         new_state->underscan_enable = state->underscan_enable;
3682         new_state->underscan_hborder = state->underscan_hborder;
3683         new_state->underscan_vborder = state->underscan_vborder;
3684         new_state->max_bpc = state->max_bpc;
3685
3686         return &new_state->base;
3687 }
3688
3689 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
3690         .reset = amdgpu_dm_connector_funcs_reset,
3691         .detect = amdgpu_dm_connector_detect,
3692         .fill_modes = drm_helper_probe_single_connector_modes,
3693         .destroy = amdgpu_dm_connector_destroy,
3694         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
3695         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
3696         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
3697         .atomic_get_property = amdgpu_dm_connector_atomic_get_property
3698 };
3699
3700 static int get_modes(struct drm_connector *connector)
3701 {
3702         return amdgpu_dm_connector_get_modes(connector);
3703 }
3704
3705 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
3706 {
3707         struct dc_sink_init_data init_params = {
3708                         .link = aconnector->dc_link,
3709                         .sink_signal = SIGNAL_TYPE_VIRTUAL
3710         };
3711         struct edid *edid;
3712
3713         if (!aconnector->base.edid_blob_ptr) {
3714                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
3715                                 aconnector->base.name);
3716
3717                 aconnector->base.force = DRM_FORCE_OFF;
3718                 aconnector->base.override_edid = false;
3719                 return;
3720         }
3721
3722         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
3723
3724         aconnector->edid = edid;
3725
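        /*
         * An EDID is one 128-byte base block (EDID_LENGTH) plus
         * edid->extensions extension blocks of the same size; a typical HDMI
         * EDID with one CEA extension is therefore 256 bytes.
         */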
3726         aconnector->dc_em_sink = dc_link_add_remote_sink(
3727                 aconnector->dc_link,
3728                 (uint8_t *)edid,
3729                 (edid->extensions + 1) * EDID_LENGTH,
3730                 &init_params);
3731
3732         if (aconnector->base.force == DRM_FORCE_ON) {
3733                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
3734                                 aconnector->dc_link->local_sink :
3735                                 aconnector->dc_em_sink;
3736                 dc_sink_retain(aconnector->dc_sink);
3737         }
3738 }
3739
3740 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
3741 {
3742         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
3743
3744         /*
3745          * In case of a headless boot with force-on for a DP managed connector,
3746          * those settings have to be != 0 to get an initial modeset.
3747          */
3748         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3749                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
3750                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
3751         }
3752
3754         aconnector->base.override_edid = true;
3755         create_eml_sink(aconnector);
3756 }
3757
3758 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
3759                                    struct drm_display_mode *mode)
3760 {
3761         int result = MODE_ERROR;
3762         struct dc_sink *dc_sink;
3763         struct amdgpu_device *adev = connector->dev->dev_private;
3764         /* TODO: Unhardcode stream count */
3765         struct dc_stream_state *stream;
3766         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3767         enum dc_status dc_result = DC_OK;
3768
3769         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
3770                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
3771                 return result;
3772
3773         /*
3774          * Only run this the first time mode_valid is called, to initialize
3775          * EDID management.
3776          */
3777         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
3778                 !aconnector->dc_em_sink)
3779                 handle_edid_mgmt(aconnector);
3780
3781         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
3782
3783         if (dc_sink == NULL) {
3784                 DRM_ERROR("dc_sink is NULL!\n");
3785                 goto fail;
3786         }
3787
3788         stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
3789         if (stream == NULL) {
3790                 DRM_ERROR("Failed to create stream for sink!\n");
3791                 goto fail;
3792         }
3793
3794         dc_result = dc_validate_stream(adev->dm.dc, stream);
3795
3796         if (dc_result == DC_OK)
3797                 result = MODE_OK;
3798         else
3799                 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
3800                               mode->hdisplay,
3801                               mode->vdisplay,
3802                               mode->clock,
3803                               dc_result);
3804
3805         dc_stream_release(stream);
3806
3807 fail:
3808         /* TODO: error handling */
3809         return result;
3810 }
3811
3812 static const struct drm_connector_helper_funcs
3813 amdgpu_dm_connector_helper_funcs = {
3814         /*
3815          * When hotplugging a second, bigger display in FB console mode, the
3816          * bigger-resolution modes are filtered out by drm_mode_validate_size()
3817          * and go missing after the user starts lightdm. So renew the mode
3818          * list in the get_modes callback, not just return the mode count.
3819          */
3820         .get_modes = get_modes,
3821         .mode_valid = amdgpu_dm_connector_mode_valid,
3822 };
3823
3824 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
3825 {
3826 }
3827
3828 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
3829                                        struct drm_crtc_state *state)
3830 {
3831         struct amdgpu_device *adev = crtc->dev->dev_private;
3832         struct dc *dc = adev->dm.dc;
3833         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
3834         int ret = -EINVAL;
3835
3836         if (unlikely(!dm_crtc_state->stream &&
3837                      modeset_required(state, NULL, dm_crtc_state->stream))) {
3838                 WARN_ON(1);
3839                 return ret;
3840         }
3841
3842         /* In some use cases, like reset, no stream is attached */
3843         if (!dm_crtc_state->stream)
3844                 return 0;
3845
3846         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
3847                 return 0;
3848
3849         return ret;
3850 }
3851
3852 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
3853                                       const struct drm_display_mode *mode,
3854                                       struct drm_display_mode *adjusted_mode)
3855 {
3856         return true;
3857 }
3858
3859 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
3860         .disable = dm_crtc_helper_disable,
3861         .atomic_check = dm_crtc_helper_atomic_check,
3862         .mode_fixup = dm_crtc_helper_mode_fixup
3863 };
3864
3865 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
3866 {
3867
3868 }
3869
3870 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
3871                                           struct drm_crtc_state *crtc_state,
3872                                           struct drm_connector_state *conn_state)
3873 {
3874         return 0;
3875 }
3876
3877 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
3878         .disable = dm_encoder_helper_disable,
3879         .atomic_check = dm_encoder_helper_atomic_check
3880 };
3881
3882 static void dm_drm_plane_reset(struct drm_plane *plane)
3883 {
3884         struct dm_plane_state *amdgpu_state = NULL;
3885
3886         if (plane->state)
3887                 plane->funcs->atomic_destroy_state(plane, plane->state);
3888
3889         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
3890         WARN_ON(amdgpu_state == NULL);
3891
3892         if (amdgpu_state)
3893                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
3894 }
3895
3896 static struct drm_plane_state *
3897 dm_drm_plane_duplicate_state(struct drm_plane *plane)
3898 {
3899         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
3900
3901         old_dm_plane_state = to_dm_plane_state(plane->state);
3902         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
3903         if (!dm_plane_state)
3904                 return NULL;
3905
3906         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
3907
3908         if (old_dm_plane_state->dc_state) {
3909                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
3910                 dc_plane_state_retain(dm_plane_state->dc_state);
3911         }
3912
3913         return &dm_plane_state->base;
3914 }
3915
3916 void dm_drm_plane_destroy_state(struct drm_plane *plane,
3917                                 struct drm_plane_state *state)
3918 {
3919         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3920
3921         if (dm_plane_state->dc_state)
3922                 dc_plane_state_release(dm_plane_state->dc_state);
3923
3924         drm_atomic_helper_plane_destroy_state(plane, state);
3925 }
3926
3927 static const struct drm_plane_funcs dm_plane_funcs = {
3928         .update_plane   = drm_atomic_helper_update_plane,
3929         .disable_plane  = drm_atomic_helper_disable_plane,
3930         .destroy        = drm_primary_helper_destroy,
3931         .reset = dm_drm_plane_reset,
3932         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
3933         .atomic_destroy_state = dm_drm_plane_destroy_state,
3934 };
3935
3936 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
3937                                       struct drm_plane_state *new_state)
3938 {
3939         struct amdgpu_framebuffer *afb;
3940         struct drm_gem_object *obj;
3941         struct amdgpu_device *adev;
3942         struct amdgpu_bo *rbo;
3943         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
3944         uint64_t tiling_flags;
3945         uint32_t domain;
3946         int r;
3947
3948         dm_plane_state_old = to_dm_plane_state(plane->state);
3949         dm_plane_state_new = to_dm_plane_state(new_state);
3950
3951         if (!new_state->fb) {
3952                 DRM_DEBUG_DRIVER("No FB bound\n");
3953                 return 0;
3954         }
3955
3956         afb = to_amdgpu_framebuffer(new_state->fb);
3957         obj = new_state->fb->obj[0];
3958         rbo = gem_to_amdgpu_bo(obj);
3959         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
3960         r = amdgpu_bo_reserve(rbo, false);
3961         if (unlikely(r != 0))
3962                 return r;
3963
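        /*
         * Cursor surfaces must be scanned out of VRAM; other plane types may
         * also live in GTT when amdgpu_display_supported_domains() includes
         * it for this ASIC.
         */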
3964         if (plane->type != DRM_PLANE_TYPE_CURSOR)
3965                 domain = amdgpu_display_supported_domains(adev);
3966         else
3967                 domain = AMDGPU_GEM_DOMAIN_VRAM;
3968
3969         r = amdgpu_bo_pin(rbo, domain);
3970         if (unlikely(r != 0)) {
3971                 if (r != -ERESTARTSYS)
3972                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
3973                 amdgpu_bo_unreserve(rbo);
3974                 return r;
3975         }
3976
3977         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
3978         if (unlikely(r != 0)) {
3979                 amdgpu_bo_unpin(rbo);
3980                 amdgpu_bo_unreserve(rbo);
3981                 DRM_ERROR("%p bind failed\n", rbo);
3982                 return r;
3983         }
3984
3985         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
3986
3987         amdgpu_bo_unreserve(rbo);
3988
3989         afb->address = amdgpu_bo_gpu_offset(rbo);
3990
3991         amdgpu_bo_ref(rbo);
3992
3993         if (dm_plane_state_new->dc_state &&
3994                         dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
3995                 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
3996
3997                 fill_plane_tiling_attributes(
3998                         adev, afb, plane_state, &plane_state->tiling_info,
3999                         &plane_state->dcc, &plane_state->address, tiling_flags);
4000         }
4001
4002         return 0;
4003 }
4004
4005 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
4006                                        struct drm_plane_state *old_state)
4007 {
4008         struct amdgpu_bo *rbo;
4009         int r;
4010
4011         if (!old_state->fb)
4012                 return;
4013
4014         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
4015         r = amdgpu_bo_reserve(rbo, false);
4016         if (unlikely(r)) {
4017                 DRM_ERROR("failed to reserve rbo before unpin\n");
4018                 return;
4019         }
4020
4021         amdgpu_bo_unpin(rbo);
4022         amdgpu_bo_unreserve(rbo);
4023         amdgpu_bo_unref(&rbo);
4024 }
4025
4026 static int dm_plane_atomic_check(struct drm_plane *plane,
4027                                  struct drm_plane_state *state)
4028 {
4029         struct amdgpu_device *adev = plane->dev->dev_private;
4030         struct dc *dc = adev->dm.dc;
4031         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
4032
4033         if (!dm_plane_state->dc_state)
4034                 return 0;
4035
4036         if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
4037                 return -EINVAL;
4038
4039         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
4040                 return 0;
4041
4042         return -EINVAL;
4043 }
4044
4045 static int dm_plane_atomic_async_check(struct drm_plane *plane,
4046                                        struct drm_plane_state *new_plane_state)
4047 {
4048         struct drm_plane_state *old_plane_state =
4049                 drm_atomic_get_old_plane_state(new_plane_state->state, plane);
4050
4051         /* Only support async updates on cursor planes. */
4052         if (plane->type != DRM_PLANE_TYPE_CURSOR)
4053                 return -EINVAL;
4054
4055         /*
4056          * DRM calls prepare_fb and cleanup_fb on new_plane_state for
4057          * async commits so don't allow fb changes.
4058          */
4059         if (old_plane_state->fb != new_plane_state->fb)
4060                 return -EINVAL;
4061
4062         return 0;
4063 }
4064
4065 static void dm_plane_atomic_async_update(struct drm_plane *plane,
4066                                          struct drm_plane_state *new_state)
4067 {
4068         struct drm_plane_state *old_state =
4069                 drm_atomic_get_old_plane_state(new_state->state, plane);
4070
4071         if (plane->state->fb != new_state->fb)
4072                 drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
4073
4074         plane->state->src_x = new_state->src_x;
4075         plane->state->src_y = new_state->src_y;
4076         plane->state->src_w = new_state->src_w;
4077         plane->state->src_h = new_state->src_h;
4078         plane->state->crtc_x = new_state->crtc_x;
4079         plane->state->crtc_y = new_state->crtc_y;
4080         plane->state->crtc_w = new_state->crtc_w;
4081         plane->state->crtc_h = new_state->crtc_h;
4082
4083         handle_cursor_update(plane, old_state);
4084 }
4085
4086 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
4087         .prepare_fb = dm_plane_helper_prepare_fb,
4088         .cleanup_fb = dm_plane_helper_cleanup_fb,
4089         .atomic_check = dm_plane_atomic_check,
4090         .atomic_async_check = dm_plane_atomic_async_check,
4091         .atomic_async_update = dm_plane_atomic_async_update
4092 };
4093
4094 /*
4095  * TODO: these are currently initialized to RGB formats only.
4096  * For future use cases we should either initialize them dynamically based on
4097  * plane capabilities, or initialize this array to all formats, so the internal
4098  * DRM check will succeed, and let DC implement the proper checks.
4099  */
4100 static const uint32_t rgb_formats[] = {
4101         DRM_FORMAT_XRGB8888,
4102         DRM_FORMAT_ARGB8888,
4103         DRM_FORMAT_RGBA8888,
4104         DRM_FORMAT_XRGB2101010,
4105         DRM_FORMAT_XBGR2101010,
4106         DRM_FORMAT_ARGB2101010,
4107         DRM_FORMAT_ABGR2101010,
4108         DRM_FORMAT_XBGR8888,
4109         DRM_FORMAT_ABGR8888,
4110 };
4111
4112 static const uint32_t overlay_formats[] = {
4113         DRM_FORMAT_XRGB8888,
4114         DRM_FORMAT_ARGB8888,
4115         DRM_FORMAT_RGBA8888,
4116         DRM_FORMAT_XBGR8888,
4117         DRM_FORMAT_ABGR8888,
4118 };
4119
4120 static const u32 cursor_formats[] = {
4121         DRM_FORMAT_ARGB8888
4122 };
4123
4124 static int get_plane_formats(const struct drm_plane *plane,
4125                              const struct dc_plane_cap *plane_cap,
4126                              uint32_t *formats, int max_formats)
4127 {
4128         int i, num_formats = 0;
4129
4130         /*
4131          * TODO: Query support for each group of formats directly from
4132          * DC plane caps. This will require adding more formats to the
4133          * caps list.
4134          */
4135
4136         switch (plane->type) {
4137         case DRM_PLANE_TYPE_PRIMARY:
4138                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
4139                         if (num_formats >= max_formats)
4140                                 break;
4141
4142                         formats[num_formats++] = rgb_formats[i];
4143                 }
4144
4145                 if (plane_cap && plane_cap->supports_nv12)
4146                         formats[num_formats++] = DRM_FORMAT_NV12;
4147                 break;
4148
4149         case DRM_PLANE_TYPE_OVERLAY:
4150                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
4151                         if (num_formats >= max_formats)
4152                                 break;
4153
4154                         formats[num_formats++] = overlay_formats[i];
4155                 }
4156                 break;
4157
4158         case DRM_PLANE_TYPE_CURSOR:
4159                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
4160                         if (num_formats >= max_formats)
4161                                 break;
4162
4163                         formats[num_formats++] = cursor_formats[i];
4164                 }
4165                 break;
4166         }
4167
4168         return num_formats;
4169 }
4170
4171 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
4172                                 struct drm_plane *plane,
4173                                 unsigned long possible_crtcs,
4174                                 const struct dc_plane_cap *plane_cap)
4175 {
4176         uint32_t formats[32];
4177         int num_formats;
4178         int res = -EPERM;
4179
4180         num_formats = get_plane_formats(plane, plane_cap, formats,
4181                                         ARRAY_SIZE(formats));
4182
4183         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
4184                                        &dm_plane_funcs, formats, num_formats,
4185                                        NULL, plane->type, NULL);
4186         if (res)
4187                 return res;
4188
4189         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
4190             plane_cap && plane_cap->per_pixel_alpha) {
4191                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
4192                                           BIT(DRM_MODE_BLEND_PREMULTI);
4193
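                /*
                 * This exposes the standard DRM "alpha" (0x0000..0xffff) and
                 * "pixel blend mode" plane properties, which are consumed by
                 * fill_blending_from_plane_state(). For example, userspace
                 * might request 50% plane-wide alpha with:
                 *   drmModeObjectSetProperty(fd, plane_id,
                 *                            DRM_MODE_OBJECT_PLANE,
                 *                            alpha_prop_id, 0x7fff);
                 */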
4194                 drm_plane_create_alpha_property(plane);
4195                 drm_plane_create_blend_mode_property(plane, blend_caps);
4196         }
4197
4198         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
4199
4200         /* Create (reset) the plane state */
4201         if (plane->funcs->reset)
4202                 plane->funcs->reset(plane);
4203
4204         return 0;
4205 }
4206
4207 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
4208                                struct drm_plane *plane,
4209                                uint32_t crtc_index)
4210 {
4211         struct amdgpu_crtc *acrtc = NULL;
4212         struct drm_plane *cursor_plane;
4213
4214         int res = -ENOMEM;
4215
4216         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
4217         if (!cursor_plane)
4218                 goto fail;
4219
        cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        /* Bail out if the cursor plane could not be initialized */
        if (res)
                goto fail;

4223         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
4224         if (!acrtc)
4225                 goto fail;
4226
4227         res = drm_crtc_init_with_planes(
4228                         dm->ddev,
4229                         &acrtc->base,
4230                         plane,
4231                         cursor_plane,
4232                         &amdgpu_dm_crtc_funcs, NULL);
4233
4234         if (res)
4235                 goto fail;
4236
4237         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
4238
        /* Create (reset) the CRTC state */
4240         if (acrtc->base.funcs->reset)
4241                 acrtc->base.funcs->reset(&acrtc->base);
4242
4243         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
4244         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
4245
4246         acrtc->crtc_id = crtc_index;
4247         acrtc->base.enabled = false;
4248         acrtc->otg_inst = -1;
4249
4250         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
4251         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
4252                                    true, MAX_COLOR_LUT_ENTRIES);
4253         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
4254
4255         return 0;
4256
4257 fail:
4258         kfree(acrtc);
4259         kfree(cursor_plane);
4260         return res;
4261 }
4262
4263
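/* Map a DC signal type onto the corresponding DRM connector type. */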
4264 static int to_drm_connector_type(enum signal_type st)
4265 {
4266         switch (st) {
4267         case SIGNAL_TYPE_HDMI_TYPE_A:
4268                 return DRM_MODE_CONNECTOR_HDMIA;
4269         case SIGNAL_TYPE_EDP:
4270                 return DRM_MODE_CONNECTOR_eDP;
4271         case SIGNAL_TYPE_LVDS:
4272                 return DRM_MODE_CONNECTOR_LVDS;
4273         case SIGNAL_TYPE_RGB:
4274                 return DRM_MODE_CONNECTOR_VGA;
4275         case SIGNAL_TYPE_DISPLAY_PORT:
4276         case SIGNAL_TYPE_DISPLAY_PORT_MST:
4277                 return DRM_MODE_CONNECTOR_DisplayPort;
4278         case SIGNAL_TYPE_DVI_DUAL_LINK:
4279         case SIGNAL_TYPE_DVI_SINGLE_LINK:
4280                 return DRM_MODE_CONNECTOR_DVID;
4281         case SIGNAL_TYPE_VIRTUAL:
4282                 return DRM_MODE_CONNECTOR_VIRTUAL;
4283
4284         default:
4285                 return DRM_MODE_CONNECTOR_Unknown;
4286         }
4287 }
4288
4289 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
4290 {
4291         return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
4292 }
4293
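/*
 * Cache the connector's preferred probed mode as the encoder's native
 * mode; amdgpu_dm_create_common_mode() later derives scaled modes
 * from it.
 */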
4294 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
4295 {
4296         struct drm_encoder *encoder;
4297         struct amdgpu_encoder *amdgpu_encoder;
4298
4299         encoder = amdgpu_dm_connector_to_encoder(connector);
4300
4301         if (encoder == NULL)
4302                 return;
4303
4304         amdgpu_encoder = to_amdgpu_encoder(encoder);
4305
4306         amdgpu_encoder->native_mode.clock = 0;
4307
4308         if (!list_empty(&connector->probed_modes)) {
4309                 struct drm_display_mode *preferred_mode = NULL;
4310
4311                 list_for_each_entry(preferred_mode,
4312                                     &connector->probed_modes,
4313                                     head) {
4314                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
4315                                 amdgpu_encoder->native_mode = *preferred_mode;
4316
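                        /*
                         * Note: the unconditional break below means only the
                         * first probed mode is ever examined here.
                         */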
4317                         break;
4318                 }
4319
4320         }
4321 }
4322
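/*
 * Duplicate the encoder's native mode, resize it to @hdisplay x
 * @vdisplay and drop the PREFERRED flag, yielding a common mode below
 * the native resolution.
 */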
4323 static struct drm_display_mode *
4324 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
4325                              char *name,
4326                              int hdisplay, int vdisplay)
4327 {
4328         struct drm_device *dev = encoder->dev;
4329         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4330         struct drm_display_mode *mode = NULL;
4331         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4332
4333         mode = drm_mode_duplicate(dev, native_mode);
4334
4335         if (mode == NULL)
4336                 return NULL;
4337
4338         mode->hdisplay = hdisplay;
4339         mode->vdisplay = vdisplay;
4340         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
4341         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
4342
        return mode;
}
4346
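/*
 * Offer a set of common modes no larger than the native mode, so that
 * userspace can pick standard lower resolutions even when the EDID
 * does not list them. Duplicates of already-probed modes are skipped.
 */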
4347 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
4348                                                  struct drm_connector *connector)
4349 {
4350         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4351         struct drm_display_mode *mode = NULL;
4352         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4353         struct amdgpu_dm_connector *amdgpu_dm_connector =
4354                                 to_amdgpu_dm_connector(connector);
4355         int i;
4356         int n;
4357         struct mode_size {
4358                 char name[DRM_DISPLAY_MODE_LEN];
4359                 int w;
4360                 int h;
4361         } common_modes[] = {
4362                 {  "640x480",  640,  480},
4363                 {  "800x600",  800,  600},
4364                 { "1024x768", 1024,  768},
4365                 { "1280x720", 1280,  720},
4366                 { "1280x800", 1280,  800},
4367                 {"1280x1024", 1280, 1024},
4368                 { "1440x900", 1440,  900},
4369                 {"1680x1050", 1680, 1050},
4370                 {"1600x1200", 1600, 1200},
4371                 {"1920x1080", 1920, 1080},
4372                 {"1920x1200", 1920, 1200}
4373         };
4374
4375         n = ARRAY_SIZE(common_modes);
4376
4377         for (i = 0; i < n; i++) {
4378                 struct drm_display_mode *curmode = NULL;
4379                 bool mode_existed = false;
4380
4381                 if (common_modes[i].w > native_mode->hdisplay ||
4382                     common_modes[i].h > native_mode->vdisplay ||
4383                    (common_modes[i].w == native_mode->hdisplay &&
4384                     common_modes[i].h == native_mode->vdisplay))
4385                         continue;
4386
4387                 list_for_each_entry(curmode, &connector->probed_modes, head) {
4388                         if (common_modes[i].w == curmode->hdisplay &&
4389                             common_modes[i].h == curmode->vdisplay) {
4390                                 mode_existed = true;
4391                                 break;
4392                         }
4393                 }
4394
4395                 if (mode_existed)
4396                         continue;
4397
                mode = amdgpu_dm_create_common_mode(encoder,
                                common_modes[i].name, common_modes[i].w,
                                common_modes[i].h);
                /* Skip this entry if duplicating the native mode failed */
                if (!mode)
                        continue;

                drm_mode_probed_add(connector, mode);
                amdgpu_dm_connector->num_modes++;
4403         }
4404 }
4405
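/*
 * (Re)build the probed mode list from @edid and refresh the cached
 * native mode; without an EDID the mode count is simply reset to zero.
 */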
4406 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
4407                                               struct edid *edid)
4408 {
4409         struct amdgpu_dm_connector *amdgpu_dm_connector =
4410                         to_amdgpu_dm_connector(connector);
4411
4412         if (edid) {
4413                 /* empty probed_modes */
4414                 INIT_LIST_HEAD(&connector->probed_modes);
4415                 amdgpu_dm_connector->num_modes =
4416                                 drm_add_edid_modes(connector, edid);
4417
4418                 amdgpu_dm_get_native_mode(connector);
4419         } else {
4420                 amdgpu_dm_connector->num_modes = 0;
4421         }
4422 }
4423
4424 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
4425 {
4426         struct amdgpu_dm_connector *amdgpu_dm_connector =
4427                         to_amdgpu_dm_connector(connector);
4428         struct drm_encoder *encoder;
4429         struct edid *edid = amdgpu_dm_connector->edid;
4430
4431         encoder = amdgpu_dm_connector_to_encoder(connector);
4432
4433         if (!edid || !drm_edid_is_valid(edid)) {
4434                 amdgpu_dm_connector->num_modes =
4435                                 drm_add_modes_noedid(connector, 640, 480);
4436         } else {
4437                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
4438                 amdgpu_dm_connector_add_common_modes(encoder, connector);
4439         }
4440         amdgpu_dm_fbc_init(connector);
4441
4442         return amdgpu_dm_connector->num_modes;
4443 }
4444
4445 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4446                                      struct amdgpu_dm_connector *aconnector,
4447                                      int connector_type,
4448                                      struct dc_link *link,
4449                                      int link_index)
4450 {
4451         struct amdgpu_device *adev = dm->ddev->dev_private;
4452
4453         aconnector->connector_id = link_index;
4454         aconnector->dc_link = link;
4455         aconnector->base.interlace_allowed = false;
4456         aconnector->base.doublescan_allowed = false;
4457         aconnector->base.stereo_allowed = false;
4458         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
4459         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
4460         mutex_init(&aconnector->hpd_lock);
4461
        /*
         * Configure HPD hot plug support. connector->polled defaults to 0,
         * which means HPD hot plug is not supported.
         */
4466         switch (connector_type) {
4467         case DRM_MODE_CONNECTOR_HDMIA:
4468                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4469                 aconnector->base.ycbcr_420_allowed =
4470                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
4471                 break;
4472         case DRM_MODE_CONNECTOR_DisplayPort:
4473                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4474                 aconnector->base.ycbcr_420_allowed =
4475                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
4476                 break;
4477         case DRM_MODE_CONNECTOR_DVID:
4478                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
4479                 break;
4480         default:
4481                 break;
4482         }
4483
4484         drm_object_attach_property(&aconnector->base.base,
4485                                 dm->ddev->mode_config.scaling_mode_property,
4486                                 DRM_MODE_SCALE_NONE);
4487
4488         drm_object_attach_property(&aconnector->base.base,
4489                                 adev->mode_info.underscan_property,
4490                                 UNDERSCAN_OFF);
4491         drm_object_attach_property(&aconnector->base.base,
4492                                 adev->mode_info.underscan_hborder_property,
4493                                 0);
4494         drm_object_attach_property(&aconnector->base.base,
4495                                 adev->mode_info.underscan_vborder_property,
4496                                 0);
4497         drm_object_attach_property(&aconnector->base.base,
4498                                 adev->mode_info.max_bpc_property,
4499                                 0);
4500
4501         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
4502             dc_is_dmcu_initialized(adev->dm.dc)) {
4503                 drm_object_attach_property(&aconnector->base.base,
4504                                 adev->mode_info.abm_level_property, 0);
4505         }
4506
4507         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4508             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4509             connector_type == DRM_MODE_CONNECTOR_eDP) {
4510                 drm_connector_attach_vrr_capable_property(
4511                         &aconnector->base);
4512         }
4513 }
4514
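/*
 * i2c_algorithm::master_xfer hook: translate the i2c_msg array into a
 * DC i2c_command and submit it over the link's DDC channel. Returns
 * the number of messages transferred on success, -EIO otherwise.
 */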
4515 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
4516                               struct i2c_msg *msgs, int num)
4517 {
4518         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
4519         struct ddc_service *ddc_service = i2c->ddc_service;
4520         struct i2c_command cmd;
4521         int i;
4522         int result = -EIO;
4523
4524         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
4525
4526         if (!cmd.payloads)
4527                 return result;
4528
4529         cmd.number_of_payloads = num;
4530         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
4531         cmd.speed = 100;
4532
4533         for (i = 0; i < num; i++) {
4534                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
4535                 cmd.payloads[i].address = msgs[i].addr;
4536                 cmd.payloads[i].length = msgs[i].len;
4537                 cmd.payloads[i].data = msgs[i].buf;
4538         }
4539
4540         if (dc_submit_i2c(
4541                         ddc_service->ctx->dc,
4542                         ddc_service->ddc_pin->hw_info.ddc_channel,
4543                         &cmd))
4544                 result = num;
4545
4546         kfree(cmd.payloads);
4547         return result;
4548 }
4549
4550 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
4551 {
4552         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
4553 }
4554
4555 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
4556         .master_xfer = amdgpu_dm_i2c_xfer,
4557         .functionality = amdgpu_dm_i2c_func,
4558 };
4559
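/*
 * Allocate an i2c adapter wrapping @ddc_service's DDC pin, so the
 * connector's EDID can be read through the standard i2c framework.
 */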
4560 static struct amdgpu_i2c_adapter *
4561 create_i2c(struct ddc_service *ddc_service,
4562            int link_index,
4563            int *res)
4564 {
4565         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
4566         struct amdgpu_i2c_adapter *i2c;
4567
4568         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
4569         if (!i2c)
4570                 return NULL;
4571         i2c->base.owner = THIS_MODULE;
4572         i2c->base.class = I2C_CLASS_DDC;
4573         i2c->base.dev.parent = &adev->pdev->dev;
4574         i2c->base.algo = &amdgpu_dm_i2c_algo;
4575         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
4576         i2c_set_adapdata(&i2c->base, i2c);
4577         i2c->ddc_service = ddc_service;
4578         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
4579
4580         return i2c;
4581 }
4582
4583
4584 /*
4585  * Note: this function assumes that dc_link_detect() was called for the
4586  * dc_link which will be represented by this aconnector.
4587  */
4588 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
4589                                     struct amdgpu_dm_connector *aconnector,
4590                                     uint32_t link_index,
4591                                     struct amdgpu_encoder *aencoder)
4592 {
4593         int res = 0;
4594         int connector_type;
4595         struct dc *dc = dm->dc;
4596         struct dc_link *link = dc_get_link_at_index(dc, link_index);
4597         struct amdgpu_i2c_adapter *i2c;
4598
4599         link->priv = aconnector;
4600
4601         DRM_DEBUG_DRIVER("%s()\n", __func__);
4602
4603         i2c = create_i2c(link->ddc, link->link_index, &res);
4604         if (!i2c) {
4605                 DRM_ERROR("Failed to create i2c adapter data\n");
4606                 return -ENOMEM;
4607         }
4608
4609         aconnector->i2c = i2c;
4610         res = i2c_add_adapter(&i2c->base);
4611
4612         if (res) {
4613                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
4614                 goto out_free;
4615         }
4616
4617         connector_type = to_drm_connector_type(link->connector_signal);
4618
4619         res = drm_connector_init(
4620                         dm->ddev,
4621                         &aconnector->base,
4622                         &amdgpu_dm_connector_funcs,
4623                         connector_type);
4624
4625         if (res) {
4626                 DRM_ERROR("connector_init failed\n");
4627                 aconnector->connector_id = -1;
4628                 goto out_free;
4629         }
4630
4631         drm_connector_helper_add(
4632                         &aconnector->base,
4633                         &amdgpu_dm_connector_helper_funcs);
4634
4635         if (aconnector->base.funcs->reset)
4636                 aconnector->base.funcs->reset(&aconnector->base);
4637
4638         amdgpu_dm_connector_init_helper(
4639                 dm,
4640                 aconnector,
4641                 connector_type,
4642                 link,
4643                 link_index);
4644
4645         drm_connector_attach_encoder(
4646                 &aconnector->base, &aencoder->base);
4647
4648         drm_connector_register(&aconnector->base);
4649 #if defined(CONFIG_DEBUG_FS)
4650         res = connector_debugfs_init(aconnector);
4651         if (res) {
                DRM_ERROR("Failed to create debugfs for connector\n");
4653                 goto out_free;
4654         }
4655         aconnector->debugfs_dpcd_address = 0;
4656         aconnector->debugfs_dpcd_size = 0;
4657 #endif
4658
4659         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
4660                 || connector_type == DRM_MODE_CONNECTOR_eDP)
4661                 amdgpu_dm_initialize_dp_connector(dm, aconnector);
4662
4663 out_free:
4664         if (res) {
4665                 kfree(i2c);
4666                 aconnector->i2c = NULL;
4667         }
4668         return res;
4669 }
4670
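/*
 * Any DM encoder may drive any CRTC, so the possible_crtcs mask is
 * simply one bit per available CRTC, capped at six.
 */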
4671 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
4672 {
4673         switch (adev->mode_info.num_crtc) {
4674         case 1:
4675                 return 0x1;
4676         case 2:
4677                 return 0x3;
4678         case 3:
4679                 return 0x7;
4680         case 4:
4681                 return 0xf;
4682         case 5:
4683                 return 0x1f;
4684         case 6:
4685         default:
4686                 return 0x3f;
4687         }
4688 }
4689
4690 static int amdgpu_dm_encoder_init(struct drm_device *dev,
4691                                   struct amdgpu_encoder *aencoder,
4692                                   uint32_t link_index)
4693 {
4694         struct amdgpu_device *adev = dev->dev_private;
4695
4696         int res = drm_encoder_init(dev,
4697                                    &aencoder->base,
4698                                    &amdgpu_dm_encoder_funcs,
4699                                    DRM_MODE_ENCODER_TMDS,
4700                                    NULL);
4701
4702         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
4703
4704         if (!res)
4705                 aencoder->encoder_id = link_index;
4706         else
4707                 aencoder->encoder_id = -1;
4708
4709         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
4710
4711         return res;
4712 }
4713
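/*
 * Turn the pageflip interrupt and DRM vblank handling for @acrtc on or
 * off together.
 */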
4714 static void manage_dm_interrupts(struct amdgpu_device *adev,
4715                                  struct amdgpu_crtc *acrtc,
4716                                  bool enable)
4717 {
        /*
         * This is not a correct translation, but it works as long as the
         * VBLANK constant is the same as PFLIP's.
         */
4722         int irq_type =
4723                 amdgpu_display_crtc_idx_to_irq_type(
4724                         adev,
4725                         acrtc->crtc_id);
4726
4727         if (enable) {
4728                 drm_crtc_vblank_on(&acrtc->base);
4729                 amdgpu_irq_get(
4730                         adev,
4731                         &adev->pageflip_irq,
4732                         irq_type);
4733         } else {
4734
4735                 amdgpu_irq_put(
4736                         adev,
4737                         &adev->pageflip_irq,
4738                         irq_type);
4739                 drm_crtc_vblank_off(&acrtc->base);
4740         }
4741 }
4742
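/*
 * Return true if the scaling mode or underscan borders changed between
 * the old and new connector state, i.e. a stream update is needed.
 */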
4743 static bool
4744 is_scaling_state_different(const struct dm_connector_state *dm_state,
4745                            const struct dm_connector_state *old_dm_state)
4746 {
4747         if (dm_state->scaling != old_dm_state->scaling)
4748                 return true;
4749         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
4750                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
4751                         return true;
4752         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
4753                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
4754                         return true;
4755         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
4756                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
4757                 return true;
4758         return false;
4759 }
4760
4761 static void remove_stream(struct amdgpu_device *adev,
4762                           struct amdgpu_crtc *acrtc,
4763                           struct dc_stream_state *stream)
4764 {
4765         /* this is the update mode case */
4766
4767         acrtc->otg_inst = -1;
4768         acrtc->enabled = false;
4769 }
4770
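/*
 * Compute the DC cursor position for @plane. Negative coordinates are
 * clamped to zero, with the clipped-off amount folded into the hotspot.
 */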
4771 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
4772                                struct dc_cursor_position *position)
4773 {
4774         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4775         int x, y;
4776         int xorigin = 0, yorigin = 0;
4777
4778         if (!crtc || !plane->state->fb) {
4779                 position->enable = false;
4780                 position->x = 0;
4781                 position->y = 0;
4782                 return 0;
4783         }
4784
4785         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
4786             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
4787                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
4788                           __func__,
4789                           plane->state->crtc_w,
4790                           plane->state->crtc_h);
4791                 return -EINVAL;
4792         }
4793
4794         x = plane->state->crtc_x;
4795         y = plane->state->crtc_y;
        /* Avivo cursors are offset into the total surface */
4797         x += crtc->primary->state->src_x >> 16;
4798         y += crtc->primary->state->src_y >> 16;
4799         if (x < 0) {
4800                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
4801                 x = 0;
4802         }
4803         if (y < 0) {
4804                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
4805                 y = 0;
4806         }
4807         position->enable = true;
4808         position->x = x;
4809         position->y = y;
4810         position->x_hotspot = xorigin;
4811         position->y_hotspot = yorigin;
4812
4813         return 0;
4814 }
4815
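/*
 * Program (or disable) the hardware cursor from the cursor plane
 * state, updating both its attributes and position through DC.
 */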
4816 static void handle_cursor_update(struct drm_plane *plane,
4817                                  struct drm_plane_state *old_plane_state)
4818 {
4819         struct amdgpu_device *adev = plane->dev->dev_private;
4820         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
4821         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
4822         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
4823         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
4824         uint64_t address = afb ? afb->address : 0;
4825         struct dc_cursor_position position;
4826         struct dc_cursor_attributes attributes;
4827         int ret;
4828
4829         if (!plane->state->fb && !old_plane_state->fb)
4830                 return;
4831
        DRM_DEBUG_DRIVER("%s: crtc_id=%d with cursor size %dx%d\n",
4833                          __func__,
4834                          amdgpu_crtc->crtc_id,
4835                          plane->state->crtc_w,
4836                          plane->state->crtc_h);
4837
4838         ret = get_cursor_position(plane, crtc, &position);
4839         if (ret)
4840                 return;
4841
4842         if (!position.enable) {
4843                 /* turn off cursor */
4844                 if (crtc_state && crtc_state->stream) {
4845                         mutex_lock(&adev->dm.dc_lock);
4846                         dc_stream_set_cursor_position(crtc_state->stream,
4847                                                       &position);
4848                         mutex_unlock(&adev->dm.dc_lock);
4849                 }
4850                 return;
4851         }
4852
4853         amdgpu_crtc->cursor_width = plane->state->crtc_w;
4854         amdgpu_crtc->cursor_height = plane->state->crtc_h;
4855
4856         attributes.address.high_part = upper_32_bits(address);
4857         attributes.address.low_part  = lower_32_bits(address);
4858         attributes.width             = plane->state->crtc_w;
4859         attributes.height            = plane->state->crtc_h;
4860         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
4861         attributes.rotation_angle    = 0;
4862         attributes.attribute_flags.value = 0;
4863
4864         attributes.pitch = attributes.width;
4865
4866         if (crtc_state->stream) {
4867                 mutex_lock(&adev->dm.dc_lock);
4868                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
4869                                                          &attributes))
4870                         DRM_ERROR("DC failed to set cursor attributes\n");
4871
4872                 if (!dc_stream_set_cursor_position(crtc_state->stream,
4873                                                    &position))
4874                         DRM_ERROR("DC failed to set cursor position\n");
4875                 mutex_unlock(&adev->dm.dc_lock);
4876         }
4877 }
4878
4879 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
4880 {
4882         assert_spin_locked(&acrtc->base.dev->event_lock);
4883         WARN_ON(acrtc->event);
4884
4885         acrtc->event = acrtc->base.state->event;
4886
4887         /* Set the flip status */
4888         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
4889
4890         /* Mark this event as consumed */
4891         acrtc->base.state->event = NULL;
4892
4893         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
4894                                                  acrtc->crtc_id);
4895 }
4896
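/*
 * Rebuild the VRR timing adjustment and infopacket for @new_stream at
 * flip time, and record whether either changed so the commit can push
 * a stream update.
 */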
4897 static void update_freesync_state_on_stream(
4898         struct amdgpu_display_manager *dm,
4899         struct dm_crtc_state *new_crtc_state,
4900         struct dc_stream_state *new_stream,
4901         struct dc_plane_state *surface,
4902         u32 flip_timestamp_in_us)
4903 {
4904         struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
4905         struct dc_info_packet vrr_infopacket = {0};
4906
4907         if (!new_stream)
4908                 return;
4909
4910         /*
4911          * TODO: Determine why min/max totals and vrefresh can be 0 here.
4912          * For now it's sufficient to just guard against these conditions.
4913          */
4914
4915         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
4916                 return;
4917
4918         if (surface) {
4919                 mod_freesync_handle_preflip(
4920                         dm->freesync_module,
4921                         surface,
4922                         new_stream,
4923                         flip_timestamp_in_us,
4924                         &vrr_params);
4925         }
4926
4927         mod_freesync_build_vrr_infopacket(
4928                 dm->freesync_module,
4929                 new_stream,
4930                 &vrr_params,
4931                 PACKET_TYPE_VRR,
4932                 TRANSFER_FUNC_UNKNOWN,
4933                 &vrr_infopacket);
4934
4935         new_crtc_state->freesync_timing_changed |=
4936                 (memcmp(&new_crtc_state->vrr_params.adjust,
4937                         &vrr_params.adjust,
4938                         sizeof(vrr_params.adjust)) != 0);
4939
4940         new_crtc_state->freesync_vrr_info_changed |=
4941                 (memcmp(&new_crtc_state->vrr_infopacket,
4942                         &vrr_infopacket,
4943                         sizeof(vrr_infopacket)) != 0);
4944
4945         new_crtc_state->vrr_params = vrr_params;
4946         new_crtc_state->vrr_infopacket = vrr_infopacket;
4947
4948         new_stream->adjust = new_crtc_state->vrr_params.adjust;
4949         new_stream->vrr_infopacket = vrr_infopacket;
4950
4951         if (new_crtc_state->freesync_vrr_info_changed)
4952                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
4953                               new_crtc_state->base.crtc->base.id,
4954                               (int)new_crtc_state->base.vrr_enabled,
4955                               (int)vrr_params.state);
4956 }
4957
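/*
 * Compute the stream's initial VRR parameters from its freesync config
 * before any flip-time updates run.
 */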
4958 static void pre_update_freesync_state_on_stream(
4959         struct amdgpu_display_manager *dm,
4960         struct dm_crtc_state *new_crtc_state)
4961 {
4962         struct dc_stream_state *new_stream = new_crtc_state->stream;
4963         struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
4964         struct mod_freesync_config config = new_crtc_state->freesync_config;
4965
4966         if (!new_stream)
4967                 return;
4968
4969         /*
4970          * TODO: Determine why min/max totals and vrefresh can be 0 here.
4971          * For now it's sufficient to just guard against these conditions.
4972          */
4973         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
4974                 return;
4975
4976         if (new_crtc_state->vrr_supported &&
4977             config.min_refresh_in_uhz &&
4978             config.max_refresh_in_uhz) {
4979                 config.state = new_crtc_state->base.vrr_enabled ?
4980                         VRR_STATE_ACTIVE_VARIABLE :
4981                         VRR_STATE_INACTIVE;
4982         } else {
4983                 config.state = VRR_STATE_UNSUPPORTED;
4984         }
4985
4986         mod_freesync_build_vrr_params(dm->freesync_module,
4987                                       new_stream,
4988                                       &config, &vrr_params);
4989
4990         new_crtc_state->freesync_timing_changed |=
4991                 (memcmp(&new_crtc_state->vrr_params.adjust,
4992                         &vrr_params.adjust,
4993                         sizeof(vrr_params.adjust)) != 0);
4994
4995         new_crtc_state->vrr_params = vrr_params;
4996 }
4997
4998 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
4999                                             struct dm_crtc_state *new_state)
5000 {
5001         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
5002         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
5003
5004         if (!old_vrr_active && new_vrr_active) {
                /* Transition VRR inactive -> active:
                 * While VRR is active, we must not disable the vblank irq, as
                 * a re-enable after a disable would compute bogus vblank/pflip
                 * timestamps if the re-enable happened inside the display
                 * front porch.
                 *
                 * We also need the vupdate irq for the actual core vblank
                 * handling at the end of vblank.
                 */
5013                 dm_set_vupdate_irq(new_state->base.crtc, true);
5014                 drm_crtc_vblank_get(new_state->base.crtc);
5015                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
5016                                  __func__, new_state->base.crtc->base.id);
5017         } else if (old_vrr_active && !new_vrr_active) {
5018                 /* Transition VRR active -> inactive:
5019                  * Allow vblank irq disable again for fixed refresh rate.
5020                  */
5021                 dm_set_vupdate_irq(new_state->base.crtc, false);
5022                 drm_crtc_vblank_put(new_state->base.crtc);
5023                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
5024                                  __func__, new_state->base.crtc->base.id);
5025         }
5026 }
5027
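/*
 * Fill a dc_surface_update bundle for every plane on @pcrtc, throttle
 * page flips against the target vblank, arm the flip event and commit
 * the whole bundle to DC in one call.
 */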
5028 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
5029                                     struct dc_state *dc_state,
5030                                     struct drm_device *dev,
5031                                     struct amdgpu_display_manager *dm,
5032                                     struct drm_crtc *pcrtc,
5033                                     bool wait_for_vblank)
5034 {
5035         uint32_t i, r;
5036         uint64_t timestamp_ns;
5037         struct drm_plane *plane;
5038         struct drm_plane_state *old_plane_state, *new_plane_state;
5039         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
5040         struct drm_crtc_state *new_pcrtc_state =
5041                         drm_atomic_get_new_crtc_state(state, pcrtc);
5042         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
5043         struct dm_crtc_state *dm_old_crtc_state =
5044                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
5045         int planes_count = 0, vpos, hpos;
5046         unsigned long flags;
5047         struct amdgpu_bo *abo;
5048         uint64_t tiling_flags;
5049         uint32_t target_vblank, last_flip_vblank;
5050         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
5051         bool pflip_present = false;
5052         struct {
5053                 struct dc_surface_update surface_updates[MAX_SURFACES];
5054                 struct dc_plane_info plane_infos[MAX_SURFACES];
5055                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
5056                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
5057                 struct dc_stream_update stream_update;
5058         } *bundle;
5059
5060         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
5061
5062         if (!bundle) {
5063                 dm_error("Failed to allocate update bundle\n");
5064                 goto cleanup;
5065         }
5066
5067         /* update planes when needed */
5068         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
5069                 struct drm_crtc *crtc = new_plane_state->crtc;
5070                 struct drm_crtc_state *new_crtc_state;
5071                 struct drm_framebuffer *fb = new_plane_state->fb;
5072                 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
5073                 bool plane_needs_flip;
5074                 struct dc_plane_state *dc_plane;
5075                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
5076
5077                 /* Cursor plane is handled after stream updates */
5078                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5079                         continue;
5080
5081                 if (!fb || !crtc || pcrtc != crtc)
5082                         continue;
5083
5084                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
5085                 if (!new_crtc_state->active)
5086                         continue;
5087
5088                 dc_plane = dm_new_plane_state->dc_state;
5089
5090                 bundle->surface_updates[planes_count].surface = dc_plane;
5091                 if (new_pcrtc_state->color_mgmt_changed) {
5092                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
5093                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
5094                 }
5095
5096
5097                 bundle->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality;
5098                 bundle->scaling_infos[planes_count].src_rect = dc_plane->src_rect;
5099                 bundle->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
5100                 bundle->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
5101                 bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count];
5102
5103                 fill_plane_color_attributes(
5104                         new_plane_state, dc_plane,
5105                         &bundle->plane_infos[planes_count].color_space);
5106
5107                 bundle->plane_infos[planes_count].format = dc_plane->format;
5108                 bundle->plane_infos[planes_count].plane_size = dc_plane->plane_size;
5109                 bundle->plane_infos[planes_count].rotation = dc_plane->rotation;
5110                 bundle->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
5111                 bundle->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
5112                 bundle->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
5113                 bundle->plane_infos[planes_count].visible = dc_plane->visible;
5114                 bundle->plane_infos[planes_count].global_alpha = dc_plane->global_alpha;
5115                 bundle->plane_infos[planes_count].global_alpha_value = dc_plane->global_alpha_value;
5116                 bundle->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
5117                 bundle->plane_infos[planes_count].dcc = dc_plane->dcc;
5118                 bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count];
5119
5120                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
5121
5122                 pflip_present = pflip_present || plane_needs_flip;
5123
5124                 if (!plane_needs_flip) {
5125                         planes_count += 1;
5126                         continue;
5127                 }
5128
                /*
                 * TODO: This reserve/wait may fail and so is better not used;
                 * wait explicitly on the fences instead. In general this
                 * should only be done for blocking commits, as per the
                 * framework helpers.
                 */
5135                 abo = gem_to_amdgpu_bo(fb->obj[0]);
5136                 r = amdgpu_bo_reserve(abo, true);
5137                 if (unlikely(r != 0)) {
5138                         DRM_ERROR("failed to reserve buffer before flip\n");
5139                         WARN_ON(1);
5140                 }
5141
5142                 /* Wait for all fences on this FB */
5143                 WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
5144                                                                             MAX_SCHEDULE_TIMEOUT) < 0);
5145
5146                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
5147
5148                 amdgpu_bo_unreserve(abo);
5149
5150                 fill_plane_tiling_attributes(dm->adev, afb, dc_plane,
5151                         &bundle->plane_infos[planes_count].tiling_info,
5152                         &bundle->plane_infos[planes_count].dcc,
5153                         &bundle->flip_addrs[planes_count].address,
5154                         tiling_flags);
5155
5156                 bundle->flip_addrs[planes_count].flip_immediate =
5157                                 (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
5158
5159                 timestamp_ns = ktime_get_ns();
5160                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
5161                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
5162                 bundle->surface_updates[planes_count].surface = dc_plane;
5163
5164                 if (!bundle->surface_updates[planes_count].surface) {
5165                         DRM_ERROR("No surface for CRTC: id=%d\n",
5166                                         acrtc_attach->crtc_id);
5167                         continue;
5168                 }
5169
5170                 if (plane == pcrtc->primary)
5171                         update_freesync_state_on_stream(
5172                                 dm,
5173                                 acrtc_state,
5174                                 acrtc_state->stream,
5175                                 dc_plane,
5176                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
5177
5178                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
5179                                  __func__,
5180                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
5181                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
5182
5183                 planes_count += 1;
5184
5185         }
5186
5187         if (pflip_present) {
5188                 if (!vrr_active) {
5189                         /* Use old throttling in non-vrr fixed refresh rate mode
5190                          * to keep flip scheduling based on target vblank counts
5191                          * working in a backwards compatible way, e.g., for
5192                          * clients using the GLX_OML_sync_control extension or
5193                          * DRI3/Present extension with defined target_msc.
5194                          */
5195                         last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
                } else {
5198                         /* For variable refresh rate mode only:
5199                          * Get vblank of last completed flip to avoid > 1 vrr
5200                          * flips per video frame by use of throttling, but allow
5201                          * flip programming anywhere in the possibly large
5202                          * variable vrr vblank interval for fine-grained flip
5203                          * timing control and more opportunity to avoid stutter
5204                          * on late submission of flips.
5205                          */
5206                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5207                         last_flip_vblank = acrtc_attach->last_flip_vblank;
5208                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5209                 }
5210
5211                 target_vblank = last_flip_vblank + wait_for_vblank;
5212
5213                 /*
5214                  * Wait until we're out of the vertical blank period before the one
5215                  * targeted by the flip
5216                  */
5217                 while ((acrtc_attach->enabled &&
5218                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
5219                                                             0, &vpos, &hpos, NULL,
5220                                                             NULL, &pcrtc->hwmode)
5221                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
5222                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
5223                         (int)(target_vblank -
5224                           amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
5225                         usleep_range(1000, 1100);
5226                 }
5227
5228                 if (acrtc_attach->base.state->event) {
5229                         drm_crtc_vblank_get(pcrtc);
5230
5231                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5232
5233                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
5234                         prepare_flip_isr(acrtc_attach);
5235
5236                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5237                 }
5238
5239                 if (acrtc_state->stream) {
5240
5241                         if (acrtc_state->freesync_timing_changed)
5242                                 bundle->stream_update.adjust =
5243                                         &acrtc_state->stream->adjust;
5244
5245                         if (acrtc_state->freesync_vrr_info_changed)
5246                                 bundle->stream_update.vrr_infopacket =
5247                                         &acrtc_state->stream->vrr_infopacket;
5248                 }
5249         }
5250
5251         if (planes_count) {
5252                 if (new_pcrtc_state->mode_changed) {
5253                         bundle->stream_update.src = acrtc_state->stream->src;
5254                         bundle->stream_update.dst = acrtc_state->stream->dst;
5255                 }
5256
5257                 if (new_pcrtc_state->color_mgmt_changed)
5258                         bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
5259
5260                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
5261                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
5262                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
5263
5264                 mutex_lock(&dm->dc_lock);
5265                 dc_commit_updates_for_stream(dm->dc,
5266                                                      bundle->surface_updates,
5267                                                      planes_count,
5268                                                      acrtc_state->stream,
5269                                                      &bundle->stream_update,
5270                                                      dc_state);
5271                 mutex_unlock(&dm->dc_lock);
5272         }
5273
5274         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
5275                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5276                         handle_cursor_update(plane, old_plane_state);
5277
5278 cleanup:
5279         kfree(bundle);
5280 }
5281
5282 /*
5283  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
5284  * @crtc_state: the DRM CRTC state
5285  * @stream_state: the DC stream state.
5286  *
 * Copy the mirrored transient state flags from DRM to DC. This is used to
 * bring a dc_stream_state's flags in sync with a drm_crtc_state's flags.
5289  */
5290 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
5291                                                 struct dc_stream_state *stream_state)
5292 {
5293         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
5294 }
5295
5296 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
5297                                    struct drm_atomic_state *state,
5298                                    bool nonblock)
5299 {
5300         struct drm_crtc *crtc;
5301         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5302         struct amdgpu_device *adev = dev->dev_private;
5303         int i;
5304
        /*
         * Disable vblank and pageflip interrupts on the CRTCs that are about
         * to change. Do it here, before drm_atomic_helper_commit() swaps the
         * state, because the swap updates the crtc->dm_crtc_state->stream
         * pointer that the ISRs rely on.
         */
5312         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5313                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5314                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5315                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5316
5317                 if (drm_atomic_crtc_needs_modeset(new_crtc_state)
5318                     && dm_old_crtc_state->stream) {
5319                         /*
5320                          * If the stream is removed and CRC capture was
5321                          * enabled on the CRTC the extra vblank reference
5322                          * needs to be dropped since CRC capture will be
5323                          * disabled.
5324                          */
5325                         if (!dm_new_crtc_state->stream
5326                             && dm_new_crtc_state->crc_enabled) {
5327                                 drm_crtc_vblank_put(crtc);
5328                                 dm_new_crtc_state->crc_enabled = false;
5329                         }
5330
5331                         manage_dm_interrupts(adev, acrtc, false);
5332                 }
5333         }
        /*
         * Add a check here for SoCs that support a hardware cursor plane, to
         * unset legacy_cursor_update.
         */
5338
5339         return drm_atomic_helper_commit(dev, state, nonblock);
5340
        /* TODO: Handle EINTR, re-enable IRQ */
5342 }
5343
5344 /**
5345  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
5346  * @state: The atomic state to commit
5347  *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered out anything non-kosher.
5351  */
5352 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
5353 {
5354         struct drm_device *dev = state->dev;
5355         struct amdgpu_device *adev = dev->dev_private;
5356         struct amdgpu_display_manager *dm = &adev->dm;
5357         struct dm_atomic_state *dm_state;
5358         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
5359         uint32_t i, j;
5360         struct drm_crtc *crtc;
5361         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
5362         unsigned long flags;
5363         bool wait_for_vblank = true;
5364         struct drm_connector *connector;
5365         struct drm_connector_state *old_con_state, *new_con_state;
5366         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5367         int crtc_disable_count = 0;
5368
5369         drm_atomic_helper_update_legacy_modeset_state(dev, state);
5370
5371         dm_state = dm_atomic_get_new_state(state);
5372         if (dm_state && dm_state->context) {
5373                 dc_state = dm_state->context;
5374         } else {
5375                 /* No state changes, retain current state. */
5376                 dc_state_temp = dc_create_state(dm->dc);
5377                 ASSERT(dc_state_temp);
5378                 dc_state = dc_state_temp;
5379                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
5380         }
5381
5382         /* update changed items */
5383         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5384                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5385
5386                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5387                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5388
5389                 DRM_DEBUG_DRIVER(
5390                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d, active_changed:%d, "
5392                         "connectors_changed:%d\n",
5393                         acrtc->crtc_id,
5394                         new_crtc_state->enable,
5395                         new_crtc_state->active,
5396                         new_crtc_state->planes_changed,
5397                         new_crtc_state->mode_changed,
5398                         new_crtc_state->active_changed,
5399                         new_crtc_state->connectors_changed);
5400
5401                 /* Copy all transient state flags into dc state */
5402                 if (dm_new_crtc_state->stream) {
5403                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
5404                                                             dm_new_crtc_state->stream);
5405                 }
5406
                /* Handle the headless hotplug case, updating new_state and
                 * the aconnector as needed.
                 */
5410
5411                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
5412
5413                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
5414
5415                         if (!dm_new_crtc_state->stream) {
                                /*
                                 * This can happen due to delayed delivery of
                                 * userspace notifications: userspace tries to
                                 * set a mode on a display that is in fact
                                 * already disconnected, so the aconnector's
                                 * dc_sink is NULL. We expect a mode reset to
                                 * follow shortly.
                                 *
                                 * It can also happen when an unplug occurs
                                 * while the resume sequence is still running.
                                 *
                                 * In both cases we want to pretend we still
                                 * have a sink, to keep the pipe running so
                                 * that the hw state stays consistent with the
                                 * sw state.
                                 */
5431                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5432                                                 __func__, acrtc->base.base.id);
5433                                 continue;
5434                         }
5435
5436                         if (dm_old_crtc_state->stream)
5437                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5438
5439                         pm_runtime_get_noresume(dev->dev);
5440
5441                         acrtc->enabled = true;
5442                         acrtc->hw_mode = new_crtc_state->mode;
5443                         crtc->hwmode = new_crtc_state->mode;
5444                 } else if (modereset_required(new_crtc_state)) {
5445                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
5446
5447                         /* i.e. reset mode */
5448                         if (dm_old_crtc_state->stream)
5449                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
5450                 }
5451         } /* for_each_crtc_in_state() */
5452
5453         if (dc_state) {
5454                 dm_enable_per_frame_crtc_master_sync(dc_state);
5455                 mutex_lock(&dm->dc_lock);
5456                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
5457                 mutex_unlock(&dm->dc_lock);
5458         }
5459
5460         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5461                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5462
5463                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5464
5465                 if (dm_new_crtc_state->stream != NULL) {
5466                         const struct dc_stream_status *status =
5467                                         dc_stream_get_status(dm_new_crtc_state->stream);
5468
5469                         if (!status)
5470                                 status = dc_stream_get_status_from_state(dc_state,
5471                                                                          dm_new_crtc_state->stream);
5472
5473                         if (!status)
5474                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
5475                         else
5476                                 acrtc->otg_inst = status->primary_otg_inst;
5477                 }
5478         }
5479
5480         /* Handle connector state changes */
5481         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5482                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
5483                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
5484                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
5485                 struct dc_surface_update dummy_updates[MAX_SURFACES];
5486                 struct dc_stream_update stream_update;
5487                 struct dc_stream_status *status = NULL;
5488
5489                 memset(&dummy_updates, 0, sizeof(dummy_updates));
5490                 memset(&stream_update, 0, sizeof(stream_update));
5491
5492                 if (acrtc) {
5493                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
5494                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
5495                 }
5496
5497                 /* Skip any modesets/resets */
5498                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
5499                         continue;
5500
5501                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5502                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5503
5504                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
5505                                 (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
5506                         continue;
5507
5508                 if (is_scaling_state_different(dm_new_con_state, dm_old_con_state)) {
5509                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
5510                                         dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
5511
5512                         stream_update.src = dm_new_crtc_state->stream->src;
5513                         stream_update.dst = dm_new_crtc_state->stream->dst;
5514                 }
5515
5516                 if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
5517                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
5518
5519                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
5520                 }
5521
5522                 status = dc_stream_get_status(dm_new_crtc_state->stream);
5523                 if (WARN_ON(!status))
5524                         continue;
5525                 WARN_ON(!status->plane_count);
5526                 /*
5527                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
5528                  * Here we create an empty update on each plane.
5529                  * To fix this, DC should permit updating only stream properties.
5530                  */
5531                 for (j = 0; j < status->plane_count; j++)
5532                         dummy_updates[j].surface = status->plane_states[0];
5533
5534
5535                 mutex_lock(&dm->dc_lock);
5536                 dc_commit_updates_for_stream(dm->dc,
5537                                                      dummy_updates,
5538                                                      status->plane_count,
5539                                                      dm_new_crtc_state->stream,
5540                                                      &stream_update,
5541                                                      dc_state);
5542                 mutex_unlock(&dm->dc_lock);
5543         }
5544
5545         /* Update freesync state before amdgpu_dm_handle_vrr_transition(). */
5546         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5547                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5548                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
5549         }
5550
5551         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
5552                         new_crtc_state, i) {
5553                 /*
5554                  * Loop to enable interrupts on newly enabled CRTCs.
5555                  */
5556                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5557                 bool modeset_needed;
5558
5559                 if (old_crtc_state->active && !new_crtc_state->active)
5560                         crtc_disable_count++;
5561
5562                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5563                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5564
5565                 /* Handle vrr on->off / off->on transitions */
5566                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
5567                                                 dm_new_crtc_state);
5568
5569                 modeset_needed = modeset_required(
5570                                 new_crtc_state,
5571                                 dm_new_crtc_state->stream,
5572                                 dm_old_crtc_state->stream);
5573
5574                 if (dm_new_crtc_state->stream == NULL || !modeset_needed)
5575                         continue;
5576
5577                 manage_dm_interrupts(adev, acrtc, true);
5578
5579 #ifdef CONFIG_DEBUG_FS
5580                 /* The stream has changed so CRC capture needs to be re-enabled. */
5581                 if (dm_new_crtc_state->crc_enabled)
5582                         amdgpu_dm_crtc_set_crc_source(crtc, "auto");
5583 #endif
5584         }
5585
5586         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
5587                 if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
5588                         wait_for_vblank = false;
5589
5590         /* Update planes when needed, per CRTC */
5591         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
5592                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5593
5594                 if (dm_new_crtc_state->stream)
5595                         amdgpu_dm_commit_planes(state, dc_state, dev,
5596                                                 dm, crtc, wait_for_vblank);
5597         }
5598
5599
5600         /*
5601          * Send a vblank event for every event not handled in the flip path,
5602          * and mark the event as consumed for drm_atomic_helper_commit_hw_done().
5603          */
5604         spin_lock_irqsave(&adev->ddev->event_lock, flags);
5605         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
5606
5607                 if (new_crtc_state->event)
5608                         drm_send_event_locked(dev, &new_crtc_state->event->base);
5609
5610                 new_crtc_state->event = NULL;
5611         }
5612         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5613
5614         /* Signal HW programming completion */
5615         drm_atomic_helper_commit_hw_done(state);
5616
5617         if (wait_for_vblank)
5618                 drm_atomic_helper_wait_for_flip_done(dev, state);
5619
5620         drm_atomic_helper_cleanup_planes(dev, state);
5621
5622         /*
5623          * Finally, drop a runtime PM reference for each newly disabled CRTC,
5624          * so we can put the GPU into runtime suspend if we're not driving any
5625          * displays anymore
5626          */
5627         for (i = 0; i < crtc_disable_count; i++)
5628                 pm_runtime_put_autosuspend(dev->dev);
5629         pm_runtime_mark_last_busy(dev->dev);
5630
5631         if (dc_state_temp)
5632                 dc_release_state(dc_state_temp);
5633 }
5634
5635
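     /*
      * Build a one-off atomic state containing @connector, its CRTC and the
      * CRTC's primary plane, mark the CRTC state as mode_changed and commit
      * it, forcing the previous display setting back onto the hardware.
      */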
5636 static int dm_force_atomic_commit(struct drm_connector *connector)
5637 {
5638         int ret = 0;
5639         struct drm_device *ddev = connector->dev;
5640         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
5641         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5642         struct drm_plane *plane = disconnected_acrtc->base.primary;
5643         struct drm_connector_state *conn_state;
5644         struct drm_crtc_state *crtc_state;
5645         struct drm_plane_state *plane_state;
5646
5647         if (!state)
5648                 return -ENOMEM;
5649
5650         state->acquire_ctx = ddev->mode_config.acquire_ctx;
5651
5652         /* Construct an atomic state to restore the previous display settings */
5653
5654         /*
5655          * Attach connectors to drm_atomic_state
5656          */
5657         conn_state = drm_atomic_get_connector_state(state, connector);
5658
5659         ret = PTR_ERR_OR_ZERO(conn_state);
5660         if (ret)
5661                 goto err;
5662
5663         /* Attach CRTC to drm_atomic_state */
5664         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
5665
5666         ret = PTR_ERR_OR_ZERO(crtc_state);
5667         if (ret)
5668                 goto err;
5669
5670         /* force a restore */
5671         crtc_state->mode_changed = true;
5672
5673         /* Attach plane to drm_atomic_state */
5674         plane_state = drm_atomic_get_plane_state(state, plane);
5675
5676         ret = PTR_ERR_OR_ZERO(plane_state);
5677         if (ret)
5678                 goto err;
5679
5680
5681         /* Call commit internally with the state we just constructed */
5682         ret = drm_atomic_commit(state);
5683         if (!ret)
5684                 return 0;
5685
5686 err:
5687         DRM_ERROR("Restoring old state failed with %i\n", ret);
5688         drm_atomic_state_put(state);
5689
5690         return ret;
5691 }
5692
5693 /*
5694  * This function handles all cases when a set-mode call does not arrive upon
5695  * hotplug. This includes when a display is unplugged and then plugged back
5696  * into the same port, and when running without usermode desktop manager support.
5697  */
5698 void dm_restore_drm_connector_state(struct drm_device *dev,
5699                                     struct drm_connector *connector)
5700 {
5701         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5702         struct amdgpu_crtc *disconnected_acrtc;
5703         struct dm_crtc_state *acrtc_state;
5704
5705         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
5706                 return;
5707
5708         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
5709         if (!disconnected_acrtc)
5710                 return;
5711
5712         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
5713         if (!acrtc_state->stream)
5714                 return;
5715
5716         /*
5717          * If the previous sink is not released and is different from the current
5718          * one, we deduce that we are in a state where we cannot rely on a usermode
5719          * call to turn on the display, so we do it here.
5720          */
5721         if (acrtc_state->stream->sink != aconnector->dc_sink)
5722                 dm_force_atomic_commit(&aconnector->base);
5723 }
5724
5725 /*
5726  * Grabs all modesetting locks to serialize against any blocking commits,
5727  * and waits for completion of all non-blocking commits.
5728  */
5729 static int do_acquire_global_lock(struct drm_device *dev,
5730                                  struct drm_atomic_state *state)
5731 {
5732         struct drm_crtc *crtc;
5733         struct drm_crtc_commit *commit;
5734         long ret;
5735
5736         /*
5737          * Adding all modeset locks to acquire_ctx will
5738          * ensure that when the framework releases it, the
5739          * extra locks we are taking here will also get released.
5740          */
5741         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
5742         if (ret)
5743                 return ret;
5744
5745         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5746                 spin_lock(&crtc->commit_lock);
5747                 commit = list_first_entry_or_null(&crtc->commit_list,
5748                                 struct drm_crtc_commit, commit_entry);
5749                 if (commit)
5750                         drm_crtc_commit_get(commit);
5751                 spin_unlock(&crtc->commit_lock);
5752
5753                 if (!commit)
5754                         continue;
5755
5756                 /*
5757                  * Make sure all pending HW programming completed and
5758                  * page flips done
5759                  */
5760                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
5761
5762                 if (ret > 0)
5763                         ret = wait_for_completion_interruptible_timeout(
5764                                         &commit->flip_done, 10*HZ);
5765
5766                 if (ret == 0)
5767                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
5768                                   crtc->base.id, crtc->name);
5769
5770                 drm_crtc_commit_put(commit);
5771         }
5772
5773         return ret < 0 ? ret : 0;
5774 }
5775
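     /*
      * Derive the FreeSync/VRR configuration for a CRTC's stream from the
      * connector capabilities and the DRM vrr_enabled property. VRR is only
      * supported when the sink is FreeSync-capable and the mode's refresh
      * rate is at or above the panel's minimum supported rate.
      */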
5776 static void get_freesync_config_for_crtc(
5777         struct dm_crtc_state *new_crtc_state,
5778         struct dm_connector_state *new_con_state)
5779 {
5780         struct mod_freesync_config config = {0};
5781         struct amdgpu_dm_connector *aconnector =
5782                         to_amdgpu_dm_connector(new_con_state->base.connector);
5783         struct drm_display_mode *mode = &new_crtc_state->base.mode;
5784
5785         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
5786                 aconnector->min_vfreq <= drm_mode_vrefresh(mode);
5787
5788         if (new_crtc_state->vrr_supported) {
5789                 new_crtc_state->stream->ignore_msa_timing_param = true;
5790                 config.state = new_crtc_state->base.vrr_enabled ?
5791                                 VRR_STATE_ACTIVE_VARIABLE :
5792                                 VRR_STATE_INACTIVE;
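                     /* min/max_vfreq are in Hz; DC expects micro-Hz (uHz) */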
5793                 config.min_refresh_in_uhz =
5794                                 aconnector->min_vfreq * 1000000;
5795                 config.max_refresh_in_uhz =
5796                                 aconnector->max_vfreq * 1000000;
5797                 config.vsif_supported = true;
5798                 config.btr = true;
5799         }
5800
5801         new_crtc_state->freesync_config = config;
5802 }
5803
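     /* Clear any cached VRR state when the stream is reset or removed */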
5804 static void reset_freesync_config_for_crtc(
5805         struct dm_crtc_state *new_crtc_state)
5806 {
5807         new_crtc_state->vrr_supported = false;
5808
5809         memset(&new_crtc_state->vrr_params, 0,
5810                sizeof(new_crtc_state->vrr_params));
5811         memset(&new_crtc_state->vrr_infopacket, 0,
5812                sizeof(new_crtc_state->vrr_infopacket));
5813 }
5814
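     /*
      * Validate and (de)allocate the DC stream backing a CRTC for the new
      * atomic state: with @enable false, the old stream is removed from the
      * DC context; with @enable true, a stream is created from the connector
      * state and added to it. *lock_and_validation_needed is set whenever
      * the DC context changes, telling the caller that a full validation
      * pass (and the global lock) is required.
      */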
5815 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
5816                                 struct drm_atomic_state *state,
5817                                 struct drm_crtc *crtc,
5818                                 struct drm_crtc_state *old_crtc_state,
5819                                 struct drm_crtc_state *new_crtc_state,
5820                                 bool enable,
5821                                 bool *lock_and_validation_needed)
5822 {
5823         struct dm_atomic_state *dm_state = NULL;
5824         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
5825         struct dc_stream_state *new_stream;
5826         int ret = 0;
5827
5828         /*
5829          * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
5830          * update changed items
5831          */
5832         struct amdgpu_crtc *acrtc = NULL;
5833         struct amdgpu_dm_connector *aconnector = NULL;
5834         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
5835         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
5836         struct drm_plane_state *new_plane_state = NULL;
5837
5838         new_stream = NULL;
5839
5840         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
5841         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
5842         acrtc = to_amdgpu_crtc(crtc);
5843
5844         new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
5845
5846         if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
5847                 ret = -EINVAL;
5848                 goto fail;
5849         }
5850
5851         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
5852
5853         /* TODO This hack should go away */
5854         if (aconnector && enable) {
5855                 /* Make sure fake sink is created in plug-in scenario */
5856                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
5857                                                             &aconnector->base);
5858                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
5859                                                             &aconnector->base);
5860
5861                 if (IS_ERR(drm_new_conn_state)) {
5862                         ret = PTR_ERR(drm_new_conn_state);
5863                         goto fail;
5864                 }
5865
5866                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
5867                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
5868
5869                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5870                         goto skip_modeset;
5871
5872                 new_stream = create_stream_for_sink(aconnector,
5873                                                      &new_crtc_state->mode,
5874                                                     dm_new_conn_state,
5875                                                     dm_old_crtc_state->stream);
5876
5877                 /*
5878                  * We can have no stream on ACTION_SET if a display
5879                  * was disconnected during S3. In this case it is not an
5880                  * error; the OS will be updated after detection and
5881                  * will do the right thing on the next atomic commit.
5882                  */
5883
5884                 if (!new_stream) {
5885                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
5886                                         __func__, acrtc->base.base.id);
5887                         ret = -ENOMEM;
5888                         goto fail;
5889                 }
5890
5891                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
5892
5893                 if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
5894                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
5895                         new_crtc_state->mode_changed = false;
5896                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
5897                                          new_crtc_state->mode_changed);
5898                 }
5899         }
5900
5901         /* mode_changed flag may get updated above, need to check again */
5902         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5903                 goto skip_modeset;
5904
5905         DRM_DEBUG_DRIVER(
5906                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
5907                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
5908                 "connectors_changed:%d\n",
5909                 acrtc->crtc_id,
5910                 new_crtc_state->enable,
5911                 new_crtc_state->active,
5912                 new_crtc_state->planes_changed,
5913                 new_crtc_state->mode_changed,
5914                 new_crtc_state->active_changed,
5915                 new_crtc_state->connectors_changed);
5916
5917         /* Remove stream for any changed/disabled CRTC */
5918         if (!enable) {
5919
5920                 if (!dm_old_crtc_state->stream)
5921                         goto skip_modeset;
5922
5923                 ret = dm_atomic_get_state(state, &dm_state);
5924                 if (ret)
5925                         goto fail;
5926
5927                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
5928                                 crtc->base.id);
5929
5930                 /* i.e. reset mode */
5931                 if (dc_remove_stream_from_ctx(
5932                                 dm->dc,
5933                                 dm_state->context,
5934                                 dm_old_crtc_state->stream) != DC_OK) {
5935                         ret = -EINVAL;
5936                         goto fail;
5937                 }
5938
5939                 dc_stream_release(dm_old_crtc_state->stream);
5940                 dm_new_crtc_state->stream = NULL;
5941
5942                 reset_freesync_config_for_crtc(dm_new_crtc_state);
5943
5944                 *lock_and_validation_needed = true;
5945
5946         } else { /* Add stream for any updated/enabled CRTC */
5947                 /*
5948                  * Quick fix to prevent a NULL-pointer dereference on new_stream when
5949                  * newly added MST connectors are not found in the existing crtc_state
5950                  * in chained mode. TODO: need to dig out the root cause of that.
5951                  */
5952                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
5953                         goto skip_modeset;
5954
5955                 if (modereset_required(new_crtc_state))
5956                         goto skip_modeset;
5957
5958                 if (modeset_required(new_crtc_state, new_stream,
5959                                      dm_old_crtc_state->stream)) {
5960
5961                         WARN_ON(dm_new_crtc_state->stream);
5962
5963                         ret = dm_atomic_get_state(state, &dm_state);
5964                         if (ret)
5965                                 goto fail;
5966
5967                         dm_new_crtc_state->stream = new_stream;
5968
5969                         dc_stream_retain(new_stream);
5970
5971                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
5972                                                 crtc->base.id);
5973
5974                         if (dc_add_stream_to_ctx(
5975                                         dm->dc,
5976                                         dm_state->context,
5977                                         dm_new_crtc_state->stream) != DC_OK) {
5978                                 ret = -EINVAL;
5979                                 goto fail;
5980                         }
5981
5982                         *lock_and_validation_needed = true;
5983                 }
5984         }
5985
5986 skip_modeset:
5987         /* Release extra reference */
5988         if (new_stream)
5989                 dc_stream_release(new_stream);
5990
5991         /*
5992          * We want to do dc stream updates that do not require a
5993          * full modeset below.
5994          */
5995         if (!(enable && aconnector && new_crtc_state->enable &&
5996               new_crtc_state->active))
5997                 return 0;
5998         /*
5999          * Given above conditions, the dc state cannot be NULL because:
6000          * 1. We're in the process of enabling CRTCs (just been added
6001          *    to the dc context, or already is on the context)
6002          * 2. Has a valid connector attached, and
6003          * 3. Is currently active and enabled.
6004          * => The dc stream state currently exists.
6005          */
6006         BUG_ON(dm_new_crtc_state->stream == NULL);
6007
6008         /* Scaling or underscan settings */
6009         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
6010                 update_stream_scaling_settings(
6011                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
6012
6013         /* ABM settings */
6014         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
6015
6016         /*
6017          * Color management settings. We also update color properties
6018          * when a modeset is needed, to ensure it gets reprogrammed.
6019          */
6020         if (dm_new_crtc_state->base.color_mgmt_changed ||
6021             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
6022                 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
6023                 if (ret)
6024                         goto fail;
6025                 amdgpu_dm_set_ctm(dm_new_crtc_state);
6026         }
6027
6028         /* Update Freesync settings. */
6029         get_freesync_config_for_crtc(dm_new_crtc_state,
6030                                      dm_new_conn_state);
6031
6032         return ret;
6033
6034 fail:
6035         if (new_stream)
6036                 dc_stream_release(new_stream);
6037         return ret;
6038 }
6039
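     /*
      * Plane counterpart of dm_update_crtc_state(): with @enable false, the
      * DC plane state is removed from the context and released; with @enable
      * true, a new dc_plane_state is created, filled from the DRM plane
      * state and attached to the owning CRTC's stream.
      */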
6040 static int dm_update_plane_state(struct dc *dc,
6041                                  struct drm_atomic_state *state,
6042                                  struct drm_plane *plane,
6043                                  struct drm_plane_state *old_plane_state,
6044                                  struct drm_plane_state *new_plane_state,
6045                                  bool enable,
6046                                  bool *lock_and_validation_needed)
6047 {
6048
6049         struct dm_atomic_state *dm_state = NULL;
6050         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
6051         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6052         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
6053         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
6054         /* TODO return page_flip_needed() function */
6055         bool pflip_needed  = !state->allow_modeset;
6056         int ret = 0;
6057
6058
6059         new_plane_crtc = new_plane_state->crtc;
6060         old_plane_crtc = old_plane_state->crtc;
6061         dm_new_plane_state = to_dm_plane_state(new_plane_state);
6062         dm_old_plane_state = to_dm_plane_state(old_plane_state);
6063
6064         /* TODO: Implement atomic check for cursor plane */
6065         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6066                 return 0;
6067
6068         /* Remove any changed/removed planes */
6069         if (!enable) {
6070                 if (pflip_needed &&
6071                     plane->type != DRM_PLANE_TYPE_OVERLAY)
6072                         return 0;
6073
6074                 if (!old_plane_crtc)
6075                         return 0;
6076
6077                 old_crtc_state = drm_atomic_get_old_crtc_state(
6078                                 state, old_plane_crtc);
6079                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6080
6081                 if (!dm_old_crtc_state->stream)
6082                         return 0;
6083
6084                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
6085                                 plane->base.id, old_plane_crtc->base.id);
6086
6087                 ret = dm_atomic_get_state(state, &dm_state);
6088                 if (ret)
6089                         return ret;
6090
6091                 if (!dc_remove_plane_from_context(
6092                                 dc,
6093                                 dm_old_crtc_state->stream,
6094                                 dm_old_plane_state->dc_state,
6095                                 dm_state->context)) {
6096
6097                         ret = -EINVAL;
6098                         return ret;
6099                 }
6100
6101
6102                 dc_plane_state_release(dm_old_plane_state->dc_state);
6103                 dm_new_plane_state->dc_state = NULL;
6104
6105                 *lock_and_validation_needed = true;
6106
6107         } else { /* Add new planes */
6108                 struct dc_plane_state *dc_new_plane_state;
6109
6110                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
6111                         return 0;
6112
6113                 if (!new_plane_crtc)
6114                         return 0;
6115
6116                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
6117                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6118
6119                 if (!dm_new_crtc_state->stream)
6120                         return 0;
6121
6122                 if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
6123                         return 0;
6124
6125                 WARN_ON(dm_new_plane_state->dc_state);
6126
6127                 dc_new_plane_state = dc_create_plane_state(dc);
6128                 if (!dc_new_plane_state)
6129                         return -ENOMEM;
6130
6131                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
6132                                 plane->base.id, new_plane_crtc->base.id);
6133
6134                 ret = fill_plane_attributes(
6135                         new_plane_crtc->dev->dev_private,
6136                         dc_new_plane_state,
6137                         new_plane_state,
6138                         new_crtc_state);
6139                 if (ret) {
6140                         dc_plane_state_release(dc_new_plane_state);
6141                         return ret;
6142                 }
6143
6144                 ret = dm_atomic_get_state(state, &dm_state);
6145                 if (ret) {
6146                         dc_plane_state_release(dc_new_plane_state);
6147                         return ret;
6148                 }
6149
6150                 /*
6151                  * Any atomic check errors that occur after this will
6152                  * not need a release. The plane state will be attached
6153                  * to the stream, and therefore part of the atomic
6154                  * state. It'll be released when the atomic state is
6155                  * cleaned.
6156                  */
6157                 if (!dc_add_plane_to_context(
6158                                 dc,
6159                                 dm_new_crtc_state->stream,
6160                                 dc_new_plane_state,
6161                                 dm_state->context)) {
6162
6163                         dc_plane_state_release(dc_new_plane_state);
6164                         return -EINVAL;
6165                 }
6166
6167                 dm_new_plane_state->dc_state = dc_new_plane_state;
6168
6169                 /* Tell DC to do a full surface update every time there
6170                  * is a plane change. Inefficient, but works for now.
6171                  */
6172                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
6173
6174                 *lock_and_validation_needed = true;
6175         }
6176
6177
6178         return ret;
6179 }
6180
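     /*
      * Classify the pending commit as FAST, MED or FULL by building
      * per-plane dc_surface_updates for each CRTC in the state and asking
      * DC, via dc_check_update_surfaces_for_stream(), how invasive the
      * required programming is. Stream or plane-state replacement
      * immediately forces UPDATE_TYPE_FULL.
      */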
6181 static int
6182 dm_determine_update_type_for_commit(struct dc *dc,
6183                                     struct drm_atomic_state *state,
6184                                     enum surface_update_type *out_type)
6185 {
6186         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
6187         int i, j, num_plane, ret = 0;
6188         struct drm_plane_state *old_plane_state, *new_plane_state;
6189         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
6190         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
6191         struct drm_plane *plane;
6192
6193         struct drm_crtc *crtc;
6194         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
6195         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
6196         struct dc_stream_status *status = NULL;
6197
6198         struct dc_surface_update *updates;
6199         struct dc_plane_state *surface;
6200         enum surface_update_type update_type = UPDATE_TYPE_FAST;
6201
6202         updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
6203         surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
6204
6205         if (!updates || !surface) {
6206                 DRM_ERROR("Failed to allocate plane/surface update arrays\n");
6207                 /* Set type to FULL to avoid crashing in DC */
6208                 update_type = UPDATE_TYPE_FULL;
6209                 goto cleanup;
6210         }
6211
6212         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6213                 struct dc_stream_update stream_update;
6214
6215                 memset(&stream_update, 0, sizeof(stream_update));
6216
6217                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6218                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
6219                 num_plane = 0;
6220
6221                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
6222                         update_type = UPDATE_TYPE_FULL;
6223                         goto cleanup;
6224                 }
6225
6226                 if (!new_dm_crtc_state->stream)
6227                         continue;
6228
6229                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
6230                         new_plane_crtc = new_plane_state->crtc;
6231                         old_plane_crtc = old_plane_state->crtc;
6232                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
6233                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
6234
6235                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6236                                 continue;
6237
6238                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
6239                                 update_type = UPDATE_TYPE_FULL;
6240                                 goto cleanup;
6241                         }
6242
6243                         if (!state->allow_modeset)
6244                                 continue;
6245
6246                         if (crtc != new_plane_crtc)
6247                                 continue;
6248
6249                         updates[num_plane].surface = &surface[num_plane];
6250
6251                         if (new_crtc_state->mode_changed) {
6252                                 updates[num_plane].surface->src_rect =
6253                                                 new_dm_plane_state->dc_state->src_rect;
6254                                 updates[num_plane].surface->dst_rect =
6255                                                 new_dm_plane_state->dc_state->dst_rect;
6256                                 updates[num_plane].surface->rotation =
6257                                                 new_dm_plane_state->dc_state->rotation;
6258                                 updates[num_plane].surface->in_transfer_func =
6259                                                 new_dm_plane_state->dc_state->in_transfer_func;
6260                                 stream_update.dst = new_dm_crtc_state->stream->dst;
6261                                 stream_update.src = new_dm_crtc_state->stream->src;
6262                         }
6263
6264                         if (new_crtc_state->color_mgmt_changed) {
6265                                 updates[num_plane].gamma =
6266                                                 new_dm_plane_state->dc_state->gamma_correction;
6267                                 updates[num_plane].in_transfer_func =
6268                                                 new_dm_plane_state->dc_state->in_transfer_func;
6269                                 stream_update.gamut_remap =
6270                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
6271                                 stream_update.out_transfer_func =
6272                                                 new_dm_crtc_state->stream->out_transfer_func;
6273                         }
6274
6275                         num_plane++;
6276                 }
6277
6278                 if (num_plane == 0)
6279                         continue;
6280
6281                 ret = dm_atomic_get_state(state, &dm_state);
6282                 if (ret)
6283                         goto cleanup;
6284
6285                 old_dm_state = dm_atomic_get_old_state(state);
6286                 if (!old_dm_state) {
6287                         ret = -EINVAL;
6288                         goto cleanup;
6289                 }
6290
6291                 status = dc_stream_get_status_from_state(old_dm_state->context,
6292                                                          new_dm_crtc_state->stream);
6293
6294                 update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
6295                                                                   &stream_update, status);
6296
6297                 if (update_type > UPDATE_TYPE_MED) {
6298                         update_type = UPDATE_TYPE_FULL;
6299                         goto cleanup;
6300                 }
6301         }
6302
6303 cleanup:
6304         kfree(updates);
6305         kfree(surface);
6306
6307         *out_type = update_type;
6308         return ret;
6309 }
6310
6311 /**
6312  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
6313  * @dev: The DRM device
6314  * @state: The atomic state to commit
6315  *
6316  * Validate that the given atomic state is programmable by DC into hardware.
6317  * This involves constructing a &struct dc_state reflecting the new hardware
6318  * state we wish to commit, then querying DC to see if it is programmable. It's
6319  * important not to modify the existing DC state. Otherwise, atomic_check
6320  * may unexpectedly commit hardware changes.
6321  *
6322  * When validating the DC state, it's important that the right locks are
6323  * acquired. For full updates case which removes/adds/updates streams on one
6324  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
6325  * that any such full update commit will wait for completion of any outstanding
6326  * flip using DRMs synchronization events. See
6327  * dm_determine_update_type_for_commit()
6328  *
6329  * Note that DM adds the affected connectors for all CRTCs in state, when that
6330  * might not seem necessary. This is because DC stream creation requires the
6331  * DC sink, which is tied to the DRM connector state. Cleaning this up should
6332  * be possible but non-trivial - a possible TODO item.
6333  *
6334  * Return: 0 on success, or a negative error code if validation failed.
6335  */
6336 static int amdgpu_dm_atomic_check(struct drm_device *dev,
6337                                   struct drm_atomic_state *state)
6338 {
6339         struct amdgpu_device *adev = dev->dev_private;
6340         struct dm_atomic_state *dm_state = NULL;
6341         struct dc *dc = adev->dm.dc;
6342         struct drm_connector *connector;
6343         struct drm_connector_state *old_con_state, *new_con_state;
6344         struct drm_crtc *crtc;
6345         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6346         struct drm_plane *plane;
6347         struct drm_plane_state *old_plane_state, *new_plane_state;
6348         enum surface_update_type update_type = UPDATE_TYPE_FAST;
6349         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
6350
6351         int ret, i;
6352
6353         /*
6354          * This bool will be set to true for any modeset/reset
6355          * or plane update which implies a non-fast surface update.
6356          */
6357         bool lock_and_validation_needed = false;
6358
6359         ret = drm_atomic_helper_check_modeset(dev, state);
6360         if (ret)
6361                 goto fail;
6362
6363         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6364                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
6365                     !new_crtc_state->color_mgmt_changed &&
6366                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
6367                         continue;
6368
6369                 if (!new_crtc_state->enable)
6370                         continue;
6371
6372                 ret = drm_atomic_add_affected_connectors(state, crtc);
6373                 if (ret)
6374                         goto fail;
6375
6376                 ret = drm_atomic_add_affected_planes(state, crtc);
6377                 if (ret)
6378                         goto fail;
6379         }
6380
6381         /*
6382          * Add all primary and overlay planes on the CRTC to the state
6383          * whenever a plane is enabled to maintain correct z-ordering
6384          * and to enable fast surface updates.
6385          */
6386         drm_for_each_crtc(crtc, dev) {
6387                 bool modified = false;
6388
6389                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6390                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6391                                 continue;
6392
6393                         if (new_plane_state->crtc == crtc ||
6394                             old_plane_state->crtc == crtc) {
6395                                 modified = true;
6396                                 break;
6397                         }
6398                 }
6399
6400                 if (!modified)
6401                         continue;
6402
6403                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
6404                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
6405                                 continue;
6406
6407                         new_plane_state =
6408                                 drm_atomic_get_plane_state(state, plane);
6409
6410                         if (IS_ERR(new_plane_state)) {
6411                                 ret = PTR_ERR(new_plane_state);
6412                                 goto fail;
6413                         }
6414                 }
6415         }
6416
6417         /* Remove existing planes if they are modified */
6418         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
6419                 ret = dm_update_plane_state(dc, state, plane,
6420                                             old_plane_state,
6421                                             new_plane_state,
6422                                             false,
6423                                             &lock_and_validation_needed);
6424                 if (ret)
6425                         goto fail;
6426         }
6427
6428         /* Disable all crtcs which require disable */
6429         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6430                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
6431                                            old_crtc_state,
6432                                            new_crtc_state,
6433                                            false,
6434                                            &lock_and_validation_needed);
6435                 if (ret)
6436                         goto fail;
6437         }
6438
6439         /* Enable all crtcs which require enable */
6440         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6441                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
6442                                            old_crtc_state,
6443                                            new_crtc_state,
6444                                            true,
6445                                            &lock_and_validation_needed);
6446                 if (ret)
6447                         goto fail;
6448         }
6449
6450         /* Add new/modified planes */
6451         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
6452                 ret = dm_update_plane_state(dc, state, plane,
6453                                             old_plane_state,
6454                                             new_plane_state,
6455                                             true,
6456                                             &lock_and_validation_needed);
6457                 if (ret)
6458                         goto fail;
6459         }
6460
6461         /* Run this here since we want to validate the streams we created */
6462         ret = drm_atomic_helper_check_planes(dev, state);
6463         if (ret)
6464                 goto fail;
6465
6466         /* Check scaling and underscan changes */
6467         /* TODO: Removed scaling-changes validation due to inability to commit
6468          * a new stream into the context w/o causing a full reset. Need to
6469          * decide how to handle.
6470          */
6471         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6472                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6473                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6474                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6475
6476                 /* Skip any modesets/resets */
6477                 if (!acrtc || drm_atomic_crtc_needs_modeset(
6478                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
6479                         continue;
6480
6481         /* Skip anything that is not a scaling or underscan change */
6482                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
6483                         continue;
6484
6485                 overall_update_type = UPDATE_TYPE_FULL;
6486                 lock_and_validation_needed = true;
6487         }
6488
6489         ret = dm_determine_update_type_for_commit(dc, state, &update_type);
6490         if (ret)
6491                 goto fail;
6492
6493         if (overall_update_type < update_type)
6494                 overall_update_type = update_type;
6495
6496         /*
6497          * lock_and_validation_needed was an old way to determine if we need to set
6498          * the global lock. Leaving it in to check if we broke any corner cases:
6499          * lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
6500          * lock_and_validation_needed false = UPDATE_TYPE_FAST
6501          */
6502         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
6503                 WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL\n");
6504         else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
6505                 WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST\n");
6506
6507
6508         if (overall_update_type > UPDATE_TYPE_FAST) {
6509                 ret = dm_atomic_get_state(state, &dm_state);
6510                 if (ret)
6511                         goto fail;
6512
6513                 ret = do_acquire_global_lock(dev, state);
6514                 if (ret)
6515                         goto fail;
6516
6517                 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
6518                         ret = -EINVAL;
6519                         goto fail;
6520                 }
6521         } else if (state->legacy_cursor_update) {
6522                 /*
6523                  * This is a fast cursor update coming from the plane update
6524                  * helper, check if it can be done asynchronously for better
6525                  * performance.
6526                  */
6527                 state->async_update = !drm_atomic_helper_async_check(dev, state);
6528         }
6529
6530         /* Must have succeeded at this point */
6531         WARN_ON(ret);
6532         return ret;
6533
6534 fail:
6535         if (ret == -EDEADLK)
6536                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
6537         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
6538                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
6539         else
6540                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
6541
6542         return ret;
6543 }
6544
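     /*
      * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report
      * whether DP_MSA_TIMING_PAR_IGNORED is set, i.e. whether the sink can
      * accept timings that deviate from the MSA parameters - a requirement
      * for FreeSync over DP.
      */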
6545 static bool is_dp_capable_without_timing_msa(struct dc *dc,
6546                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
6547 {
6548         uint8_t dpcd_data;
6549         bool capable = false;
6550
6551         if (amdgpu_dm_connector->dc_link &&
6552                 dm_helpers_dp_read_dpcd(
6553                                 NULL,
6554                                 amdgpu_dm_connector->dc_link,
6555                                 DP_DOWN_STREAM_PORT_COUNT,
6556                                 &dpcd_data,
6557                                 sizeof(dpcd_data))) {
6558                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
6559         }
6560
6561         return capable;
6562 }
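
     /**
      * amdgpu_dm_update_freesync_caps - Update a connector's FreeSync state
      * @connector: DRM connector to update
      * @edid: EDID of the attached sink, or NULL on disconnect
      *
      * Parse the EDID detailed-timing descriptors for a monitor range
      * descriptor, cache the supported refresh-rate range on the connector,
      * and update both dm_connector_state->freesync_capable and the DRM
      * vrr_capable property accordingly.
      */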
6563 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
6564                                         struct edid *edid)
6565 {
6566         int i;
6567         bool edid_check_required;
6568         struct detailed_timing *timing;
6569         struct detailed_non_pixel *data;
6570         struct detailed_data_monitor_range *range;
6571         struct amdgpu_dm_connector *amdgpu_dm_connector =
6572                         to_amdgpu_dm_connector(connector);
6573         struct dm_connector_state *dm_con_state = NULL;
6574
6575         struct drm_device *dev = connector->dev;
6576         struct amdgpu_device *adev = dev->dev_private;
6577         bool freesync_capable = false;
6578
6579         if (!connector->state) {
6580                 DRM_ERROR("%s - Connector has no state\n", __func__);
6581                 goto update;
6582         }
6583
6584         if (!edid) {
6585                 dm_con_state = to_dm_connector_state(connector->state);
6586
6587                 amdgpu_dm_connector->min_vfreq = 0;
6588                 amdgpu_dm_connector->max_vfreq = 0;
6589                 amdgpu_dm_connector->pixel_clock_mhz = 0;
6590
6591                 goto update;
6592         }
6593
6594         dm_con_state = to_dm_connector_state(connector->state);
6595
6596         edid_check_required = false;
6597         if (!amdgpu_dm_connector->dc_sink) {
6598                 DRM_ERROR("dc_sink is NULL, cannot update FreeSync capabilities\n");
6599                 goto update;
6600         }
6601         if (!adev->dm.freesync_module)
6602                 goto update;
6603         /*
6604          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
6605          */
6606         if (edid) {
6607                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
6608                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
6609                         edid_check_required = is_dp_capable_without_timing_msa(
6610                                                 adev->dm.dc,
6611                                                 amdgpu_dm_connector);
6612                 }
6613         }
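             /* Monitor range descriptors are only consulted on EDID 1.2+ sinks */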
6614         if (edid_check_required && (edid->version > 1 ||
6615                                     (edid->version == 1 && edid->revision > 1))) {
6616                 for (i = 0; i < 4; i++) {
6617
6618                         timing  = &edid->detailed_timings[i];
6619                         data    = &timing->data.other_data;
6620                         range   = &data->data.range;
6621                         /*
6622                          * Check if the monitor has continuous frequency mode.
6623                          */
6624                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
6625                                 continue;
6626                         /*
6627                          * Check for the range-limits flag only. If flags == 1 then
6628                          * no additional timing information is provided.
6629                          * Default GTF, GTF secondary curve and CVT are not
6630                          * supported.
6631                          */
6632                         if (range->flags != 1)
6633                                 continue;
6634
6635                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
6636                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
6637                         amdgpu_dm_connector->pixel_clock_mhz =
6638                                 range->pixel_clock_mhz * 10;
6639                         break;
6640                 }
6641
6642                 if (amdgpu_dm_connector->max_vfreq -
6643                     amdgpu_dm_connector->min_vfreq > 10) {
6644
6645                         freesync_capable = true;
6646                 }
6647         }
6648
6649 update:
6650         if (dm_con_state)
6651                 dm_con_state->freesync_capable = freesync_capable;
6652
6653         if (connector->vrr_capable_property)
6654                 drm_connector_set_vrr_capable_property(connector,
6655                                                        freesync_capable);
6656 }
6657