Merge remote-tracking branches 'asoc/topic/hdac_hdmi', 'asoc/topic/hisilicon', 'asoc...
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42
43 #include "ivsrcid/ivsrcid_vislands30.h"
44
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49
50 #include <drm/drmP.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
56
57 #include "modules/inc/mod_freesync.h"
58
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "vega10/soc15ip.h"
65
66 #include "soc15_common.h"
67 #endif
68
69 #include "modules/inc/mod_freesync.h"
70
71 #include "i2caux_interface.h"
72
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device *adev);
75 static void amdgpu_dm_fini(struct amdgpu_device *adev);
76
77 /* initializes drm_device display related structures, based on the information
78  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
79  * drm_encoder, drm_mode_config
80  *
81  * Returns 0 on success
82  */
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
86
87 static void
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
89
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
91                                 struct amdgpu_plane *aplane,
92                                 unsigned long possible_crtcs);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
94                                struct drm_plane *plane,
95                                uint32_t link_index);
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
97                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
98                                     uint32_t link_index,
99                                     struct amdgpu_encoder *amdgpu_encoder);
100 static int amdgpu_dm_encoder_init(struct drm_device *dev,
101                                   struct amdgpu_encoder *aencoder,
102                                   uint32_t link_index);
103
104 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
105
106 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
107                                    struct drm_atomic_state *state,
108                                    bool nonblock);
109
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
111
112 static int amdgpu_dm_atomic_check(struct drm_device *dev,
113                                   struct drm_atomic_state *state);
114
115
116
117
/*
 * Default plane table: every slot (up to AMDGPU_MAX_PLANES) is a DRM
 * primary plane; no dedicated underlay/overlay slot.
 */
static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};
126
/*
 * Carrizo plane table: three primaries plus one overlay slot for the
 * YUV-capable underlay.  (Identifier spelling "carizzo" is historical;
 * renaming would touch users outside this chunk.)
 */
static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};
133
/*
 * Stoney plane table: two primaries plus one overlay slot for the
 * YUV-capable underlay.
 */
static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
139
140 /*
141  * dm_vblank_get_counter
142  *
143  * @brief
144  * Get counter for number of vertical blanks
145  *
146  * @param
147  * struct amdgpu_device *adev - [in] desired amdgpu device
148  * int disp_idx - [in] which CRTC to get the counter from
149  *
150  * @return
151  * Counter for vertical blanks
152  */
153 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155         if (crtc >= adev->mode_info.num_crtc)
156                 return 0;
157         else {
158                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
159                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
160                                 acrtc->base.state);
161
162
163                 if (acrtc_state->stream == NULL) {
164                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
165                                   crtc);
166                         return 0;
167                 }
168
169                 return dc_stream_get_vblank_counter(acrtc_state->stream);
170         }
171 }
172
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
174                                   u32 *vbl, u32 *position)
175 {
176         uint32_t v_blank_start, v_blank_end, h_position, v_position;
177
178         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
179                 return -EINVAL;
180         else {
181                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
183                                                 acrtc->base.state);
184
185                 if (acrtc_state->stream ==  NULL) {
186                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
187                                   crtc);
188                         return 0;
189                 }
190
191                 /*
192                  * TODO rework base driver to use values directly.
193                  * for now parse it back into reg-format
194                  */
195                 dc_stream_get_scanoutpos(acrtc_state->stream,
196                                          &v_blank_start,
197                                          &v_blank_end,
198                                          &h_position,
199                                          &v_position);
200
201                 *position = v_position | (h_position << 16);
202                 *vbl = v_blank_start | (v_blank_end << 16);
203         }
204
205         return 0;
206 }
207
/* amd_ip_funcs.is_idle stub: no idle tracking implemented, report idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
213
/* amd_ip_funcs.wait_for_idle stub: nothing to wait on, always succeed. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
219
/* amd_ip_funcs.check_soft_reset stub: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
224
/* amd_ip_funcs.soft_reset stub: no reset sequence implemented, succeed. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
230
231 static struct amdgpu_crtc *
232 get_crtc_by_otg_inst(struct amdgpu_device *adev,
233                      int otg_inst)
234 {
235         struct drm_device *dev = adev->ddev;
236         struct drm_crtc *crtc;
237         struct amdgpu_crtc *amdgpu_crtc;
238
239         /*
240          * following if is check inherited from both functions where this one is
241          * used now. Need to be checked why it could happen.
242          */
243         if (otg_inst == -1) {
244                 WARN_ON(1);
245                 return adev->mode_info.crtcs[0];
246         }
247
248         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
249                 amdgpu_crtc = to_amdgpu_crtc(crtc);
250
251                 if (amdgpu_crtc->otg_inst == otg_inst)
252                         return amdgpu_crtc;
253         }
254
255         return NULL;
256 }
257
/*
 * Pageflip-completion high-IRQ handler.
 *
 * Runs in interrupt context.  Resolves the CRTC from the IRQ source,
 * completes the pending pageflip by sending the stashed vblank event to
 * userspace, and drops a vblank reference (presumably taken when the flip
 * was submitted — the submit path is outside this chunk).
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/*TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock protects pflip_status and the pending event pointer */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		/* no flip in flight for this CRTC; nothing to complete */
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wakeup userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
308
309 static void dm_crtc_high_irq(void *interrupt_params)
310 {
311         struct common_irq_params *irq_params = interrupt_params;
312         struct amdgpu_device *adev = irq_params->adev;
313         uint8_t crtc_index = 0;
314         struct amdgpu_crtc *acrtc;
315
316         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
317
318         if (acrtc)
319                 crtc_index = acrtc->crtc_id;
320
321         drm_handle_vblank(adev->ddev, crtc_index);
322 }
323
/* amd_ip_funcs.set_clockgating_state stub: no clock-gating control here. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}
329
/* amd_ip_funcs.set_powergating_state stub: no power-gating control here. */
static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
335
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle);
338
339 static void hotplug_notify_work_func(struct work_struct *work)
340 {
341         struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
342         struct drm_device *dev = dm->ddev;
343
344         drm_kms_helper_hotplug_event(dev);
345 }
346
347 #if defined(CONFIG_DRM_AMD_DC_FBC)
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data  */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE    (3840 * 2160 * 4)
352
353 static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
354 {
355         int r;
356         struct dm_comressor_info *compressor = &adev->dm.compressor;
357
358         if (!compressor->bo_ptr) {
359                 r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
360                                 AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
361                                 &compressor->gpu_addr, &compressor->cpu_addr);
362
363                 if (r)
364                         DRM_ERROR("DM: Failed to initialize fbc\n");
365         }
366
367 }
368 #endif
369
370
/* Init display KMS
 *
 * Builds dc_init_data from the adev, creates the CGS device, the DC core,
 * the freesync module and the drm-side display structures, then sets up
 * vblank support.  On any failure it jumps to the error label, which
 * tears down whatever was created (amdgpu_dm_fini checks each member).
 *
 * Returns 0 on success, -1 on failure.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* module parameter selects verbose vs. minimal DC logging */
	if (amdgpu_dc_log)
		init_data.log_mask = DC_DEFAULT_LOG_MASK;
	else
		init_data.log_mask = DC_MIN_LOG_MASK;

#if defined(CONFIG_DRM_AMD_DC_FBC)
	/* FBC compressed surface is only allocated for Carrizo (FAMILY_CZ) */
	if (adev->family == FAMILY_CZ)
		amdgpu_dm_initialize_fbc(adev);
	init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
#endif
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized!\n");
	} else {
		DRM_INFO("Display Core failed to initialize!\n");
		goto error;
	}

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	/* freesync failure is non-fatal: only logged, init continues */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	/* safe on partial init: amdgpu_dm_fini checks each member */
	amdgpu_dm_fini(adev);

	return -1;
}
479
480 static void amdgpu_dm_fini(struct amdgpu_device *adev)
481 {
482         amdgpu_dm_destroy_drm_device(&adev->dm);
483         /*
484          * TODO: pageflip, vlank interrupt
485          *
486          * amdgpu_dm_irq_fini(adev);
487          */
488
489         if (adev->dm.cgs_device) {
490                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
491                 adev->dm.cgs_device = NULL;
492         }
493         if (adev->dm.freesync_module) {
494                 mod_freesync_destroy(adev->dm.freesync_module);
495                 adev->dm.freesync_module = NULL;
496         }
497         /* DC Destroy TODO: Replace destroy DAL */
498         if (adev->dm.dc)
499                 dc_destroy(&adev->dm.dc);
500         return;
501 }
502
/* amd_ip_funcs.sw_init stub: DM needs no software-side init here. */
static int dm_sw_init(void *handle)
{
	return 0;
}
507
/* amd_ip_funcs.sw_fini stub: nothing allocated in dm_sw_init to free. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
512
513 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
514 {
515         struct amdgpu_dm_connector *aconnector;
516         struct drm_connector *connector;
517         int ret = 0;
518
519         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
520
521         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
522                 aconnector = to_amdgpu_dm_connector(connector);
523                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
524                     aconnector->mst_mgr.aux) {
525                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
526                                         aconnector, aconnector->base.base.id);
527
528                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
529                         if (ret < 0) {
530                                 DRM_ERROR("DM_MST: Failed to start MST\n");
531                                 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
532                                 return ret;
533                                 }
534                         }
535         }
536
537         drm_modeset_unlock(&dev->mode_config.connection_mutex);
538         return ret;
539 }
540
541 static int dm_late_init(void *handle)
542 {
543         struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
544
545         return detect_mst_link_for_all_connectors(dev);
546 }
547
548 static void s3_handle_mst(struct drm_device *dev, bool suspend)
549 {
550         struct amdgpu_dm_connector *aconnector;
551         struct drm_connector *connector;
552
553         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
554
555         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
556                    aconnector = to_amdgpu_dm_connector(connector);
557                    if (aconnector->dc_link->type == dc_connection_mst_branch &&
558                                    !aconnector->mst_port) {
559
560                            if (suspend)
561                                    drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
562                            else
563                                    drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
564                    }
565         }
566
567         drm_modeset_unlock(&dev->mode_config.connection_mutex);
568 }
569
/*
 * amd_ip_funcs.hw_init: create the DAL display manager, then enable HPD.
 *
 * Returns 0 on success, or the error from amdgpu_dm_init() (the original
 * ignored it and enabled HPD on a torn-down DM; amdgpu_dm_init's error
 * path has already run amdgpu_dm_fini by then).
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Create DAL display manager */
	r = amdgpu_dm_init(adev);
	if (r)
		return r;

	amdgpu_dm_hpd_init(adev);

	return 0;
}
579
/*
 * amd_ip_funcs.hw_fini: disable HPD, tear down DM interrupt handling and
 * the display manager itself.  Always succeeds.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amdgpu_dm_hpd_fini(adev);
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
590
/*
 * amd_ip_funcs.suspend: quiesce MST topology managers, disable DM
 * interrupts, cache the current atomic state (consumed later by
 * amdgpu_dm_display_resume()) and put the display hardware into D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	/* suspend MST topology managers before the state capture */
	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	/* cached_state must be empty here; resume sets it back to NULL */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
608
609 static struct amdgpu_dm_connector *
610 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
611                                              struct drm_crtc *crtc)
612 {
613         uint32_t i;
614         struct drm_connector_state *new_con_state;
615         struct drm_connector *connector;
616         struct drm_crtc *crtc_from_state;
617
618         for_each_new_connector_in_state(state, connector, new_con_state, i) {
619                 crtc_from_state = new_con_state->crtc;
620
621                 if (crtc_from_state == crtc)
622                         return to_amdgpu_dm_connector(connector);
623         }
624
625         return NULL;
626 }
627
628 static int dm_resume(void *handle)
629 {
630         struct amdgpu_device *adev = handle;
631         struct amdgpu_display_manager *dm = &adev->dm;
632
633         /* power on hardware */
634         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
635
636         return 0;
637 }
638
/*
 * Late display resume: re-run link detection on all non-MST connectors,
 * force a modeset, release the dc states duplicated during suspend (they
 * will be recreated by atomic_check) and restore the atomic state cached
 * by dm_suspend().
 *
 * Returns the result of drm_atomic_helper_resume() (0 on success).
 */
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;

	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		/* a real sink is back: stop faking the connector as enabled */
		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	/* the cached state has been consumed; don't reuse it */
	adev->dm.cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}
725
/* amd_ip_funcs dispatch table registered with the amdgpu core as "dm". */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
743
/* IP-block version descriptor: registers DM v1.0.0 as the DCE block. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
752
753
754 static struct drm_atomic_state *
755 dm_atomic_state_alloc(struct drm_device *dev)
756 {
757         struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
758
759         if (!state)
760                 return NULL;
761
762         if (drm_atomic_state_init(dev, &state->base) < 0)
763                 goto fail;
764
765         return &state->base;
766
767 fail:
768         kfree(state);
769         return NULL;
770 }
771
/*
 * .atomic_state_clear hook: drop the DC state held in dm_state->context
 * before the DRM core resets the base state for reuse.
 */
static void
dm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state->context) {
		dc_release_state(dm_state->context);
		dm_state->context = NULL;
	}

	drm_atomic_state_default_clear(state);
}
784
/* .atomic_state_free hook: release base-state resources, then the wrapper. */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	drm_atomic_state_default_release(state);
	kfree(dm_state);
}
792
/*
 * Mode-config funcs: route fb creation, atomic check/commit and atomic
 * state lifetime to the DM implementations above.
 */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
	.atomic_state_alloc = dm_atomic_state_alloc,
	.atomic_state_clear = dm_atomic_state_clear,
	.atomic_state_free = dm_atomic_state_alloc_free
};
802
803 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
804         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
805 };
806
/*
 * Synchronize the connector (its dc_sink, cached EDID and freesync
 * registration) with the sink currently attached to its DC link, after a
 * detection pass.
 *
 * Three paths: forced connectors with an emulated-EDID sink, a newly
 * attached physical sink, and a vanished sink.  MST connectors are owned
 * by the drm_mst framework and are skipped entirely.
 */
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Don't do it here during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
								connector);
				/* retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc-to-connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0) {
			/* no EDID bytes from DC; clear the cached EDID */
			aconnector->edid = NULL;
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		/* sink gone: deregister freesync, clear EDID and mode list */
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}
911
/* Low-context handler for a long HPD pulse (connect/disconnect).
 *
 * Re-runs link detection under hpd_lock; on a detected change, updates the
 * connector/sink bookkeeping, restores the DRM connector state, and (unless
 * userspace forced the connector on/off) sends a hotplug uevent.
 */
static void handle_hpd_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;

        /* In case of failure or MST no need to update connector status or notify the OS
         * since (for MST case) MST does this in it's own context.
         */
        mutex_lock(&aconnector->hpd_lock);

        /* A real HPD event supersedes any previously faked enable. */
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;

        if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                amdgpu_dm_update_connector_after_detect(aconnector);


                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                /* Only notify userspace when it has not forced this
                 * connector's state itself. */
                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);
        }
        mutex_unlock(&aconnector->hpd_lock);

}
940
/* Drain pending DP sideband (ESI) interrupts for an MST-capable link.
 *
 * Reads the sink's IRQ vector from DPCD, lets the MST manager service it,
 * ACKs it back to the sink, and repeats until no new IRQ is reported or
 * max_process_count iterations have run (guard against a stuck sink that
 * keeps re-asserting IRQs).
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
        uint8_t dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;

        /* Upper bound on ESI servicing passes per hotplug pulse. */
        const int max_process_count = 30;
        int process_count = 0;

        const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

        /* Pre-DP1.2 sinks expose the IRQ vector at the legacy 0x200 range;
         * DP1.2+ sinks use the Event Status Indicator range at 0x2002. */
        if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
                dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
                /* DPCD 0x200 - 0x201 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT;
        } else {
                dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
                /* DPCD 0x2002 - 0x2005 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT_ESI;
        }

        dret = drm_dp_dpcd_read(
                &aconnector->dm_dp_aux.aux,
                dpcd_addr,
                esi,
                dpcd_bytes_to_read);

        /* Keep servicing as long as full-sized reads succeed and the MST
         * manager reports it handled a new IRQ last pass. */
        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
                uint8_t retry;
                dret = 0;

                process_count++;

                DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                /* handle HPD short pulse irq */
                if (aconnector->mst_mgr.mst_state)
                        drm_dp_mst_hpd_irq(
                                &aconnector->mst_mgr,
                                esi,
                                &new_irq_handled);

                if (new_irq_handled) {
                        /* ACK at DPCD to notify down stream */
                        const int ack_dpcd_bytes_to_write =
                                dpcd_bytes_to_read - 1;

                        /* AUX writes can fail transiently; retry up to 3x. */
                        for (retry = 0; retry < 3; retry++) {
                                uint8_t wret;

                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
                                        dpcd_addr + 1,
                                        &esi[1],
                                        ack_dpcd_bytes_to_write);
                                if (wret == ack_dpcd_bytes_to_write)
                                        break;
                        }

                        /* check if there is new irq to be handle */
                        dret = drm_dp_dpcd_read(
                                &aconnector->dm_dp_aux.aux,
                                dpcd_addr,
                                esi,
                                dpcd_bytes_to_read);

                        new_irq_handled = false;
                } else {
                        break;
                }
        }

        if (process_count == max_process_count)
                DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
1018
/* Low-context handler for the DP "short pulse" (hpd_rx) interrupt.
 *
 * Lets DC service the RX IRQ first; if that reports a downstream-port
 * status change on a non-MST-root connector, re-detects the link and
 * notifies userspace. Pending MST sideband IRQs are then drained when the
 * link is trained or is an MST branch. hpd_lock is deliberately skipped
 * for MST branches (MST handles its own serialization).
 */
static void handle_hpd_rx_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

        /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
         * conflict, after implement i2c helper, this mutex should be
         * retired.
         */
        if (dc_link->type != dc_connection_mst_branch)
                mutex_lock(&aconnector->hpd_lock);

        if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);


                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                }
        }
        /* Drain ESI/sideband IRQs on trained links and MST branches. */
        if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
            (dc_link->type == dc_connection_mst_branch))
                dm_handle_hpd_rx_irq(aconnector);

        if (dc_link->type != dc_connection_mst_branch)
                mutex_unlock(&aconnector->hpd_lock);
}
1055
1056 static void register_hpd_handlers(struct amdgpu_device *adev)
1057 {
1058         struct drm_device *dev = adev->ddev;
1059         struct drm_connector *connector;
1060         struct amdgpu_dm_connector *aconnector;
1061         const struct dc_link *dc_link;
1062         struct dc_interrupt_params int_params = {0};
1063
1064         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1065         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1066
1067         list_for_each_entry(connector,
1068                         &dev->mode_config.connector_list, head) {
1069
1070                 aconnector = to_amdgpu_dm_connector(connector);
1071                 dc_link = aconnector->dc_link;
1072
1073                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1074                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1075                         int_params.irq_source = dc_link->irq_source_hpd;
1076
1077                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1078                                         handle_hpd_irq,
1079                                         (void *) aconnector);
1080                 }
1081
1082                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1083
1084                         /* Also register for DP short pulse (hpd_rx). */
1085                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1086                         int_params.irq_source = dc_link->irq_source_hpd_rx;
1087
1088                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1089                                         handle_hpd_rx_irq,
1090                                         (void *) aconnector);
1091                 }
1092         }
1093 }
1094
1095 /* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
        unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

        /* VEGA10/RAVEN route display interrupts through the DCE IH client. */
        if (adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_RAVEN)
                client_id = AMDGPU_IH_CLIENTID_DCE;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /* Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling. */

        /* Use VBLANK interrupt */
        for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* Per-CRTC context handed back to dm_crtc_high_irq. */
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        /* NOTE(review): srcids advance by 2 per display here — presumably
         * pflip srcids are interleaved with another per-display srcid on VI;
         * confirm against ivsrcid_vislands30.h. */
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* Per-display context handed back to dm_pflip_high_irq. */
                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, client_id,
                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
1178
1179 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1180 /* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /* Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         * */

        /* Use VSTARTUP interrupt */
        /* DCN uses one VSTARTUP srcid per CRTC, consecutively numbered. */
        for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
                        i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* Per-CRTC context handed back to dm_crtc_high_irq. */
                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        /* One HUBP flip srcid per CRTC, consecutively numbered. */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
                        i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                /* Per-display context handed back to dm_pflip_high_irq. */
                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
                        &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
1263 #endif
1264
1265 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1266 {
1267         int r;
1268
1269         adev->mode_info.mode_config_initialized = true;
1270
1271         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1272         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1273
1274         adev->ddev->mode_config.max_width = 16384;
1275         adev->ddev->mode_config.max_height = 16384;
1276
1277         adev->ddev->mode_config.preferred_depth = 24;
1278         adev->ddev->mode_config.prefer_shadow = 1;
1279         /* indicate support of immediate flip */
1280         adev->ddev->mode_config.async_page_flip = true;
1281
1282         adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1283
1284         r = amdgpu_modeset_create_props(adev);
1285         if (r)
1286                 return r;
1287
1288         return 0;
1289 }
1290
1291 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1292         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1293
1294 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1295 {
1296         struct amdgpu_display_manager *dm = bl_get_data(bd);
1297
1298         if (dc_link_set_backlight_level(dm->backlight_link,
1299                         bd->props.brightness, 0, 0))
1300                 return 0;
1301         else
1302                 return 1;
1303 }
1304
/* backlight_ops.get_brightness hook: report the cached brightness value;
 * the hardware is not queried. */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
        return bd->props.brightness;
}
1309
/* Backlight-class callbacks for the device registered in
 * amdgpu_dm_register_backlight_device(). */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .get_brightness = amdgpu_dm_backlight_get_brightness,
        .update_status  = amdgpu_dm_backlight_update_status,
};
1314
1315 static void
1316 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1317 {
1318         char bl_name[16];
1319         struct backlight_properties props = { 0 };
1320
1321         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1322         props.type = BACKLIGHT_RAW;
1323
1324         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1325                         dm->adev->ddev->primary->index);
1326
1327         dm->backlight_dev = backlight_device_register(bl_name,
1328                         dm->adev->ddev->dev,
1329                         dm,
1330                         &amdgpu_dm_backlight_ops,
1331                         &props);
1332
1333         if (IS_ERR(dm->backlight_dev))
1334                 DRM_ERROR("DM: Backlight registration failed!\n");
1335         else
1336                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1337 }
1338
1339 #endif
1340
1341 /* In this architecture, the association
1342  * connector -> encoder -> crtc
1343  * id not really requried. The crtc and connector will hold the
1344  * display_index as an abstraction to use with DAL component
1345  *
1346  * Returns 0 on success
1347  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
        struct amdgpu_display_manager *dm = &adev->dm;
        uint32_t i;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        unsigned long possible_crtcs;

        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
                DRM_ERROR("DM: Failed to initialize mode config\n");
                return -1;
        }

        /* Create one drm_plane per DC plane capability. */
        for (i = 0; i < dm->dc->caps.max_planes; i++) {
                struct amdgpu_plane *plane;

                plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
                mode_info->planes[i] = plane;

                if (!plane) {
                        DRM_ERROR("KMS: Failed to allocate plane\n");
                        goto fail;
                }
                plane->base.type = mode_info->plane_type[i];

                /*
                 * HACK: IGT tests expect that each plane can only have one
                 * one possible CRTC. For now, set one CRTC for each
                 * plane that is not an underlay, but still allow multiple
                 * CRTCs for underlay planes.
                 */
                possible_crtcs = 1 << i;
                if (i >= dm->dc->caps.max_streams)
                        possible_crtcs = 0xff;

                if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
                        DRM_ERROR("KMS: Failed to initialize plane\n");
                        goto fail;
                }
        }

        /* One CRTC per DC stream, each paired with the plane of the
         * same index created above. */
        for (i = 0; i < dm->dc->caps.max_streams; i++)
                if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
                        DRM_ERROR("KMS: Failed to initialize crtc\n");
                        goto fail;
                }

        dm->display_indexes_num = dm->dc->caps.max_streams;

        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {

                if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
                        DRM_ERROR(
                                "KMS: Cannot support more than %d display indexes\n",
                                        AMDGPU_DM_MAX_DISPLAY_INDEX);
                        continue;
                }

                aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
                if (!aconnector)
                        goto fail;

                aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
                if (!aencoder)
                        goto fail;

                if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
                        DRM_ERROR("KMS: Failed to initialize encoder\n");
                        goto fail;
                }

                if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
                        DRM_ERROR("KMS: Failed to initialize connector\n");
                        goto fail;
                }

                /* Pick up displays already attached at driver load. */
                if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
                                DETECT_REASON_BOOT))
                        amdgpu_dm_update_connector_after_detect(aconnector);
        }

        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_VEGA10:
                if (dce110_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                /*
                 * Temporary disable until pplib/smu interaction is implemented
                 */
                dm->dc->debug.disable_stutter = true;
                break;
#endif
        default:
                DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
                goto fail;
        }

        return 0;
fail:
        /* Free the connector/encoder from the failed iteration (kfree(NULL)
         * is a no-op) and all planes allocated so far.
         * NOTE(review): connectors/encoders from *earlier successful*
         * iterations are left to drm_mode_config_cleanup() — confirm. */
        kfree(aencoder);
        kfree(aconnector);
        for (i = 0; i < dm->dc->caps.max_planes; i++)
                kfree(mode_info->planes[i]);
        return -1;
}
1478
1479 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1480 {
1481         drm_mode_config_cleanup(dm->ddev);
1482         return;
1483 }
1484
1485 /******************************************************************************
1486  * amdgpu_display_funcs functions
1487  *****************************************************************************/
1488
1489 /**
1490  * dm_bandwidth_update - program display watermarks
1491  *
1492  * @adev: amdgpu_device pointer
1493  *
1494  * Calculate and program the display watermarks and line buffer allocation.
1495  */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
        /* TODO: implement later */
        /* NOTE(review): intentional no-op stub; required because the
         * .bandwidth_update hook in dm_display_funcs is called
         * unconditionally by the base driver. */
}
1500
/* Stub for the unconditionally-called .backlight_set_level hook;
 * backlight control currently goes through the backlight class device
 * (amdgpu_dm_backlight_update_status) instead. */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
                                     u8 level)
{
        /* TODO: translate amdgpu_encoder to display_index and call DAL */
}
1506
/* Stub for the unconditionally-called .backlight_get_level hook;
 * always reports 0 until the encoder-to-display_index translation
 * is implemented. */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
        /* TODO: translate amdgpu_encoder to display_index and call DAL */
        return 0;
}
1512
1513 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1514                                 struct drm_file *filp)
1515 {
1516         struct mod_freesync_params freesync_params;
1517         uint8_t num_streams;
1518         uint8_t i;
1519
1520         struct amdgpu_device *adev = dev->dev_private;
1521         int r = 0;
1522
1523         /* Get freesync enable flag from DRM */
1524
1525         num_streams = dc_get_current_stream_count(adev->dm.dc);
1526
1527         for (i = 0; i < num_streams; i++) {
1528                 struct dc_stream_state *stream;
1529                 stream = dc_get_stream_at_index(adev->dm.dc, i);
1530
1531                 mod_freesync_update_state(adev->dm.freesync_module,
1532                                           &stream, 1, &freesync_params);
1533         }
1534
1535         return r;
1536 }
1537
/* Hooks into amdgpu's generic display layer. NULL entries are services
 * handled by DC / VBIOS parsing rather than the base driver. */
static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
        .vblank_wait = NULL,
        .backlight_set_level =
                dm_set_backlight_level,/* called unconditionally */
        .backlight_get_level =
                dm_get_backlight_level,/* called unconditionally */
        .hpd_sense = NULL,/* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
        .page_flip_get_scanoutpos =
                dm_crtc_get_scanoutpos,/* called unconditionally */
        .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
        .add_connector = NULL, /* VBIOS parsing. DAL does it. */
        .notify_freesync = amdgpu_notify_freesync,

};
1556
1557 #if defined(CONFIG_DEBUG_KERNEL_DC)
1558
1559 static ssize_t s3_debug_store(struct device *device,
1560                               struct device_attribute *attr,
1561                               const char *buf,
1562                               size_t count)
1563 {
1564         int ret;
1565         int s3_state;
1566         struct pci_dev *pdev = to_pci_dev(device);
1567         struct drm_device *drm_dev = pci_get_drvdata(pdev);
1568         struct amdgpu_device *adev = drm_dev->dev_private;
1569
1570         ret = kstrtoint(buf, 0, &s3_state);
1571
1572         if (ret == 0) {
1573                 if (s3_state) {
1574                         dm_resume(adev);
1575                         amdgpu_dm_display_resume(adev);
1576                         drm_kms_helper_hotplug_event(adev->ddev);
1577                 } else
1578                         dm_suspend(adev);
1579         }
1580
1581         return ret == 0 ? count : 0;
1582 }
1583
1584 DEVICE_ATTR_WO(s3_debug);
1585
1586 #endif
1587
/* IP-block early_init: mark the driver atomic-capable, install DM IRQ
 * funcs, and fill in per-ASIC CRTC/HPD/DIG counts and plane-type tables
 * used later by amdgpu_dm_initialize_drm_device().
 * Returns 0, or -EINVAL for an unknown ASIC.
 */
static int dm_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
        amdgpu_dm_set_irq_funcs(adev);

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_KAVERI:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_KABINI:
        case CHIP_MULLINS:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_FIJI:
        case CHIP_TONGA:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 7;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_CARRIZO:
                adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                /* Carrizo has its own underlay-plane layout. */
                adev->mode_info.plane_type = dm_plane_type_carizzo;
                break;
        case CHIP_STONEY:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                /* Stoney has its own underlay-plane layout. */
                adev->mode_info.plane_type = dm_plane_type_stoney;
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_POLARIS10:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
        case CHIP_VEGA10:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
#endif
        default:
                DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
        }

        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dm_display_funcs;

        /* Note: Do NOT change adev->audio_endpt_rreg and
         * adev->audio_endpt_wreg because they are initialised in
         * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
        /* Expose the s3_debug attribute for faking suspend/resume. */
        device_create_file(
                adev->ddev->dev,
                &dev_attr_s3_debug);
#endif

        return 0;
}
1681
/*
 * DM-private connector state: wraps the base DRM connector state and adds
 * the amdgpu scaling and underscan settings consumed by
 * update_stream_scaling_settings().
 */
struct dm_connector_state {
	struct drm_connector_state base;

	/* Requested scaling mode (RMX_OFF, RMX_ASPECT, RMX_CENTER, ...). */
	enum amdgpu_rmx_type scaling;
	/* Underscan border sizes in pixels; applied only when
	 * underscan_enable is set. */
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
};

/* Upcast an embedded drm_connector_state to its dm_connector_state. */
#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
1693
1694 static bool modeset_required(struct drm_crtc_state *crtc_state,
1695                              struct dc_stream_state *new_stream,
1696                              struct dc_stream_state *old_stream)
1697 {
1698         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1699                 return false;
1700
1701         if (!crtc_state->enable)
1702                 return false;
1703
1704         return crtc_state->active;
1705 }
1706
1707 static bool modereset_required(struct drm_crtc_state *crtc_state)
1708 {
1709         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1710                 return false;
1711
1712         return !crtc_state->enable || !crtc_state->active;
1713 }
1714
/* drm_encoder_funcs.destroy callback: unregister and free the encoder. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
1724
1725 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1726                                         struct dc_plane_state *plane_state)
1727 {
1728         plane_state->src_rect.x = state->src_x >> 16;
1729         plane_state->src_rect.y = state->src_y >> 16;
1730         /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1731         plane_state->src_rect.width = state->src_w >> 16;
1732
1733         if (plane_state->src_rect.width == 0)
1734                 return false;
1735
1736         plane_state->src_rect.height = state->src_h >> 16;
1737         if (plane_state->src_rect.height == 0)
1738                 return false;
1739
1740         plane_state->dst_rect.x = state->crtc_x;
1741         plane_state->dst_rect.y = state->crtc_y;
1742
1743         if (state->crtc_w == 0)
1744                 return false;
1745
1746         plane_state->dst_rect.width = state->crtc_w;
1747
1748         if (state->crtc_h == 0)
1749                 return false;
1750
1751         plane_state->dst_rect.height = state->crtc_h;
1752
1753         plane_state->clip_rect = plane_state->dst_rect;
1754
1755         switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1756         case DRM_MODE_ROTATE_0:
1757                 plane_state->rotation = ROTATION_ANGLE_0;
1758                 break;
1759         case DRM_MODE_ROTATE_90:
1760                 plane_state->rotation = ROTATION_ANGLE_90;
1761                 break;
1762         case DRM_MODE_ROTATE_180:
1763                 plane_state->rotation = ROTATION_ANGLE_180;
1764                 break;
1765         case DRM_MODE_ROTATE_270:
1766                 plane_state->rotation = ROTATION_ANGLE_270;
1767                 break;
1768         default:
1769                 plane_state->rotation = ROTATION_ANGLE_0;
1770                 break;
1771         }
1772
1773         return true;
1774 }
1775 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1776                        uint64_t *tiling_flags,
1777                        uint64_t *fb_location)
1778 {
1779         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1780         int r = amdgpu_bo_reserve(rbo, false);
1781
1782         if (unlikely(r)) {
1783                 // Don't show error msg. when return -ERESTARTSYS
1784                 if (r != -ERESTARTSYS)
1785                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
1786                 return r;
1787         }
1788
1789         if (fb_location)
1790                 *fb_location = amdgpu_bo_gpu_offset(rbo);
1791
1792         if (tiling_flags)
1793                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1794
1795         amdgpu_bo_unreserve(rbo);
1796
1797         return r;
1798 }
1799
/*
 * Translate an amdgpu framebuffer into DC plane attributes: pixel
 * format, surface address(es), surface/plane sizes and tiling info.
 * When @addReq is true the BO's GPU address is queried and programmed
 * into the plane address as well; otherwise the address fields stay 0.
 *
 * Returns 0 on success, the get_fb_info() error, or -EINVAL for an
 * unsupported pixel format.
 */
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
					 struct dc_plane_state *plane_state,
					 const struct amdgpu_framebuffer *amdgpu_fb,
					 bool addReq)
{
	uint64_t tiling_flags;
	uint64_t fb_location = 0;
	uint64_t chroma_addr = 0;
	unsigned int awidth;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	int ret = 0;
	struct drm_format_name_buf format_name;

	ret = get_fb_info(
		amdgpu_fb,
		&tiling_flags,
		addReq == true ? &fb_location:NULL);

	if (ret)
		return ret;

	/* Map the DRM fourcc onto the matching DC surface format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_NV21:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Packed (RGB) formats: a single graphics plane. */
		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
		plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
		plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
		plane_state->plane_size.grph.surface_size.x = 0;
		plane_state->plane_size.grph.surface_size.y = 0;
		plane_state->plane_size.grph.surface_size.width = fb->width;
		plane_state->plane_size.grph.surface_size.height = fb->height;
		/* Pitch in pixels, not bytes. */
		plane_state->plane_size.grph.surface_pitch =
				fb->pitches[0] / fb->format->cpp[0];
		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_SRGB;

	} else {
		/* NV12/NV21 video: luma plane with the chroma plane placed
		 * immediately after it; the pitch is the width aligned up
		 * to 64. */
		awidth = ALIGN(fb->width, 64);
		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		plane_state->address.video_progressive.luma_addr.low_part
						= lower_32_bits(fb_location);
		plane_state->address.video_progressive.luma_addr.high_part
						= upper_32_bits(fb_location);
		chroma_addr = fb_location + (u64)(awidth * fb->height);
		plane_state->address.video_progressive.chroma_addr.low_part
						= lower_32_bits(chroma_addr);
		plane_state->address.video_progressive.chroma_addr.high_part
						= upper_32_bits(chroma_addr);
		plane_state->plane_size.video.luma_size.x = 0;
		plane_state->plane_size.video.luma_size.y = 0;
		plane_state->plane_size.video.luma_size.width = awidth;
		plane_state->plane_size.video.luma_size.height = fb->height;
		/* TODO: unhardcode */
		plane_state->plane_size.video.luma_pitch = awidth;

		plane_state->plane_size.video.chroma_size.x = 0;
		plane_state->plane_size.video.chroma_size.y = 0;
		plane_state->plane_size.video.chroma_size.width = awidth;
		plane_state->plane_size.video.chroma_size.height = fb->height;
		plane_state->plane_size.video.chroma_pitch = awidth / 2;

		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_YCBCR709;
	}

	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		plane_state->tiling_info.gfx8.num_banks = num_banks;
		plane_state->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		plane_state->tiling_info.gfx8.tile_split = tile_split;
		plane_state->tiling_info.gfx8.bank_width = bankw;
		plane_state->tiling_info.gfx8.bank_height = bankh;
		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
		plane_state->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	plane_state->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	/* GFX9 ASICs additionally need the gb_addr_config-derived params. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		plane_state->tiling_info.gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		plane_state->tiling_info.gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		plane_state->tiling_info.gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		plane_state->tiling_info.gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		plane_state->tiling_info.gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		plane_state->tiling_info.gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		plane_state->tiling_info.gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		plane_state->tiling_info.gfx9.shaderEnable = 1;
	}

	plane_state->visible = true;
	plane_state->scaling_quality.h_taps_c = 0;
	plane_state->scaling_quality.v_taps_c = 0;

	/* is this needed? is plane_state zeroed at allocation? */
	plane_state->scaling_quality.h_taps = 0;
	plane_state->scaling_quality.v_taps = 0;
	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;

	return ret;

}
1956
1957 static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
1958                                        struct dc_plane_state *plane_state)
1959 {
1960         int i;
1961         struct dc_gamma *gamma;
1962         struct drm_color_lut *lut =
1963                         (struct drm_color_lut *) crtc_state->gamma_lut->data;
1964
1965         gamma = dc_create_gamma();
1966
1967         if (gamma == NULL) {
1968                 WARN_ON(1);
1969                 return;
1970         }
1971
1972         gamma->type = GAMMA_RGB_256;
1973         gamma->num_entries = GAMMA_RGB_256_ENTRIES;
1974         for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
1975                 gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
1976                 gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
1977                 gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
1978         }
1979
1980         plane_state->gamma_correction = gamma;
1981 }
1982
/*
 * Populate @dc_plane_state from the DRM plane and CRTC state: rects,
 * framebuffer-derived attributes, an sRGB input transfer function and,
 * when a gamma LUT is attached to the CRTC, the gamma correction.
 *
 * Returns 0 on success, -EINVAL for degenerate rects, -ENOMEM when the
 * transfer function cannot be allocated, or the fb-attribute error.
 *
 * NOTE(review): @adev is unused here; the device is taken from
 * crtc->dev->dev_private instead.
 */
static int fill_plane_attributes(struct amdgpu_device *adev,
				 struct dc_plane_state *dc_plane_state,
				 struct drm_plane_state *plane_state,
				 struct drm_crtc_state *crtc_state,
				 bool addrReq)
{
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	const struct drm_crtc *crtc = plane_state->crtc;
	struct dc_transfer_func *input_tf;
	int ret = 0;

	if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
		return -EINVAL;

	ret = fill_plane_attributes_from_fb(
		crtc->dev->dev_private,
		dc_plane_state,
		amdgpu_fb,
		addrReq);

	if (ret)
		return ret;

	/* Default the input transfer function to predefined sRGB. */
	input_tf = dc_create_transfer_func();

	if (input_tf == NULL)
		return -ENOMEM;

	input_tf->type = TF_TYPE_PREDEFINED;
	input_tf->tf = TRANSFER_FUNCTION_SRGB;

	dc_plane_state->in_transfer_func = input_tf;

	/* In case of gamma set, update gamma value */
	if (crtc_state->gamma_lut)
		fill_gamma_from_crtc_state(crtc_state, dc_plane_state);

	return ret;
}
2023
2024 /*****************************************************************************/
2025
2026 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2027                                            const struct dm_connector_state *dm_state,
2028                                            struct dc_stream_state *stream)
2029 {
2030         enum amdgpu_rmx_type rmx_type;
2031
2032         struct rect src = { 0 }; /* viewport in composition space*/
2033         struct rect dst = { 0 }; /* stream addressable area */
2034
2035         /* no mode. nothing to be done */
2036         if (!mode)
2037                 return;
2038
2039         /* Full screen scaling by default */
2040         src.width = mode->hdisplay;
2041         src.height = mode->vdisplay;
2042         dst.width = stream->timing.h_addressable;
2043         dst.height = stream->timing.v_addressable;
2044
2045         rmx_type = dm_state->scaling;
2046         if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2047                 if (src.width * dst.height <
2048                                 src.height * dst.width) {
2049                         /* height needs less upscaling/more downscaling */
2050                         dst.width = src.width *
2051                                         dst.height / src.height;
2052                 } else {
2053                         /* width needs less upscaling/more downscaling */
2054                         dst.height = src.height *
2055                                         dst.width / src.width;
2056                 }
2057         } else if (rmx_type == RMX_CENTER) {
2058                 dst = src;
2059         }
2060
2061         dst.x = (stream->timing.h_addressable - dst.width) / 2;
2062         dst.y = (stream->timing.v_addressable - dst.height) / 2;
2063
2064         if (dm_state->underscan_enable) {
2065                 dst.x += dm_state->underscan_hborder / 2;
2066                 dst.y += dm_state->underscan_vborder / 2;
2067                 dst.width -= dm_state->underscan_hborder;
2068                 dst.height -= dm_state->underscan_vborder;
2069         }
2070
2071         stream->src = src;
2072         stream->dst = dst;
2073
2074         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2075                         dst.x, dst.y, dst.width, dst.height);
2076
2077 }
2078
/*
 * Derive the DC color depth from the connector's EDID-reported bpc.
 * Depth is currently clamped to 8 bpc, so the 10/12/14/16 cases below
 * are unreachable until deep color support is implemented.
 */
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector)
{
	uint32_t bpc = connector->display_info.bpc;

	/* Limited color depth to 8bit
	 * TODO: Still need to handle deep color
	 */
	if (bpc > 8)
		bpc = 8;

	switch (bpc) {
	case 0:
		/* Temporary Work around, DRM don't parse color depth for
		 * EDID revision before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
2113
2114 static enum dc_aspect_ratio
2115 get_aspect_ratio(const struct drm_display_mode *mode_in)
2116 {
2117         int32_t width = mode_in->crtc_hdisplay * 9;
2118         int32_t height = mode_in->crtc_vdisplay * 16;
2119
2120         if ((width - height) < 10 && (width - height) > -10)
2121                 return ASPECT_RATIO_16_9;
2122         else
2123                 return ASPECT_RATIO_4_3;
2124 }
2125
2126 static enum dc_color_space
2127 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2128 {
2129         enum dc_color_space color_space = COLOR_SPACE_SRGB;
2130
2131         switch (dc_crtc_timing->pixel_encoding) {
2132         case PIXEL_ENCODING_YCBCR422:
2133         case PIXEL_ENCODING_YCBCR444:
2134         case PIXEL_ENCODING_YCBCR420:
2135         {
2136                 /*
2137                  * 27030khz is the separation point between HDTV and SDTV
2138                  * according to HDMI spec, we use YCbCr709 and YCbCr601
2139                  * respectively
2140                  */
2141                 if (dc_crtc_timing->pix_clk_khz > 27030) {
2142                         if (dc_crtc_timing->flags.Y_ONLY)
2143                                 color_space =
2144                                         COLOR_SPACE_YCBCR709_LIMITED;
2145                         else
2146                                 color_space = COLOR_SPACE_YCBCR709;
2147                 } else {
2148                         if (dc_crtc_timing->flags.Y_ONLY)
2149                                 color_space =
2150                                         COLOR_SPACE_YCBCR601_LIMITED;
2151                         else
2152                                 color_space = COLOR_SPACE_YCBCR601;
2153                 }
2154
2155         }
2156         break;
2157         case PIXEL_ENCODING_RGB:
2158                 color_space = COLOR_SPACE_SRGB;
2159                 break;
2160
2161         default:
2162                 WARN_ON(1);
2163                 break;
2164         }
2165
2166         return color_space;
2167 }
2168
2169 /*****************************************************************************/
2170
2171 static void
2172 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2173                                              const struct drm_display_mode *mode_in,
2174                                              const struct drm_connector *connector)
2175 {
2176         struct dc_crtc_timing *timing_out = &stream->timing;
2177
2178         memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2179
2180         timing_out->h_border_left = 0;
2181         timing_out->h_border_right = 0;
2182         timing_out->v_border_top = 0;
2183         timing_out->v_border_bottom = 0;
2184         /* TODO: un-hardcode */
2185
2186         if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2187                         && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2188                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2189         else
2190                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2191
2192         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2193         timing_out->display_color_depth = convert_color_depth_from_display_info(
2194                         connector);
2195         timing_out->scan_type = SCANNING_TYPE_NODATA;
2196         timing_out->hdmi_vic = 0;
2197         timing_out->vic = drm_match_cea_mode(mode_in);
2198
2199         timing_out->h_addressable = mode_in->crtc_hdisplay;
2200         timing_out->h_total = mode_in->crtc_htotal;
2201         timing_out->h_sync_width =
2202                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2203         timing_out->h_front_porch =
2204                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2205         timing_out->v_total = mode_in->crtc_vtotal;
2206         timing_out->v_addressable = mode_in->crtc_vdisplay;
2207         timing_out->v_front_porch =
2208                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2209         timing_out->v_sync_width =
2210                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2211         timing_out->pix_clk_khz = mode_in->crtc_clock;
2212         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2213         if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2214                 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2215         if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2216                 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2217
2218         stream->output_color_space = get_output_color_space(timing_out);
2219
2220         {
2221                 struct dc_transfer_func *tf = dc_create_transfer_func();
2222
2223                 tf->type = TF_TYPE_PREDEFINED;
2224                 tf->tf = TRANSFER_FUNCTION_SRGB;
2225                 stream->out_transfer_func = tf;
2226         }
2227 }
2228
2229 static void fill_audio_info(struct audio_info *audio_info,
2230                             const struct drm_connector *drm_connector,
2231                             const struct dc_sink *dc_sink)
2232 {
2233         int i = 0;
2234         int cea_revision = 0;
2235         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
2236
2237         audio_info->manufacture_id = edid_caps->manufacturer_id;
2238         audio_info->product_id = edid_caps->product_id;
2239
2240         cea_revision = drm_connector->display_info.cea_rev;
2241
2242         strncpy(audio_info->display_name,
2243                 edid_caps->display_name,
2244                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
2245
2246         if (cea_revision >= 3) {
2247                 audio_info->mode_count = edid_caps->audio_mode_count;
2248
2249                 for (i = 0; i < audio_info->mode_count; ++i) {
2250                         audio_info->modes[i].format_code =
2251                                         (enum audio_format_code)
2252                                         (edid_caps->audio_modes[i].format_code);
2253                         audio_info->modes[i].channel_count =
2254                                         edid_caps->audio_modes[i].channel_count;
2255                         audio_info->modes[i].sample_rates.all =
2256                                         edid_caps->audio_modes[i].sample_rate;
2257                         audio_info->modes[i].sample_size =
2258                                         edid_caps->audio_modes[i].sample_size;
2259                 }
2260         }
2261
2262         audio_info->flags.all = edid_caps->speaker_flags;
2263
2264         /* TODO: We only check for the progressive mode, check for interlace mode too */
2265         if (drm_connector->latency_present[0]) {
2266                 audio_info->video_latency = drm_connector->video_latency[0];
2267                 audio_info->audio_latency = drm_connector->audio_latency[0];
2268         }
2269
2270         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
2271
2272 }
2273
/*
 * Copy only the crtc_* (hardware timing) fields from @src_mode into
 * @dst_mode, leaving the logical mode fields untouched.
 */
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
2293
2294 static void
2295 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2296                                         const struct drm_display_mode *native_mode,
2297                                         bool scale_enabled)
2298 {
2299         if (scale_enabled) {
2300                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2301         } else if (native_mode->clock == drm_mode->clock &&
2302                         native_mode->htotal == drm_mode->htotal &&
2303                         native_mode->vtotal == drm_mode->vtotal) {
2304                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2305         } else {
2306                 /* no scaling nor amdgpu inserted, no need to patch */
2307         }
2308 }
2309
/*
 * Attach a virtual (fake) sink to @aconnector so a stream can still be
 * created while no physical sink is present.  Also records the sink as
 * the link's local sink.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *sink = NULL;
	struct dc_sink_init_data sink_init_data = { 0 };

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return -ENOMEM;
	}

	/* Mark the sink as virtual and remember that it is fake. */
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
	aconnector->fake_enable = true;

	aconnector->dc_sink = sink;
	aconnector->dc_link->local_sink = sink;

	return 0;
}
2332
/*
 * Build a dc_stream_state for @aconnector using @drm_mode and the DM
 * connector state.  When the connector has no sink yet, an MST sink is
 * created via the MST port, or a fake sink otherwise.  Returns the new
 * stream, or NULL on any failure (all error gotos fall through to the
 * single return below with stream still NULL).
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (dm_state == NULL) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		/*
		 * Create dc_sink when necessary to MST
		 * Don't apply fake_sink to MST
		 */
		if (aconnector->mst_port) {
			dm_dp_mst_dc_sink_create(drm_connector);
			goto mst_dc_sink_create_done;
		}

		if (create_fake_sink(aconnector))
			goto stream_create_fail;
	}

	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (preferred_mode == NULL) {
		/* This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode and the modelist may not be filled in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state->scaling != RMX_OFF);
	}

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

	/* Single exit point: all gotos above land here with stream == NULL. */
stream_create_fail:
dm_state_null:
drm_connector_null:
mst_dc_sink_create_done:
	return stream;
}
2418
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	/* Tear down the DRM core bookkeeping for this CRTC, then free the
	 * containing amdgpu_crtc (allocated in amdgpu_dm_crtc_init()).
	 */
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2424
2425 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2426                                   struct drm_crtc_state *state)
2427 {
2428         struct dm_crtc_state *cur = to_dm_crtc_state(state);
2429
2430         /* TODO Destroy dc_stream objects are stream object is flattened */
2431         if (cur->stream)
2432                 dc_stream_release(cur->stream);
2433
2434
2435         __drm_atomic_helper_crtc_destroy_state(state);
2436
2437
2438         kfree(state);
2439 }
2440
2441 static void dm_crtc_reset_state(struct drm_crtc *crtc)
2442 {
2443         struct dm_crtc_state *state;
2444
2445         if (crtc->state)
2446                 dm_crtc_destroy_state(crtc, crtc->state);
2447
2448         state = kzalloc(sizeof(*state), GFP_KERNEL);
2449         if (WARN_ON(!state))
2450                 return;
2451
2452         crtc->state = &state->base;
2453         crtc->state->crtc = crtc;
2454
2455 }
2456
2457 static struct drm_crtc_state *
2458 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2459 {
2460         struct dm_crtc_state *state, *cur;
2461
2462         cur = to_dm_crtc_state(crtc->state);
2463
2464         if (WARN_ON(!crtc->state))
2465                 return NULL;
2466
2467         state = kzalloc(sizeof(*state), GFP_KERNEL);
2468         if (!state)
2469                 return NULL;
2470
2471         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2472
2473         if (cur->stream) {
2474                 state->stream = cur->stream;
2475                 dc_stream_retain(state->stream);
2476         }
2477
2478         /* TODO Duplicate dc_stream after objects are stream object is flattened */
2479
2480         return &state->base;
2481 }
2482
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
};
2493
2494 static enum drm_connector_status
2495 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2496 {
2497         bool connected;
2498         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2499
2500         /* Notes:
2501          * 1. This interface is NOT called in context of HPD irq.
2502          * 2. This interface *is called* in context of user-mode ioctl. Which
2503          * makes it a bad place for *any* MST-related activit. */
2504
2505         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2506             !aconnector->fake_enable)
2507                 connected = (aconnector->dc_sink != NULL);
2508         else
2509                 connected = (aconnector->base.force == DRM_FORCE_ON);
2510
2511         return (connected ? connector_status_connected :
2512                         connector_status_disconnected);
2513 }
2514
2515 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2516                                             struct drm_connector_state *connector_state,
2517                                             struct drm_property *property,
2518                                             uint64_t val)
2519 {
2520         struct drm_device *dev = connector->dev;
2521         struct amdgpu_device *adev = dev->dev_private;
2522         struct dm_connector_state *dm_old_state =
2523                 to_dm_connector_state(connector->state);
2524         struct dm_connector_state *dm_new_state =
2525                 to_dm_connector_state(connector_state);
2526
2527         int ret = -EINVAL;
2528
2529         if (property == dev->mode_config.scaling_mode_property) {
2530                 enum amdgpu_rmx_type rmx_type;
2531
2532                 switch (val) {
2533                 case DRM_MODE_SCALE_CENTER:
2534                         rmx_type = RMX_CENTER;
2535                         break;
2536                 case DRM_MODE_SCALE_ASPECT:
2537                         rmx_type = RMX_ASPECT;
2538                         break;
2539                 case DRM_MODE_SCALE_FULLSCREEN:
2540                         rmx_type = RMX_FULL;
2541                         break;
2542                 case DRM_MODE_SCALE_NONE:
2543                 default:
2544                         rmx_type = RMX_OFF;
2545                         break;
2546                 }
2547
2548                 if (dm_old_state->scaling == rmx_type)
2549                         return 0;
2550
2551                 dm_new_state->scaling = rmx_type;
2552                 ret = 0;
2553         } else if (property == adev->mode_info.underscan_hborder_property) {
2554                 dm_new_state->underscan_hborder = val;
2555                 ret = 0;
2556         } else if (property == adev->mode_info.underscan_vborder_property) {
2557                 dm_new_state->underscan_vborder = val;
2558                 ret = 0;
2559         } else if (property == adev->mode_info.underscan_property) {
2560                 dm_new_state->underscan_enable = val;
2561                 ret = 0;
2562         }
2563
2564         return ret;
2565 }
2566
2567 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2568                                             const struct drm_connector_state *state,
2569                                             struct drm_property *property,
2570                                             uint64_t *val)
2571 {
2572         struct drm_device *dev = connector->dev;
2573         struct amdgpu_device *adev = dev->dev_private;
2574         struct dm_connector_state *dm_state =
2575                 to_dm_connector_state(state);
2576         int ret = -EINVAL;
2577
2578         if (property == dev->mode_config.scaling_mode_property) {
2579                 switch (dm_state->scaling) {
2580                 case RMX_CENTER:
2581                         *val = DRM_MODE_SCALE_CENTER;
2582                         break;
2583                 case RMX_ASPECT:
2584                         *val = DRM_MODE_SCALE_ASPECT;
2585                         break;
2586                 case RMX_FULL:
2587                         *val = DRM_MODE_SCALE_FULLSCREEN;
2588                         break;
2589                 case RMX_OFF:
2590                 default:
2591                         *val = DRM_MODE_SCALE_NONE;
2592                         break;
2593                 }
2594                 ret = 0;
2595         } else if (property == adev->mode_info.underscan_hborder_property) {
2596                 *val = dm_state->underscan_hborder;
2597                 ret = 0;
2598         } else if (property == adev->mode_info.underscan_vborder_property) {
2599                 *val = dm_state->underscan_vborder;
2600                 ret = 0;
2601         } else if (property == adev->mode_info.underscan_property) {
2602                 *val = dm_state->underscan_enable;
2603                 ret = 0;
2604         }
2605         return ret;
2606 }
2607
/*
 * Destroy a DM connector: release the backlight device for embedded
 * panels, then unregister/cleanup the DRM connector and free it.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* NOTE(review): registering immediately before unregistering looks
	 * odd — presumably it guarantees dm->backlight_dev is populated so
	 * the unregister below always releases it; confirm the intent.
	 */
	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}

	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
2631
2632 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
2633 {
2634         struct dm_connector_state *state =
2635                 to_dm_connector_state(connector->state);
2636
2637         kfree(state);
2638
2639         state = kzalloc(sizeof(*state), GFP_KERNEL);
2640
2641         if (state) {
2642                 state->scaling = RMX_OFF;
2643                 state->underscan_enable = false;
2644                 state->underscan_hborder = 0;
2645                 state->underscan_vborder = 0;
2646
2647                 connector->state = &state->base;
2648                 connector->state->connector = connector;
2649         }
2650 }
2651
2652 struct drm_connector_state *
2653 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2654 {
2655         struct dm_connector_state *state =
2656                 to_dm_connector_state(connector->state);
2657
2658         struct dm_connector_state *new_state =
2659                         kmemdup(state, sizeof(*state), GFP_KERNEL);
2660
2661         if (new_state) {
2662                 __drm_atomic_helper_connector_duplicate_state(connector,
2663                                                               &new_state->base);
2664                 return &new_state->base;
2665         }
2666
2667         return NULL;
2668 }
2669
/* Connector vtable: DM-specific hooks plus stock atomic helpers. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
2680
2681 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2682 {
2683         int enc_id = connector->encoder_ids[0];
2684         struct drm_mode_object *obj;
2685         struct drm_encoder *encoder;
2686
2687         DRM_DEBUG_DRIVER("Finding the best encoder\n");
2688
2689         /* pick the encoder ids */
2690         if (enc_id) {
2691                 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2692                 if (!obj) {
2693                         DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2694                         return NULL;
2695                 }
2696                 encoder = obj_to_encoder(obj);
2697                 return encoder;
2698         }
2699         DRM_ERROR("No encoder id\n");
2700         return NULL;
2701 }
2702
/* Thin adapter matching the drm_connector_helper_funcs.get_modes
 * signature; all work happens in amdgpu_dm_connector_get_modes().
 */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2707
2708 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
2709 {
2710         struct dc_sink_init_data init_params = {
2711                         .link = aconnector->dc_link,
2712                         .sink_signal = SIGNAL_TYPE_VIRTUAL
2713         };
2714         struct edid *edid;
2715
2716         if (!aconnector->base.edid_blob_ptr ||
2717                 !aconnector->base.edid_blob_ptr->data) {
2718                 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2719                                 aconnector->base.name);
2720
2721                 aconnector->base.force = DRM_FORCE_OFF;
2722                 aconnector->base.override_edid = false;
2723                 return;
2724         }
2725
2726         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
2727
2728         aconnector->edid = edid;
2729
2730         aconnector->dc_em_sink = dc_link_add_remote_sink(
2731                 aconnector->dc_link,
2732                 (uint8_t *)edid,
2733                 (edid->extensions + 1) * EDID_LENGTH,
2734                 &init_params);
2735
2736         if (aconnector->base.force == DRM_FORCE_ON)
2737                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
2738                 aconnector->dc_link->local_sink :
2739                 aconnector->dc_em_sink;
2740 }
2741
/*
 * One-time setup for forced (overridden) EDID handling on a connector:
 * seed plausible DP link caps and create the emulated sink.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	/* Mark the override before creating the sink; create_eml_sink()
	 * clears it again if no EDID blob is present. */
	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
2758
2759 int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
2760                                    struct drm_display_mode *mode)
2761 {
2762         int result = MODE_ERROR;
2763         struct dc_sink *dc_sink;
2764         struct amdgpu_device *adev = connector->dev->dev_private;
2765         /* TODO: Unhardcode stream count */
2766         struct dc_stream_state *stream;
2767         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2768
2769         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
2770                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
2771                 return result;
2772
2773         /* Only run this the first time mode_valid is called to initilialize
2774          * EDID mgmt
2775          */
2776         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
2777                 !aconnector->dc_em_sink)
2778                 handle_edid_mgmt(aconnector);
2779
2780         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
2781
2782         if (dc_sink == NULL) {
2783                 DRM_ERROR("dc_sink is NULL!\n");
2784                 goto fail;
2785         }
2786
2787         stream = dc_create_stream_for_sink(dc_sink);
2788         if (stream == NULL) {
2789                 DRM_ERROR("Failed to create stream for sink!\n");
2790                 goto fail;
2791         }
2792
2793         drm_mode_set_crtcinfo(mode, 0);
2794         fill_stream_properties_from_drm_display_mode(stream, mode, connector);
2795
2796         stream->src.width = mode->hdisplay;
2797         stream->src.height = mode->vdisplay;
2798         stream->dst = stream->src;
2799
2800         if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
2801                 result = MODE_OK;
2802
2803         dc_stream_release(stream);
2804
2805 fail:
2806         /* TODO: error handling*/
2807         return result;
2808 }
2809
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB console mode, the
	 * bigger resolution modes will be filtered by drm_mode_validate_size()
	 * and will be missing after the user starts lightdm. So we need to
	 * renew the mode list in the get_modes callback, not just return the
	 * mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};
2822
/* Intentionally empty: CRTC disable is driven through the DC state
 * programming path, not this helper.
 */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2826
2827 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2828                                        struct drm_crtc_state *state)
2829 {
2830         struct amdgpu_device *adev = crtc->dev->dev_private;
2831         struct dc *dc = adev->dm.dc;
2832         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2833         int ret = -EINVAL;
2834
2835         if (unlikely(!dm_crtc_state->stream &&
2836                      modeset_required(state, NULL, dm_crtc_state->stream))) {
2837                 WARN_ON(1);
2838                 return ret;
2839         }
2840
2841         /* In some use cases, like reset, no stream  is attached */
2842         if (!dm_crtc_state->stream)
2843                 return 0;
2844
2845         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2846                 return 0;
2847
2848         return ret;
2849 }
2850
/* No fixup needed: every mode is accepted as-is; DC performs its own
 * timing adjustments later.
 */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
2857
/* CRTC helper vtable used by the atomic helpers. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};
2863
/* Intentionally empty: encoder disable is handled through DC. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
2868
/* No encoder-level validation needed; always succeeds. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
2875
/* Encoder helper vtable (non-static: shared with MST code). */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
2880
2881 static void dm_drm_plane_reset(struct drm_plane *plane)
2882 {
2883         struct dm_plane_state *amdgpu_state = NULL;
2884
2885         if (plane->state)
2886                 plane->funcs->atomic_destroy_state(plane, plane->state);
2887
2888         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2889         WARN_ON(amdgpu_state == NULL);
2890         
2891         if (amdgpu_state) {
2892                 plane->state = &amdgpu_state->base;
2893                 plane->state->plane = plane;
2894                 plane->state->rotation = DRM_MODE_ROTATE_0;
2895         }
2896 }
2897
2898 static struct drm_plane_state *
2899 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2900 {
2901         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2902
2903         old_dm_plane_state = to_dm_plane_state(plane->state);
2904         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2905         if (!dm_plane_state)
2906                 return NULL;
2907
2908         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2909
2910         if (old_dm_plane_state->dc_state) {
2911                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2912                 dc_plane_state_retain(dm_plane_state->dc_state);
2913         }
2914
2915         return &dm_plane_state->base;
2916 }
2917
2918 void dm_drm_plane_destroy_state(struct drm_plane *plane,
2919                                 struct drm_plane_state *state)
2920 {
2921         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
2922
2923         if (dm_plane_state->dc_state)
2924                 dc_plane_state_release(dm_plane_state->dc_state);
2925
2926         drm_atomic_helper_plane_destroy_state(plane, state);
2927 }
2928
/* Plane vtable: stock atomic helpers plus DM-specific state management. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
2937
/*
 * prepare_fb hook: pin the new framebuffer's BO into VRAM and program
 * its scanout address(es) into the pending dc_plane_state.
 *
 * On success the BO carries an extra reference and pin that are undone
 * in dm_plane_helper_cleanup_fb(). Returns 0, or a negative errno from
 * reserve/pin.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	uint64_t chroma_addr = 0;
	int r;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	/* Nothing to pin when no framebuffer is being attached. */
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	/* Pin the BO into VRAM; afb->address receives the pinned address. */
	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);


	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		return r;
	}

	/* Extra reference for the lifetime of the pin; dropped in
	 * dm_plane_helper_cleanup_fb(). */
	amdgpu_bo_ref(rbo);

	/* Program the scanout address only when this state has its own
	 * dc_plane_state, distinct from the old state's. */
	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			/* Packed (graphics) format: single base address. */
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			/* Planar video format: luma plane at the base, chroma
			 * plane following at a 64-pixel-aligned pitch. */
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
			plane_state->address.video_progressive.luma_addr.low_part
							= lower_32_bits(afb->address);
			plane_state->address.video_progressive.luma_addr.high_part
							= upper_32_bits(afb->address);
			chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
			plane_state->address.video_progressive.chroma_addr.low_part
							= lower_32_bits(chroma_addr);
			plane_state->address.video_progressive.chroma_addr.high_part
							= upper_32_bits(chroma_addr);
		}
	}

	/* It's a hack for S3, since the 4.9 kernel filters out cursor buffer
	 * prepare and cleanup in drm_atomic_helper_prepare_planes
	 * and drm_atomic_helper_cleanup_planes because the fb doesn't exist
	 * in S3. In the 4.10 kernel this code should be removed and
	 * amdgpu_device_suspend code touching frame buffers should be
	 * avoided for DC.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);

		acrtc->cursor_bo = obj;
	}
	return 0;
}
3013
3014 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3015                                        struct drm_plane_state *old_state)
3016 {
3017         struct amdgpu_bo *rbo;
3018         struct amdgpu_framebuffer *afb;
3019         int r;
3020
3021         if (!old_state->fb)
3022                 return;
3023
3024         afb = to_amdgpu_framebuffer(old_state->fb);
3025         rbo = gem_to_amdgpu_bo(afb->obj);
3026         r = amdgpu_bo_reserve(rbo, false);
3027         if (unlikely(r)) {
3028                 DRM_ERROR("failed to reserve rbo before unpin\n");
3029                 return;
3030         }
3031
3032         amdgpu_bo_unpin(rbo);
3033         amdgpu_bo_unreserve(rbo);
3034         amdgpu_bo_unref(&rbo);
3035 }
3036
3037 static int dm_plane_atomic_check(struct drm_plane *plane,
3038                                  struct drm_plane_state *state)
3039 {
3040         struct amdgpu_device *adev = plane->dev->dev_private;
3041         struct dc *dc = adev->dm.dc;
3042         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3043
3044         if (!dm_plane_state->dc_state)
3045                 return 0;
3046
3047         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3048                 return 0;
3049
3050         return -EINVAL;
3051 }
3052
/* Plane helper vtable used by the atomic helpers. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
};
3058
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement the proper check
 */
/* Pixel formats advertised for primary planes. */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
};
3075
/* Pixel formats advertised for overlay planes. */
static const uint32_t yuv_formats[] = {
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
};
3080
/* Pixel formats advertised for cursor planes. */
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
3084
3085 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3086                                 struct amdgpu_plane *aplane,
3087                                 unsigned long possible_crtcs)
3088 {
3089         int res = -EPERM;
3090
3091         switch (aplane->base.type) {
3092         case DRM_PLANE_TYPE_PRIMARY:
3093                 aplane->base.format_default = true;
3094
3095                 res = drm_universal_plane_init(
3096                                 dm->adev->ddev,
3097                                 &aplane->base,
3098                                 possible_crtcs,
3099                                 &dm_plane_funcs,
3100                                 rgb_formats,
3101                                 ARRAY_SIZE(rgb_formats),
3102                                 NULL, aplane->base.type, NULL);
3103                 break;
3104         case DRM_PLANE_TYPE_OVERLAY:
3105                 res = drm_universal_plane_init(
3106                                 dm->adev->ddev,
3107                                 &aplane->base,
3108                                 possible_crtcs,
3109                                 &dm_plane_funcs,
3110                                 yuv_formats,
3111                                 ARRAY_SIZE(yuv_formats),
3112                                 NULL, aplane->base.type, NULL);
3113                 break;
3114         case DRM_PLANE_TYPE_CURSOR:
3115                 res = drm_universal_plane_init(
3116                                 dm->adev->ddev,
3117                                 &aplane->base,
3118                                 possible_crtcs,
3119                                 &dm_plane_funcs,
3120                                 cursor_formats,
3121                                 ARRAY_SIZE(cursor_formats),
3122                                 NULL, aplane->base.type, NULL);
3123                 break;
3124         }
3125
3126         drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
3127
3128         /* Create (reset) the plane state */
3129         if (aplane->base.funcs->reset)
3130                 aplane->base.funcs->reset(&aplane->base);
3131
3132
3133         return res;
3134 }
3135
3136 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3137                                struct drm_plane *plane,
3138                                uint32_t crtc_index)
3139 {
3140         struct amdgpu_crtc *acrtc = NULL;
3141         struct amdgpu_plane *cursor_plane;
3142
3143         int res = -ENOMEM;
3144
3145         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3146         if (!cursor_plane)
3147                 goto fail;
3148
3149         cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3150         res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3151
3152         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3153         if (!acrtc)
3154                 goto fail;
3155
3156         res = drm_crtc_init_with_planes(
3157                         dm->ddev,
3158                         &acrtc->base,
3159                         plane,
3160                         &cursor_plane->base,
3161                         &amdgpu_dm_crtc_funcs, NULL);
3162
3163         if (res)
3164                 goto fail;
3165
3166         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3167
3168         /* Create (reset) the plane state */
3169         if (acrtc->base.funcs->reset)
3170                 acrtc->base.funcs->reset(&acrtc->base);
3171
3172         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3173         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3174
3175         acrtc->crtc_id = crtc_index;
3176         acrtc->base.enabled = false;
3177
3178         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3179         drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
3180
3181         return 0;
3182
3183 fail:
3184         kfree(acrtc);
3185         kfree(cursor_plane);
3186         return res;
3187 }
3188
3189
3190 static int to_drm_connector_type(enum signal_type st)
3191 {
3192         switch (st) {
3193         case SIGNAL_TYPE_HDMI_TYPE_A:
3194                 return DRM_MODE_CONNECTOR_HDMIA;
3195         case SIGNAL_TYPE_EDP:
3196                 return DRM_MODE_CONNECTOR_eDP;
3197         case SIGNAL_TYPE_RGB:
3198                 return DRM_MODE_CONNECTOR_VGA;
3199         case SIGNAL_TYPE_DISPLAY_PORT:
3200         case SIGNAL_TYPE_DISPLAY_PORT_MST:
3201                 return DRM_MODE_CONNECTOR_DisplayPort;
3202         case SIGNAL_TYPE_DVI_DUAL_LINK:
3203         case SIGNAL_TYPE_DVI_SINGLE_LINK:
3204                 return DRM_MODE_CONNECTOR_DVID;
3205         case SIGNAL_TYPE_VIRTUAL:
3206                 return DRM_MODE_CONNECTOR_VIRTUAL;
3207
3208         default:
3209                 return DRM_MODE_CONNECTOR_Unknown;
3210         }
3211 }
3212
3213 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3214 {
3215         const struct drm_connector_helper_funcs *helper =
3216                 connector->helper_private;
3217         struct drm_encoder *encoder;
3218         struct amdgpu_encoder *amdgpu_encoder;
3219
3220         encoder = helper->best_encoder(connector);
3221
3222         if (encoder == NULL)
3223                 return;
3224
3225         amdgpu_encoder = to_amdgpu_encoder(encoder);
3226
3227         amdgpu_encoder->native_mode.clock = 0;
3228
3229         if (!list_empty(&connector->probed_modes)) {
3230                 struct drm_display_mode *preferred_mode = NULL;
3231
3232                 list_for_each_entry(preferred_mode,
3233                                     &connector->probed_modes,
3234                                     head) {
3235                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3236                                 amdgpu_encoder->native_mode = *preferred_mode;
3237
3238                         break;
3239                 }
3240
3241         }
3242 }
3243
3244 static struct drm_display_mode *
3245 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3246                              char *name,
3247                              int hdisplay, int vdisplay)
3248 {
3249         struct drm_device *dev = encoder->dev;
3250         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3251         struct drm_display_mode *mode = NULL;
3252         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3253
3254         mode = drm_mode_duplicate(dev, native_mode);
3255
3256         if (mode == NULL)
3257                 return NULL;
3258
3259         mode->hdisplay = hdisplay;
3260         mode->vdisplay = vdisplay;
3261         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3262         strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3263
3264         return mode;
3265
3266 }
3267
3268 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3269                                                  struct drm_connector *connector)
3270 {
3271         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3272         struct drm_display_mode *mode = NULL;
3273         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3274         struct amdgpu_dm_connector *amdgpu_dm_connector =
3275                                 to_amdgpu_dm_connector(connector);
3276         int i;
3277         int n;
3278         struct mode_size {
3279                 char name[DRM_DISPLAY_MODE_LEN];
3280                 int w;
3281                 int h;
3282         } common_modes[] = {
3283                 {  "640x480",  640,  480},
3284                 {  "800x600",  800,  600},
3285                 { "1024x768", 1024,  768},
3286                 { "1280x720", 1280,  720},
3287                 { "1280x800", 1280,  800},
3288                 {"1280x1024", 1280, 1024},
3289                 { "1440x900", 1440,  900},
3290                 {"1680x1050", 1680, 1050},
3291                 {"1600x1200", 1600, 1200},
3292                 {"1920x1080", 1920, 1080},
3293                 {"1920x1200", 1920, 1200}
3294         };
3295
3296         n = ARRAY_SIZE(common_modes);
3297
3298         for (i = 0; i < n; i++) {
3299                 struct drm_display_mode *curmode = NULL;
3300                 bool mode_existed = false;
3301
3302                 if (common_modes[i].w > native_mode->hdisplay ||
3303                     common_modes[i].h > native_mode->vdisplay ||
3304                    (common_modes[i].w == native_mode->hdisplay &&
3305                     common_modes[i].h == native_mode->vdisplay))
3306                         continue;
3307
3308                 list_for_each_entry(curmode, &connector->probed_modes, head) {
3309                         if (common_modes[i].w == curmode->hdisplay &&
3310                             common_modes[i].h == curmode->vdisplay) {
3311                                 mode_existed = true;
3312                                 break;
3313                         }
3314                 }
3315
3316                 if (mode_existed)
3317                         continue;
3318
3319                 mode = amdgpu_dm_create_common_mode(encoder,
3320                                 common_modes[i].name, common_modes[i].w,
3321                                 common_modes[i].h);
3322                 drm_mode_probed_add(connector, mode);
3323                 amdgpu_dm_connector->num_modes++;
3324         }
3325 }
3326
3327 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3328                                               struct edid *edid)
3329 {
3330         struct amdgpu_dm_connector *amdgpu_dm_connector =
3331                         to_amdgpu_dm_connector(connector);
3332
3333         if (edid) {
3334                 /* empty probed_modes */
3335                 INIT_LIST_HEAD(&connector->probed_modes);
3336                 amdgpu_dm_connector->num_modes =
3337                                 drm_add_edid_modes(connector, edid);
3338
3339                 drm_edid_to_eld(connector, edid);
3340
3341                 amdgpu_dm_get_native_mode(connector);
3342         } else {
3343                 amdgpu_dm_connector->num_modes = 0;
3344         }
3345 }
3346
3347 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3348 {
3349         const struct drm_connector_helper_funcs *helper =
3350                         connector->helper_private;
3351         struct amdgpu_dm_connector *amdgpu_dm_connector =
3352                         to_amdgpu_dm_connector(connector);
3353         struct drm_encoder *encoder;
3354         struct edid *edid = amdgpu_dm_connector->edid;
3355
3356         encoder = helper->best_encoder(connector);
3357
3358         amdgpu_dm_connector_ddc_get_modes(connector, edid);
3359         amdgpu_dm_connector_add_common_modes(encoder, connector);
3360         return amdgpu_dm_connector->num_modes;
3361 }
3362
3363 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3364                                      struct amdgpu_dm_connector *aconnector,
3365                                      int connector_type,
3366                                      struct dc_link *link,
3367                                      int link_index)
3368 {
3369         struct amdgpu_device *adev = dm->ddev->dev_private;
3370
3371         aconnector->connector_id = link_index;
3372         aconnector->dc_link = link;
3373         aconnector->base.interlace_allowed = false;
3374         aconnector->base.doublescan_allowed = false;
3375         aconnector->base.stereo_allowed = false;
3376         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3377         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3378
3379         mutex_init(&aconnector->hpd_lock);
3380
3381         /* configure support HPD hot plug connector_>polled default value is 0
3382          * which means HPD hot plug not supported
3383          */
3384         switch (connector_type) {
3385         case DRM_MODE_CONNECTOR_HDMIA:
3386                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3387                 break;
3388         case DRM_MODE_CONNECTOR_DisplayPort:
3389                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3390                 break;
3391         case DRM_MODE_CONNECTOR_DVID:
3392                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3393                 break;
3394         default:
3395                 break;
3396         }
3397
3398         drm_object_attach_property(&aconnector->base.base,
3399                                 dm->ddev->mode_config.scaling_mode_property,
3400                                 DRM_MODE_SCALE_NONE);
3401
3402         drm_object_attach_property(&aconnector->base.base,
3403                                 adev->mode_info.underscan_property,
3404                                 UNDERSCAN_OFF);
3405         drm_object_attach_property(&aconnector->base.base,
3406                                 adev->mode_info.underscan_hborder_property,
3407                                 0);
3408         drm_object_attach_property(&aconnector->base.base,
3409                                 adev->mode_info.underscan_vborder_property,
3410                                 0);
3411
3412 }
3413
3414 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3415                               struct i2c_msg *msgs, int num)
3416 {
3417         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3418         struct ddc_service *ddc_service = i2c->ddc_service;
3419         struct i2c_command cmd;
3420         int i;
3421         int result = -EIO;
3422
3423         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3424
3425         if (!cmd.payloads)
3426                 return result;
3427
3428         cmd.number_of_payloads = num;
3429         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3430         cmd.speed = 100;
3431
3432         for (i = 0; i < num; i++) {
3433                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3434                 cmd.payloads[i].address = msgs[i].addr;
3435                 cmd.payloads[i].length = msgs[i].len;
3436                 cmd.payloads[i].data = msgs[i].buf;
3437         }
3438
3439         if (dal_i2caux_submit_i2c_command(
3440                         ddc_service->ctx->i2caux,
3441                         ddc_service->ddc_pin,
3442                         &cmd))
3443                 result = num;
3444
3445         kfree(cmd.payloads);
3446         return result;
3447 }
3448
3449 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
3450 {
3451         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
3452 }
3453
/* I2C algorithm routed through DC's DDC service (see amdgpu_dm_i2c_xfer). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
3458
3459 static struct amdgpu_i2c_adapter *
3460 create_i2c(struct ddc_service *ddc_service,
3461            int link_index,
3462            int *res)
3463 {
3464         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3465         struct amdgpu_i2c_adapter *i2c;
3466
3467         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
3468         if (!i2c)
3469                 return NULL;
3470         i2c->base.owner = THIS_MODULE;
3471         i2c->base.class = I2C_CLASS_DDC;
3472         i2c->base.dev.parent = &adev->pdev->dev;
3473         i2c->base.algo = &amdgpu_dm_i2c_algo;
3474         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3475         i2c_set_adapdata(&i2c->base, i2c);
3476         i2c->ddc_service = ddc_service;
3477
3478         return i2c;
3479 }
3480
/* Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 *
 * Creates the DRM connector for @link_index: sets up the hardware i2c
 * adapter, initializes the DRM connector object and its helpers, attaches
 * it to @aencoder, and (for DP/eDP) sets up MST and backlight support.
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	/* Back-pointer so DC code can find the connector for this link. */
	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type);

	/* NOTE(review): failures from here on free the i2c wrapper without
	 * calling i2c_del_adapter() even though the adapter was registered
	 * above — verify whether an unregister step is needed on this path.
	 */
	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	/* Create (reset) the connector state. */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_mode_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	drm_connector_register(&aconnector->base);

	/* DP and eDP need the MST topology manager set up. */
	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* NOTE: this currently will create backlight device even if a panel
	 * is not connected to the eDP/LVDS connector.
	 *
	 * This is less than ideal but we don't have sink information at this
	 * stage since detection happens after. We can't do detection earlier
	 * since MST detection needs connectors to be created first.
	 */
	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* Event if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better then a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif

out_free:
	/* res == 0 means success: keep the i2c adapter; otherwise drop it. */
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
3579
3580 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3581 {
3582         switch (adev->mode_info.num_crtc) {
3583         case 1:
3584                 return 0x1;
3585         case 2:
3586                 return 0x3;
3587         case 3:
3588                 return 0x7;
3589         case 4:
3590                 return 0xf;
3591         case 5:
3592                 return 0x1f;
3593         case 6:
3594         default:
3595                 return 0x3f;
3596         }
3597 }
3598
3599 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3600                                   struct amdgpu_encoder *aencoder,
3601                                   uint32_t link_index)
3602 {
3603         struct amdgpu_device *adev = dev->dev_private;
3604
3605         int res = drm_encoder_init(dev,
3606                                    &aencoder->base,
3607                                    &amdgpu_dm_encoder_funcs,
3608                                    DRM_MODE_ENCODER_TMDS,
3609                                    NULL);
3610
3611         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3612
3613         if (!res)
3614                 aencoder->encoder_id = link_index;
3615         else
3616                 aencoder->encoder_id = -1;
3617
3618         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3619
3620         return res;
3621 }
3622
3623 static void manage_dm_interrupts(struct amdgpu_device *adev,
3624                                  struct amdgpu_crtc *acrtc,
3625                                  bool enable)
3626 {
3627         /*
3628          * this is not correct translation but will work as soon as VBLANK
3629          * constant is the same as PFLIP
3630          */
3631         int irq_type =
3632                 amdgpu_crtc_idx_to_irq_type(
3633                         adev,
3634                         acrtc->crtc_id);
3635
3636         if (enable) {
3637                 drm_crtc_vblank_on(&acrtc->base);
3638                 amdgpu_irq_get(
3639                         adev,
3640                         &adev->pageflip_irq,
3641                         irq_type);
3642         } else {
3643
3644                 amdgpu_irq_put(
3645                         adev,
3646                         &adev->pageflip_irq,
3647                         irq_type);
3648                 drm_crtc_vblank_off(&acrtc->base);
3649         }
3650 }
3651
3652 static bool
3653 is_scaling_state_different(const struct dm_connector_state *dm_state,
3654                            const struct dm_connector_state *old_dm_state)
3655 {
3656         if (dm_state->scaling != old_dm_state->scaling)
3657                 return true;
3658         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
3659                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
3660                         return true;
3661         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
3662                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
3663                         return true;
3664         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
3665                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
3666                 return true;
3667         return false;
3668 }
3669
/*
 * Detach @stream from @acrtc's bookkeeping when a stream goes away in the
 * update-mode path: unregister it from the freesync module (if loaded)
 * and mark the CRTC disabled.
 */
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */
	if (adev->dm.freesync_module)
		mod_freesync_remove_stream(adev->dm.freesync_module, stream);

	/* -1 marks "no output timing generator instance assigned" */
	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
3681
3682 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3683                                struct dc_cursor_position *position)
3684 {
3685         struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
3686         int x, y;
3687         int xorigin = 0, yorigin = 0;
3688
3689         if (!crtc || !plane->state->fb) {
3690                 position->enable = false;
3691                 position->x = 0;
3692                 position->y = 0;
3693                 return 0;
3694         }
3695
3696         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3697             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3698                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3699                           __func__,
3700                           plane->state->crtc_w,
3701                           plane->state->crtc_h);
3702                 return -EINVAL;
3703         }
3704
3705         x = plane->state->crtc_x;
3706         y = plane->state->crtc_y;
3707         /* avivo cursor are offset into the total surface */
3708         x += crtc->primary->state->src_x >> 16;
3709         y += crtc->primary->state->src_y >> 16;
3710         if (x < 0) {
3711                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3712                 x = 0;
3713         }
3714         if (y < 0) {
3715                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3716                 y = 0;
3717         }
3718         position->enable = true;
3719         position->x = x;
3720         position->y = y;
3721         position->x_hotspot = xorigin;
3722         position->y_hotspot = yorigin;
3723
3724         return 0;
3725 }
3726
3727 static void handle_cursor_update(struct drm_plane *plane,
3728                                  struct drm_plane_state *old_plane_state)
3729 {
3730         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
3731         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
3732         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
3733         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
3734         uint64_t address = afb ? afb->address : 0;
3735         struct dc_cursor_position position;
3736         struct dc_cursor_attributes attributes;
3737         int ret;
3738
3739         if (!plane->state->fb && !old_plane_state->fb)
3740                 return;
3741
3742         DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
3743                          __func__,
3744                          amdgpu_crtc->crtc_id,
3745                          plane->state->crtc_w,
3746                          plane->state->crtc_h);
3747
3748         ret = get_cursor_position(plane, crtc, &position);
3749         if (ret)
3750                 return;
3751
3752         if (!position.enable) {
3753                 /* turn off cursor */
3754                 if (crtc_state && crtc_state->stream)
3755                         dc_stream_set_cursor_position(crtc_state->stream,
3756                                                       &position);
3757                 return;
3758         }
3759
3760         amdgpu_crtc->cursor_width = plane->state->crtc_w;
3761         amdgpu_crtc->cursor_height = plane->state->crtc_h;
3762
3763         attributes.address.high_part = upper_32_bits(address);
3764         attributes.address.low_part  = lower_32_bits(address);
3765         attributes.width             = plane->state->crtc_w;
3766         attributes.height            = plane->state->crtc_h;
3767         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
3768         attributes.rotation_angle    = 0;
3769         attributes.attribute_flags.value = 0;
3770
3771         attributes.pitch = attributes.width;
3772
3773         if (crtc_state->stream) {
3774                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
3775                                                          &attributes))
3776                         DRM_ERROR("DC failed to set cursor attributes\n");
3777
3778                 if (!dc_stream_set_cursor_position(crtc_state->stream,
3779                                                    &position))
3780                         DRM_ERROR("DC failed to set cursor position\n");
3781         }
3782 }
3783
/*
 * Arm the pageflip completion path for @acrtc: take ownership of the
 * pending pageflip event from the atomic CRTC state and mark the flip as
 * submitted so the pflip IRQ handler will complete it.
 * Caller must hold dev->event_lock (asserted below).
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	/* A previous flip's event must not still be pending. */
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
3801
3802 /*
3803  * Executes flip
3804  *
3805  * Waits on all BO's fences and for proper vblank count
3806  */
3807 static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3808                               struct drm_framebuffer *fb,
3809                               uint32_t target,
3810                               struct dc_state *state)
3811 {
3812         unsigned long flags;
3813         uint32_t target_vblank;
3814         int r, vpos, hpos;
3815         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3816         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
3817         struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
3818         struct amdgpu_device *adev = crtc->dev->dev_private;
3819         bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
3820         struct dc_flip_addrs addr = { {0} };
3821         /* TODO eliminate or rename surface_update */
3822         struct dc_surface_update surface_updates[1] = { {0} };
3823         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3824
3825
3826         /* Prepare wait for target vblank early - before the fence-waits */
3827         target_vblank = target - drm_crtc_vblank_count(crtc) +
3828                         amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
3829
3830         /* TODO This might fail and hence better not used, wait
3831          * explicitly on fences instead
3832          * and in general should be called for
3833          * blocking commit to as per framework helpers
3834          */
3835         r = amdgpu_bo_reserve(abo, true);
3836         if (unlikely(r != 0)) {
3837                 DRM_ERROR("failed to reserve buffer before flip\n");
3838                 WARN_ON(1);
3839         }
3840
3841         /* Wait for all fences on this FB */
3842         WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
3843                                                                     MAX_SCHEDULE_TIMEOUT) < 0);
3844
3845         amdgpu_bo_unreserve(abo);
3846
3847         /* Wait until we're out of the vertical blank period before the one
3848          * targeted by the flip
3849          */
3850         while ((acrtc->enabled &&
3851                 (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
3852                                         &vpos, &hpos, NULL, NULL,
3853                                         &crtc->hwmode)
3854                  & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
3855                 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
3856                 (int)(target_vblank -
3857                   amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
3858                 usleep_range(1000, 1100);
3859         }
3860
3861         /* Flip */
3862         spin_lock_irqsave(&crtc->dev->event_lock, flags);
3863         /* update crtc fb */
3864         crtc->primary->fb = fb;
3865
3866         WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
3867         WARN_ON(!acrtc_state->stream);
3868
3869         addr.address.grph.addr.low_part = lower_32_bits(afb->address);
3870         addr.address.grph.addr.high_part = upper_32_bits(afb->address);
3871         addr.flip_immediate = async_flip;
3872
3873
3874         if (acrtc->base.state->event)
3875                 prepare_flip_isr(acrtc);
3876
3877         surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
3878         surface_updates->flip_addr = &addr;
3879
3880
3881         dc_commit_updates_for_stream(adev->dm.dc,
3882                                              surface_updates,
3883                                              1,
3884                                              acrtc_state->stream,
3885                                              NULL,
3886                                              &surface_updates->surface,
3887                                              state);
3888
3889         DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
3890                          __func__,
3891                          addr.address.grph.addr.high_part,
3892                          addr.address.grph.addr.low_part);
3893
3894
3895         spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3896 }
3897
/*
 * Commit all planes of @pcrtc in atomic @state.  Cursor planes are
 * handled directly; other planes are either collected and committed to
 * the stream in one batch (modeset path) or page-flipped individually
 * (flip path).  *wait_for_vblank is cleared when any flip is async.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool *wait_for_vblank)
{
	uint32_t i;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dc_stream_state *dc_stream_attach;
	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	int planes_count = 0;
	unsigned long flags;

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool pflip_needed;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			handle_cursor_update(plane, old_plane_state);
			continue;
		}

		/* Only planes attached to this CRTC with a framebuffer. */
		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		/* No modeset allowed means this commit is a page flip. */
		pflip_needed = !state->allow_modeset;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
			DRM_ERROR("%s: acrtc %d, already busy\n",
				  __func__,
				  acrtc_attach->crtc_id);
			/* In commit tail framework this cannot happen */
			WARN_ON(1);
		}
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		if (!pflip_needed) {
			/* Modeset path: batch the plane for a single
			 * dc_commit_planes_to_stream() call below. */
			WARN_ON(!dm_new_plane_state->dc_state);

			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;

			dc_stream_attach = acrtc_state->stream;
			planes_count++;

		} else if (new_crtc_state->planes_changed) {
			/* Assume even ONE crtc with immediate flip means
			 * entire can't wait for VBLANK
			 * TODO Check if it's correct
			 */
			*wait_for_vblank =
					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
				false : true;

			/* TODO: Needs rework for multiplane flip */
			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
				drm_crtc_vblank_get(crtc);

			amdgpu_dm_do_flip(
				crtc,
				fb,
				drm_crtc_vblank_count(crtc) + *wait_for_vblank,
				dm_state->context);
		}

	}

	if (planes_count) {
		unsigned long flags;

		if (new_pcrtc_state->event) {

			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			prepare_flip_isr(acrtc_attach);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (false == dc_commit_planes_to_stream(dm->dc,
							plane_states_constructed,
							planes_count,
							dc_stream_attach,
							dm_state->context))
			dm_error("%s: Failed to attach plane!\n", __func__);
	} else {
		/*TODO BUG Here should go disable planes on CRTC. */
	}
}
4001
4002
4003 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
4004                                    struct drm_atomic_state *state,
4005                                    bool nonblock)
4006 {
4007         struct drm_crtc *crtc;
4008         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4009         struct amdgpu_device *adev = dev->dev_private;
4010         int i;
4011
4012         /*
4013          * We evade vblanks and pflips on crtc that
4014          * should be changed. We do it here to flush & disable
4015          * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4016          * it will update crtc->dm_crtc_state->stream pointer which is used in
4017          * the ISRs.
4018          */
4019         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4020                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4021                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4022
4023                 if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4024                         manage_dm_interrupts(adev, acrtc, false);
4025         }
4026         /* Add check here for SoC's that support hardware cursor plane, to
4027          * unset legacy_cursor_update */
4028
4029         return drm_atomic_helper_commit(dev, state, nonblock);
4030
4031         /*TODO Handle EINTR, reenable IRQ*/
4032 }
4033
/*
 * amdgpu_dm_atomic_commit_tail - program the hardware for a swapped-in
 * atomic state.
 *
 * Runs after drm_atomic_helper_commit() has swapped the state in. The
 * phases below execute in a fixed order: tear down / set up streams for
 * modeset CRTCs, commit the DC state, handle scaling/underscan-only
 * updates, re-enable interrupts on newly enabled CRTCs, commit planes,
 * then flush any leftover vblank events and signal completion.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dm_atomic_state *dm_state;
        uint32_t i, j;
        uint32_t new_crtcs_count = 0;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
        struct dc_stream_state *new_stream = NULL;
        unsigned long flags;
        bool wait_for_vblank = true;
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;

        drm_atomic_helper_update_legacy_modeset_state(dev, state);

        dm_state = to_dm_atomic_state(state);

        /* update changed items */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

                DRM_DEBUG_DRIVER(
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                        "connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
                        new_crtc_state->planes_changed,
                        new_crtc_state->mode_changed,
                        new_crtc_state->active_changed,
                        new_crtc_state->connectors_changed);

                /* handles headless hotplug case, updating new_state and
                 * aconnector as needed
                 */

                if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

                        DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

                        if (!dm_new_crtc_state->stream) {
                                /*
                                 * this could happen because of issues with
                                 * userspace notifications delivery.
                                 * In this case userspace tries to set mode on
                                 * display which is disconnect in fact.
                                 * dc_sink in NULL in this case on aconnector.
                                 * We expect reset mode will come soon.
                                 *
                                 * This can also happen when unplug is done
                                 * during resume sequence ended
                                 *
                                 * In this case, we want to pretend we still
                                 * have a sink to keep the pipe running so that
                                 * hw state is consistent with the sw state
                                 */
                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                                __func__, acrtc->base.base.id);
                                continue;
                        }


                        /* Old stream must be released before the new one takes the pipe. */
                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);


                        /*
                         * this loop saves set mode crtcs
                         * we needed to enable vblanks once all
                         * resources acquired in dc after dc_commit_streams
                         */

                        /*TODO move all this into dm_crtc_state, get rid of
                         * new_crtcs array and use old and new atomic states
                         * instead
                         */
                        new_crtcs[new_crtcs_count] = acrtc;
                        new_crtcs_count++;

                        new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
                        acrtc->enabled = true;
                        acrtc->hw_mode = new_crtc_state->mode;
                        crtc->hwmode = new_crtc_state->mode;
                } else if (modereset_required(new_crtc_state)) {
                        DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);

                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
                }
        } /* for_each_crtc_in_state() */

        /*
         * Add streams after required streams from new and replaced streams
         * are removed from freesync module
         */
        if (adev->dm.freesync_module) {
                for (i = 0; i < new_crtcs_count; i++) {
                        struct amdgpu_dm_connector *aconnector = NULL;

                        new_crtc_state = drm_atomic_get_new_crtc_state(state,
                                        &new_crtcs[i]->base);
                        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                        new_stream = dm_new_crtc_state->stream;
                        aconnector = amdgpu_dm_find_first_crtc_matching_connector(
                                        state,
                                        &new_crtcs[i]->base);
                        if (!aconnector) {
                                DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
                                         "skipping freesync init\n",
                                         new_crtcs[i]->crtc_id);
                                continue;
                        }

                        mod_freesync_add_stream(adev->dm.freesync_module,
                                                new_stream, &aconnector->caps);
                }
        }

        /* Commit the aggregated DC state to hardware in one shot. */
        if (dm_state->context)
                WARN_ON(!dc_commit_state(dm->dc, dm_state->context));

        /* Record which OTG each stream landed on, for the pflip/vblank ISRs. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state->stream != NULL) {
                        const struct dc_stream_status *status =
                                        dc_stream_get_status(dm_new_crtc_state->stream);

                        if (!status)
                                DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
                        else
                                acrtc->otg_inst = status->primary_otg_inst;
                }
        }

        /* Handle scaling and underscan changes*/
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct dc_stream_status *status = NULL;

                if (acrtc)
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

                /* Skip any modesets/resets */
                if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;

                /* Skip any thing not scale or underscan changes */
                if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
                        continue;

                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
                                dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);

                if (!dm_new_crtc_state->stream)
                        continue;

                status = dc_stream_get_status(dm_new_crtc_state->stream);
                WARN_ON(!status);
                WARN_ON(!status->plane_count);

                /*TODO How it works with MPO ?*/
                if (!dc_commit_planes_to_stream(
                                dm->dc,
                                status->plane_states,
                                status->plane_count,
                                dm_new_crtc_state->stream,
                                dm_state->context))
                        dm_error("%s: Failed to update stream scaling!\n", __func__);
        }

        for (i = 0; i < new_crtcs_count; i++) {
                /*
                 * loop to enable interrupts on newly arrived crtc
                 */
                struct amdgpu_crtc *acrtc = new_crtcs[i];

                new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (adev->dm.freesync_module)
                        mod_freesync_notify_mode_change(
                                adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);

                /* Re-enable the interrupts disabled in amdgpu_dm_atomic_commit(). */
                manage_dm_interrupts(adev, acrtc, true);
        }

        /* update planes when needed per crtc*/
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

                if (dm_new_crtc_state->stream)
                        amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
        }


        /*
         * send vblank event on all events not handled in flip and
         * mark consumed event for drm_atomic_helper_commit_hw_done
         */
        spin_lock_irqsave(&adev->ddev->event_lock, flags);
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

                if (new_crtc_state->event)
                        drm_send_event_locked(dev, &new_crtc_state->event->base);

                new_crtc_state->event = NULL;
        }
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        /* Signal HW programming completion */
        drm_atomic_helper_commit_hw_done(state);

        /* wait_for_vblank may have been cleared by an async pageflip above. */
        if (wait_for_vblank)
                drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);
}
4269
4270
4271 static int dm_force_atomic_commit(struct drm_connector *connector)
4272 {
4273         int ret = 0;
4274         struct drm_device *ddev = connector->dev;
4275         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4276         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4277         struct drm_plane *plane = disconnected_acrtc->base.primary;
4278         struct drm_connector_state *conn_state;
4279         struct drm_crtc_state *crtc_state;
4280         struct drm_plane_state *plane_state;
4281
4282         if (!state)
4283                 return -ENOMEM;
4284
4285         state->acquire_ctx = ddev->mode_config.acquire_ctx;
4286
4287         /* Construct an atomic state to restore previous display setting */
4288
4289         /*
4290          * Attach connectors to drm_atomic_state
4291          */
4292         conn_state = drm_atomic_get_connector_state(state, connector);
4293
4294         ret = PTR_ERR_OR_ZERO(conn_state);
4295         if (ret)
4296                 goto err;
4297
4298         /* Attach crtc to drm_atomic_state*/
4299         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4300
4301         ret = PTR_ERR_OR_ZERO(crtc_state);
4302         if (ret)
4303                 goto err;
4304
4305         /* force a restore */
4306         crtc_state->mode_changed = true;
4307
4308         /* Attach plane to drm_atomic_state */
4309         plane_state = drm_atomic_get_plane_state(state, plane);
4310
4311         ret = PTR_ERR_OR_ZERO(plane_state);
4312         if (ret)
4313                 goto err;
4314
4315
4316         /* Call commit internally with the state we just constructed */
4317         ret = drm_atomic_commit(state);
4318         if (!ret)
4319                 return 0;
4320
4321 err:
4322         DRM_ERROR("Restoring old state failed with %i\n", ret);
4323         drm_atomic_state_put(state);
4324
4325         return ret;
4326 }
4327
4328 /*
4329  * This functions handle all cases when set mode does not come upon hotplug.
4330  * This include when the same display is unplugged then plugged back into the
4331  * same port and when we are running without usermode desktop manager supprot
4332  */
4333 void dm_restore_drm_connector_state(struct drm_device *dev,
4334                                     struct drm_connector *connector)
4335 {
4336         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4337         struct amdgpu_crtc *disconnected_acrtc;
4338         struct dm_crtc_state *acrtc_state;
4339
4340         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4341                 return;
4342
4343         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4344         if (!disconnected_acrtc)
4345                 return;
4346
4347         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4348         if (!acrtc_state->stream)
4349                 return;
4350
4351         /*
4352          * If the previous sink is not released and different from the current,
4353          * we deduce we are in a state where we can not rely on usermode call
4354          * to turn on the display, so we do it here
4355          */
4356         if (acrtc_state->stream->sink != aconnector->dc_sink)
4357                 dm_force_atomic_commit(&aconnector->base);
4358 }
4359
4360 /*`
4361  * Grabs all modesetting locks to serialize against any blocking commits,
4362  * Waits for completion of all non blocking commits.
4363  */
static int do_aquire_global_lock(struct drm_device *dev,
                                 struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_commit *commit;
        long ret;

        /*
         * Adding all modeset locks to acquire_ctx ensures that when the
         * framework releases the context, the extra locks taken here are
         * released along with it.
         */
        ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                return ret;

        /* Wait out every in-flight commit on every CRTC. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                /* Peek the newest commit under the lock and pin it with a
                 * reference so it can't be freed while we wait on it. */
                spin_lock(&crtc->commit_lock);
                commit = list_first_entry_or_null(&crtc->commit_list,
                                struct drm_crtc_commit, commit_entry);
                if (commit)
                        drm_crtc_commit_get(commit);
                spin_unlock(&crtc->commit_lock);

                if (!commit)
                        continue;

                /* Make sure all pending HW programming completed and
                 * page flips done
                 */
                ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

                /* ret > 0: hw_done signalled; now wait for the flip too. */
                if (ret > 0)
                        ret = wait_for_completion_interruptible_timeout(
                                        &commit->flip_done, 10*HZ);

                /* ret == 0 means the 10-second timeout expired. */
                if (ret == 0)
                        DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
                                  "timed out\n", crtc->base.id, crtc->name);

                drm_crtc_commit_put(commit);
        }

        /* Negative ret (interrupted) propagates; timeout/success map to 0. */
        return ret < 0 ? ret : 0;
}
4408
/*
 * dm_update_crtcs_state - add or remove DC streams for changed CRTCs
 * during atomic check.
 *
 * Called twice from atomic check: once with @enable == false to remove
 * streams for disabled/changed CRTCs, then with @enable == true to add
 * streams for enabled/updated ones. Sets *@lock_and_validation_needed
 * when a full validation pass (and global lock) will be required.
 * Returns 0 on success or a negative errno.
 */
static int dm_update_crtcs_state(struct dc *dc,
                                 struct drm_atomic_state *state,
                                 bool enable,
                                 bool *lock_and_validation_needed)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
        /* update changed items */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = NULL;
                struct amdgpu_dm_connector *aconnector = NULL;
                struct drm_connector_state *new_con_state = NULL;
                struct dm_connector_state *dm_conn_state = NULL;

                /* Reset per-iteration; released at next_crtc if still set. */
                new_stream = NULL;

                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                acrtc = to_amdgpu_crtc(crtc);

                aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

                /* TODO This hack should go away */
                if (aconnector && enable) {
                        // Make sure fake sink is created in plug-in scenario
                        new_con_state = drm_atomic_get_connector_state(state,
                                                                    &aconnector->base);

                        if (IS_ERR(new_con_state)) {
                                ret = PTR_ERR_OR_ZERO(new_con_state);
                                break;
                        }

                        dm_conn_state = to_dm_connector_state(new_con_state);

                        /* Candidate stream; holds one reference we own. */
                        new_stream = create_stream_for_sink(aconnector,
                                                             &new_crtc_state->mode,
                                                            dm_conn_state);

                        /*
                         * we can have no stream on ACTION_SET if a display
                         * was disconnected during S3, in this case it not and
                         * error, the OS will be updated after detection, and
                         * do the right thing on next atomic commit
                         */

                        if (!new_stream) {
                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                                __func__, acrtc->base.base.id);
                                break;
                        }
                }

                /* If the stream is effectively the same, downgrade the
                 * modeset to a fast update. */
                if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                                dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {

                        new_crtc_state->mode_changed = false;

                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                         new_crtc_state->mode_changed);
                }


                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto next_crtc;

                DRM_DEBUG_DRIVER(
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                        "connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
                        new_crtc_state->planes_changed,
                        new_crtc_state->mode_changed,
                        new_crtc_state->active_changed,
                        new_crtc_state->connectors_changed);

                /* Remove stream for any changed/disabled CRTC */
                if (!enable) {

                        if (!dm_old_crtc_state->stream)
                                goto next_crtc;

                        DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                        crtc->base.id);

                        /* i.e. reset mode */
                        if (dc_remove_stream_from_ctx(
                                        dc,
                                        dm_state->context,
                                        dm_old_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        dc_stream_release(dm_old_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;

                        *lock_and_validation_needed = true;

                } else {/* Add stream for any updated/enabled CRTC */
                        /*
                         * Quick fix to prevent NULL pointer on new_stream when
                         * added MST connectors not found in existing crtc_state in the chained mode
                         * TODO: need to dig out the root cause of that
                         */
                        if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
                                goto next_crtc;

                        if (modereset_required(new_crtc_state))
                                goto next_crtc;

                        if (modeset_required(new_crtc_state, new_stream,
                                             dm_old_crtc_state->stream)) {

                                WARN_ON(dm_new_crtc_state->stream);

                                /* CRTC state takes its own reference. */
                                dm_new_crtc_state->stream = new_stream;
                                dc_stream_retain(new_stream);

                                DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
                                                        crtc->base.id);

                                if (dc_add_stream_to_ctx(
                                                dc,
                                                dm_state->context,
                                                dm_new_crtc_state->stream) != DC_OK) {
                                        ret = -EINVAL;
                                        goto fail;
                                }

                                *lock_and_validation_needed = true;
                        }
                }

next_crtc:
                /* Release extra reference */
                if (new_stream)
                         dc_stream_release(new_stream);
        }

        return ret;

fail:
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}
4565
4566 static int dm_update_planes_state(struct dc *dc,
4567                                   struct drm_atomic_state *state,
4568                                   bool enable,
4569                                   bool *lock_and_validation_needed)
4570 {
4571         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4572         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4573         struct drm_plane *plane;
4574         struct drm_plane_state *old_plane_state, *new_plane_state;
4575         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4576         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4577         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4578         int i ;
4579         /* TODO return page_flip_needed() function */
4580         bool pflip_needed  = !state->allow_modeset;
4581         int ret = 0;
4582
4583         if (pflip_needed)
4584                 return ret;
4585
4586         /* Add new planes */
4587         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4588                 new_plane_crtc = new_plane_state->crtc;
4589                 old_plane_crtc = old_plane_state->crtc;
4590                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
4591                 dm_old_plane_state = to_dm_plane_state(old_plane_state);
4592
4593                 /*TODO Implement atomic check for cursor plane */
4594                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4595                         continue;
4596
4597                 /* Remove any changed/removed planes */
4598                 if (!enable) {
4599
4600                         if (!old_plane_crtc)
4601                                 continue;
4602
4603                         old_crtc_state = drm_atomic_get_old_crtc_state(
4604                                         state, old_plane_crtc);
4605                         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4606
4607                         if (!dm_old_crtc_state->stream)
4608                                 continue;
4609
4610                         DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4611                                         plane->base.id, old_plane_crtc->base.id);
4612
4613                         if (!dc_remove_plane_from_context(
4614                                         dc,
4615                                         dm_old_crtc_state->stream,
4616                                         dm_old_plane_state->dc_state,
4617                                         dm_state->context)) {
4618
4619                                 ret = EINVAL;
4620                                 return ret;
4621                         }
4622
4623
4624                         dc_plane_state_release(dm_old_plane_state->dc_state);
4625                         dm_new_plane_state->dc_state = NULL;
4626
4627                         *lock_and_validation_needed = true;
4628
4629                 } else { /* Add new planes */
4630
4631                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4632                                 continue;
4633
4634                         if (!new_plane_crtc)
4635                                 continue;
4636
4637                         new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4638                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4639
4640                         if (!dm_new_crtc_state->stream)
4641                                 continue;
4642
4643
4644                         WARN_ON(dm_new_plane_state->dc_state);
4645
4646                         dm_new_plane_state->dc_state = dc_create_plane_state(dc);
4647
4648                         DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4649                                         plane->base.id, new_plane_crtc->base.id);
4650
4651                         if (!dm_new_plane_state->dc_state) {
4652                                 ret = -EINVAL;
4653                                 return ret;
4654                         }
4655
4656                         ret = fill_plane_attributes(
4657                                 new_plane_crtc->dev->dev_private,
4658                                 dm_new_plane_state->dc_state,
4659                                 new_plane_state,
4660                                 new_crtc_state,
4661                                 false);
4662                         if (ret)
4663                                 return ret;
4664
4665
4666                         if (!dc_add_plane_to_context(
4667                                         dc,
4668                                         dm_new_crtc_state->stream,
4669                                         dm_new_plane_state->dc_state,
4670                                         dm_state->context)) {
4671
4672                                 ret = -EINVAL;
4673                                 return ret;
4674                         }
4675
4676                         *lock_and_validation_needed = true;
4677                 }
4678         }
4679
4680
4681         return ret;
4682 }
4683
4684 static int amdgpu_dm_atomic_check(struct drm_device *dev,
4685                                   struct drm_atomic_state *state)
4686 {
4687         int i;
4688         int ret;
4689         struct amdgpu_device *adev = dev->dev_private;
4690         struct dc *dc = adev->dm.dc;
4691         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4692         struct drm_connector *connector;
4693         struct drm_connector_state *old_con_state, *new_con_state;
4694         struct drm_crtc *crtc;
4695         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4696
4697         /*
4698          * This bool will be set for true for any modeset/reset
4699          * or plane update which implies non fast surface update.
4700          */
4701         bool lock_and_validation_needed = false;
4702
4703         ret = drm_atomic_helper_check_modeset(dev, state);
4704         if (ret)
4705                 goto fail;
4706
4707         /*
4708          * legacy_cursor_update should be made false for SoC's having
4709          * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
4710          * otherwise for software cursor plane,
4711          * we should not add it to list of affected planes.
4712          */
4713         if (state->legacy_cursor_update) {
4714                 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4715                         if (new_crtc_state->color_mgmt_changed) {
4716                                 ret = drm_atomic_add_affected_planes(state, crtc);
4717                                 if (ret)
4718                                         goto fail;
4719                         }
4720                 }
4721         } else {
4722                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4723                         if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
4724                                         !new_crtc_state->color_mgmt_changed)
4725                                 continue;
4726
4727                         if (!new_crtc_state->enable)
4728                                 continue;
4729
4730                         ret = drm_atomic_add_affected_connectors(state, crtc);
4731                         if (ret)
4732                                 return ret;
4733
4734                         ret = drm_atomic_add_affected_planes(state, crtc);
4735                         if (ret)
4736                                 goto fail;
4737                 }
4738         }
4739
4740         dm_state->context = dc_create_state();
4741         ASSERT(dm_state->context);
4742         dc_resource_state_copy_construct_current(dc, dm_state->context);
4743
4744         /* Remove exiting planes if they are modified */
4745         ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4746         if (ret) {
4747                 goto fail;
4748         }
4749
4750         /* Disable all crtcs which require disable */
4751         ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4752         if (ret) {
4753                 goto fail;
4754         }
4755
4756         /* Enable all crtcs which require enable */
4757         ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4758         if (ret) {
4759                 goto fail;
4760         }
4761
4762         /* Add new/modified planes */
4763         ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4764         if (ret) {
4765                 goto fail;
4766         }
4767
4768         /* Run this here since we want to validate the streams we created */
4769         ret = drm_atomic_helper_check_planes(dev, state);
4770         if (ret)
4771                 goto fail;
4772
4773         /* Check scaling and underscan changes*/
4774         /*TODO Removed scaling changes validation due to inability to commit
4775          * new stream into context w\o causing full reset. Need to
4776          * decide how to handle.
4777          */
4778         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4779                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4780                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4781                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4782
4783                 /* Skip any modesets/resets */
4784                 if (!acrtc || drm_atomic_crtc_needs_modeset(
4785                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
4786                         continue;
4787
4788                 /* Skip any thing not scale or underscan changes */
4789                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4790                         continue;
4791
4792                 lock_and_validation_needed = true;
4793         }
4794
4795         /*
4796          * For full updates case when
4797          * removing/adding/updating  streams on once CRTC while flipping
4798          * on another CRTC,
4799          * acquiring global lock  will guarantee that any such full
4800          * update commit
4801          * will wait for completion of any outstanding flip using DRMs
4802          * synchronization events.
4803          */
4804
4805         if (lock_and_validation_needed) {
4806
4807                 ret = do_aquire_global_lock(dev, state);
4808                 if (ret)
4809                         goto fail;
4810
4811                 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
4812                         ret = -EINVAL;
4813                         goto fail;
4814                 }
4815         }
4816
4817         /* Must be success */
4818         WARN_ON(ret);
4819         return ret;
4820
4821 fail:
4822         if (ret == -EDEADLK)
4823                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
4824         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
4825                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
4826         else
4827                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
4828
4829         return ret;
4830 }
4831
4832 static bool is_dp_capable_without_timing_msa(struct dc *dc,
4833                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
4834 {
4835         uint8_t dpcd_data;
4836         bool capable = false;
4837
4838         if (amdgpu_dm_connector->dc_link &&
4839                 dm_helpers_dp_read_dpcd(
4840                                 NULL,
4841                                 amdgpu_dm_connector->dc_link,
4842                                 DP_DOWN_STREAM_PORT_COUNT,
4843                                 &dpcd_data,
4844                                 sizeof(dpcd_data))) {
4845                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
4846         }
4847
4848         return capable;
4849 }
4850 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
4851                                            struct edid *edid)
4852 {
4853         int i;
4854         uint64_t val_capable;
4855         bool edid_check_required;
4856         struct detailed_timing *timing;
4857         struct detailed_non_pixel *data;
4858         struct detailed_data_monitor_range *range;
4859         struct amdgpu_dm_connector *amdgpu_dm_connector =
4860                         to_amdgpu_dm_connector(connector);
4861
4862         struct drm_device *dev = connector->dev;
4863         struct amdgpu_device *adev = dev->dev_private;
4864
4865         edid_check_required = false;
4866         if (!amdgpu_dm_connector->dc_sink) {
4867                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4868                 return;
4869         }
4870         if (!adev->dm.freesync_module)
4871                 return;
4872         /*
4873          * if edid non zero restrict freesync only for dp and edp
4874          */
4875         if (edid) {
4876                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
4877                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
4878                         edid_check_required = is_dp_capable_without_timing_msa(
4879                                                 adev->dm.dc,
4880                                                 amdgpu_dm_connector);
4881                 }
4882         }
4883         val_capable = 0;
4884         if (edid_check_required == true && (edid->version > 1 ||
4885            (edid->version == 1 && edid->revision > 1))) {
4886                 for (i = 0; i < 4; i++) {
4887
4888                         timing  = &edid->detailed_timings[i];
4889                         data    = &timing->data.other_data;
4890                         range   = &data->data.range;
4891                         /*
4892                          * Check if monitor has continuous frequency mode
4893                          */
4894                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
4895                                 continue;
4896                         /*
4897                          * Check for flag range limits only. If flag == 1 then
4898                          * no additional timing information provided.
4899                          * Default GTF, GTF Secondary curve and CVT are not
4900                          * supported
4901                          */
4902                         if (range->flags != 1)
4903                                 continue;
4904
4905                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
4906                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
4907                         amdgpu_dm_connector->pixel_clock_mhz =
4908                                 range->pixel_clock_mhz * 10;
4909                         break;
4910                 }
4911
4912                 if (amdgpu_dm_connector->max_vfreq -
4913                                 amdgpu_dm_connector->min_vfreq > 10) {
4914                         amdgpu_dm_connector->caps.supported = true;
4915                         amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
4916                                         amdgpu_dm_connector->min_vfreq * 1000000;
4917                         amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
4918                                         amdgpu_dm_connector->max_vfreq * 1000000;
4919                                 val_capable = 1;
4920                 }
4921         }
4922
4923         /*
4924          * TODO figure out how to notify user-mode or DRM of freesync caps
4925          * once we figure out how to deal with freesync in an upstreamable
4926          * fashion
4927          */
4928
4929 }
4930
void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
{
	/*
	 * Counterpart of amdgpu_dm_add_sink_to_freesync_module();
	 * intentionally a no-op for now.
	 *
	 * TODO fill in once we figure out how to deal with freesync in
	 * an upstreamable fashion
	 */
}