drm/amdgpu: convert amdgpu_display_supported_domains() to IP versions
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_display.c
1 /*
2  * Copyright 2007-8 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Dave Airlie
24  *          Alex Deucher
25  */
26
27 #include <drm/amdgpu_drm.h>
28 #include "amdgpu.h"
29 #include "amdgpu_i2c.h"
30 #include "atom.h"
31 #include "amdgpu_connectors.h"
32 #include "amdgpu_display.h"
33 #include <asm/div64.h>
34
35 #include <linux/pci.h>
36 #include <linux/pm_runtime.h>
37 #include <drm/drm_crtc_helper.h>
38 #include <drm/drm_edid.h>
39 #include <drm/drm_gem_framebuffer_helper.h>
40 #include <drm/drm_fb_helper.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_vblank.h>
43
44 static void amdgpu_display_flip_callback(struct dma_fence *f,
45                                          struct dma_fence_cb *cb)
46 {
47         struct amdgpu_flip_work *work =
48                 container_of(cb, struct amdgpu_flip_work, cb);
49
50         dma_fence_put(f);
51         schedule_work(&work->flip_work.work);
52 }
53
54 static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
55                                              struct dma_fence **f)
56 {
57         struct dma_fence *fence= *f;
58
59         if (fence == NULL)
60                 return false;
61
62         *f = NULL;
63
64         if (!dma_fence_add_callback(fence, &work->cb,
65                                     amdgpu_display_flip_callback))
66                 return true;
67
68         dma_fence_put(fence);
69         return false;
70 }
71
/*
 * Worker that performs the actual MMIO page flip once every fence attached
 * to the new buffer has signaled and the CRTC has left the vertical blank
 * period preceding the target vblank.
 */
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

	/* If any fence is still pending, a callback is armed that will
	 * re-run this worker once it signals; nothing more to do now.
	 */
	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		/* Still too early: poll again in ~1ms instead of busy-waiting. */
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

}
120
121 /*
122  * Handle unpin events outside the interrupt handler proper.
123  */
124 static void amdgpu_display_unpin_work_func(struct work_struct *__work)
125 {
126         struct amdgpu_flip_work *work =
127                 container_of(__work, struct amdgpu_flip_work, unpin_work);
128         int r;
129
130         /* unpin of the old buffer */
131         r = amdgpu_bo_reserve(work->old_abo, true);
132         if (likely(r == 0)) {
133                 amdgpu_bo_unpin(work->old_abo);
134                 amdgpu_bo_unreserve(work->old_abo);
135         } else
136                 DRM_ERROR("failed to reserve buffer after flip\n");
137
138         amdgpu_bo_unref(&work->old_abo);
139         kfree(work->shared);
140         kfree(work);
141 }
142
/*
 * amdgpu_display_crtc_page_flip_target - queue a page flip for a target vblank
 *
 * Pins the new framebuffer's BO, records the fences it must wait on, marks
 * the CRTC flip-pending under the event lock, and kicks the flip worker.
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, or a
 * reserve/pin failure); on error all acquired resources are unwound via the
 * goto labels at the bottom.
 */
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	/* virtual displays have no scanout hardware, so no pin is needed */
	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	/* snapshot the fences the flip worker must wait on before flipping */
	r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
				&work->shared_count, &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	/* run the worker synchronously once; it reschedules itself if fences
	 * are pending or the target vblank has not been reached yet
	 */
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

	/* error unwinding: labels fall through, each undoing one step */
pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}
260
/*
 * amdgpu_display_crtc_set_config - modeset entry point with runtime-PM bookkeeping
 *
 * Wraps drm_crtc_helper_set_config() and keeps an extra pm_runtime reference
 * held for as long as at least one CRTC is enabled (tracked via
 * adev->have_disp_power_ref), so the device is not runtime-suspended while
 * a display is active.
 */
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	/* scan all CRTCs (not just the one being set) for any enabled display */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		/* early return: the get_sync reference above is deliberately
		 * kept (not put) and becomes the display power reference */
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
306
/* Human-readable encoder names, indexed by amdgpu_encoder->encoder_id
 * (presumably the ATOM ENCODER_OBJECT_ID_* values — verify against
 * ObjectID.h if extending this table).
 */
static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};
350
/* Hot-plug-detect pin names, indexed by amdgpu_connector->hpd.hpd
 * (AMDGPU_HPD_1 .. AMDGPU_HPD_6).
 */
static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};
359
360 void amdgpu_display_print_display_setup(struct drm_device *dev)
361 {
362         struct drm_connector *connector;
363         struct amdgpu_connector *amdgpu_connector;
364         struct drm_encoder *encoder;
365         struct amdgpu_encoder *amdgpu_encoder;
366         struct drm_connector_list_iter iter;
367         uint32_t devices;
368         int i = 0;
369
370         drm_connector_list_iter_begin(dev, &iter);
371         DRM_INFO("AMDGPU Display Connectors\n");
372         drm_for_each_connector_iter(connector, &iter) {
373                 amdgpu_connector = to_amdgpu_connector(connector);
374                 DRM_INFO("Connector %d:\n", i);
375                 DRM_INFO("  %s\n", connector->name);
376                 if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
377                         DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
378                 if (amdgpu_connector->ddc_bus) {
379                         DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
380                                  amdgpu_connector->ddc_bus->rec.mask_clk_reg,
381                                  amdgpu_connector->ddc_bus->rec.mask_data_reg,
382                                  amdgpu_connector->ddc_bus->rec.a_clk_reg,
383                                  amdgpu_connector->ddc_bus->rec.a_data_reg,
384                                  amdgpu_connector->ddc_bus->rec.en_clk_reg,
385                                  amdgpu_connector->ddc_bus->rec.en_data_reg,
386                                  amdgpu_connector->ddc_bus->rec.y_clk_reg,
387                                  amdgpu_connector->ddc_bus->rec.y_data_reg);
388                         if (amdgpu_connector->router.ddc_valid)
389                                 DRM_INFO("  DDC Router 0x%x/0x%x\n",
390                                          amdgpu_connector->router.ddc_mux_control_pin,
391                                          amdgpu_connector->router.ddc_mux_state);
392                         if (amdgpu_connector->router.cd_valid)
393                                 DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
394                                          amdgpu_connector->router.cd_mux_control_pin,
395                                          amdgpu_connector->router.cd_mux_state);
396                 } else {
397                         if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
398                             connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
399                             connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
400                             connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
401                             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
402                             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
403                                 DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
404                 }
405                 DRM_INFO("  Encoders:\n");
406                 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
407                         amdgpu_encoder = to_amdgpu_encoder(encoder);
408                         devices = amdgpu_encoder->devices & amdgpu_connector->devices;
409                         if (devices) {
410                                 if (devices & ATOM_DEVICE_CRT1_SUPPORT)
411                                         DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
412                                 if (devices & ATOM_DEVICE_CRT2_SUPPORT)
413                                         DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
414                                 if (devices & ATOM_DEVICE_LCD1_SUPPORT)
415                                         DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
416                                 if (devices & ATOM_DEVICE_DFP1_SUPPORT)
417                                         DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
418                                 if (devices & ATOM_DEVICE_DFP2_SUPPORT)
419                                         DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
420                                 if (devices & ATOM_DEVICE_DFP3_SUPPORT)
421                                         DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
422                                 if (devices & ATOM_DEVICE_DFP4_SUPPORT)
423                                         DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
424                                 if (devices & ATOM_DEVICE_DFP5_SUPPORT)
425                                         DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
426                                 if (devices & ATOM_DEVICE_DFP6_SUPPORT)
427                                         DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
428                                 if (devices & ATOM_DEVICE_TV1_SUPPORT)
429                                         DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
430                                 if (devices & ATOM_DEVICE_CV_SUPPORT)
431                                         DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
432                         }
433                 }
434                 i++;
435         }
436         drm_connector_list_iter_end(&iter);
437 }
438
439 bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
440                               bool use_aux)
441 {
442         u8 out = 0x0;
443         u8 buf[8];
444         int ret;
445         struct i2c_msg msgs[] = {
446                 {
447                         .addr = DDC_ADDR,
448                         .flags = 0,
449                         .len = 1,
450                         .buf = &out,
451                 },
452                 {
453                         .addr = DDC_ADDR,
454                         .flags = I2C_M_RD,
455                         .len = 8,
456                         .buf = buf,
457                 }
458         };
459
460         /* on hw with routers, select right port */
461         if (amdgpu_connector->router.ddc_valid)
462                 amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
463
464         if (use_aux) {
465                 ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
466         } else {
467                 ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
468         }
469
470         if (ret != 2)
471                 /* Couldn't find an accessible DDC on this connector */
472                 return false;
473         /* Probe also for valid EDID header
474          * EDID header starts with:
475          * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
476          * Only the first 6 bytes must be valid as
477          * drm_edid_block_valid() can fix the last 2 bytes */
478         if (drm_edid_header_is_valid(buf) < 6) {
479                 /* Couldn't find an accessible EDID on this
480                  * connector */
481                 return false;
482         }
483         return true;
484 }
485
/* Framebuffer vtable: amdgpu uses the generic GEM-backed fb helpers. */
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
490
/**
 * amdgpu_display_supported_domains - memory domains a scanout BO may live in
 * @adev: amdgpu device
 * @bo_flags: AMDGPU_GEM_CREATE_* flags of the BO in question
 *
 * Returns VRAM always; additionally allows GTT on APUs whose display block
 * can scan out of system memory (scatter/gather display), provided the BO
 * is USWC-mapped. Pre-DCN APUs are matched by asic_type, everything newer
 * by the DCE/DCN hardware IP version.
 */
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * if amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * is not supported for this board. But this mapping is required
	 * to avoid hang caused by placement of scanout BO in GTT on certain
	 * APUs. So force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type)) {
		switch (adev->asic_type) {
		case CHIP_CARRIZO:
		case CHIP_STONEY:
			/* pre-DCN APUs without a DCE_HWIP IP version entry */
			domain |= AMDGPU_GEM_DOMAIN_GTT;
			break;
		default:
			/* newer parts are keyed by display IP version */
			switch (adev->ip_versions[DCE_HWIP][0]) {
			case IP_VERSION(1, 0, 0):
			case IP_VERSION(1, 0, 1):
				/* enable S/G on PCO and RV2 */
				if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
				    (adev->apu_flags & AMD_APU_IS_PICASSO))
					domain |= AMDGPU_GEM_DOMAIN_GTT;
				break;
			case IP_VERSION(2, 1, 0):
			case IP_VERSION(3, 0, 1):
			case IP_VERSION(3, 1, 2):
			case IP_VERSION(3, 1, 3):
				/* DCN revisions with scatter/gather display */
				domain |= AMDGPU_GEM_DOMAIN_GTT;
				break;
			default:
				break;
			}
			break;
		}
	}
#endif

	return domain;
}
538
/* Format descriptions for framebuffers with a DCC modifier: one color
 * plane plus one extra (zero-cpp) plane carrying the DCC metadata surface.
 */
static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	   .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
566
/* Same formats as dcc_formats but with a DCC_RETILE modifier: three planes,
 * the color plane plus two zero-cpp metadata planes (displayable DCC and
 * the retile buffer).
 */
static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	   .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
594
595 static const struct drm_format_info *
596 lookup_format_info(const struct drm_format_info formats[],
597                   int num_formats, u32 format)
598 {
599         int i;
600
601         for (i = 0; i < num_formats; i++) {
602                 if (formats[i].format == format)
603                         return &formats[i];
604         }
605
606         return NULL;
607 }
608
609 const struct drm_format_info *
610 amdgpu_lookup_format_info(u32 format, uint64_t modifier)
611 {
612         if (!IS_AMD_FMT_MOD(modifier))
613                 return NULL;
614
615         if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
616                 return lookup_format_info(dcc_retile_formats,
617                                           ARRAY_SIZE(dcc_retile_formats),
618                                           format);
619
620         if (AMD_FMT_MOD_GET(DCC, modifier))
621                 return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
622                                           format);
623
624         /* returning NULL will cause the default format structs to be used. */
625         return NULL;
626 }
627
628
/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 *
 * Returns 0 with *offset filled in on success, -EINVAL when the metadata is
 * too small or has an unexpected version, or the amdgpu_bo_reserve /
 * amdgpu_bo_get_metadata error.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at least
	 * the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40  || metadata[0] != 1)
		return -EINVAL;

	/* NOTE(review): the bit layouts below mirror the GPU image-descriptor
	 * encoding of the DCC address; confirm against the umd descriptor
	 * definitions before changing either branch.
	 */
	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}
678
/*
 * convert_tiling_flags_to_modifier - derive a DRM format modifier from the
 * legacy GFX9+ tiling flags attached to the framebuffer's BO.
 *
 * On success, stores the modifier in @afb->base.modifier, sets
 * DRM_MODE_FB_MODIFIERS in @afb->base.flags, and — when DCC is enabled —
 * fills in offsets/pitches for the DCC metadata plane(s) and may replace
 * @afb->base.format with a DCC-aware format info.
 *
 * Returns 0 on success, -EINVAL for swizzle modes that cannot be expressed
 * as a modifier (RESERVED/VAR block sizes, Z microtiling) or when no format
 * info exists for the DCC layout.
 */
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;

	/* Swizzle mode 0 (or no tiling flags at all) means a linear surface. */
	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		/* Swizzle modes >= 16 are the XOR variants (_T/_X suffixes). */
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
		/* DCC metadata offset, stored in units of 256 bytes. */
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		/* The upper bits of the swizzle mode select the block size. */
		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64 KiB _T */
		case 6: /* 64 KiB _X */
			block_size_bits = 16;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		/* Pick the tiling version from the ASIC generation. */
		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (adev->family == AMDGPU_FAMILY_NV)
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		/* The low two bits of the swizzle mode select the micro tile type. */
		switch (swizzle & 3) {
		case 0: /* Z microtiling */
			return -EINVAL;
		case 1: /* S microtiling */
			if (!has_xor)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 2:
			/* Non-XOR D with cpp != 4 downgrades to the GFX9 layout. */
			if (!has_xor && afb->base.format->cpp[0] != 4)
				version = AMD_FMT_MOD_TILE_VER_GFX9;
			break;
		case 3:
			break;
		}

		if (has_xor) {
			/*
			 * The number of XOR bits baked into the modifier depends on
			 * the tiling version and is bounded by the block size.
			 */
			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		/* Non-zero DCC offset means the BO carries displayable DCC metadata. */
		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode = adev->asic_type > CHIP_RAVEN ||
						   (adev->asic_type == CHIP_RAVEN &&
						    adev->external_rev_id >= 0x81);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			/* DCC plane lives dcc_offset * 256 bytes after plane 0. */
			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

			/*
			 * If the userspace driver uses retiling the tiling flags do not contain
			 * info on the renderable DCC buffer. Luckily the opaque metadata contains
			 * the info so we can try to extract it. The kernel does not use this info
			 * but we should convert it to a modifier plane for getfb2, so the
			 * userspace driver that gets it doesn't have to juggle around another DCC
			 * plane internally.
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;  /* of base surface data */

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					/*
					 * NOTE(review): extra pipe bit when packers == pipes on
					 * GFX10.3+ — presumably mirrors the HW DCC addressing;
					 * confirm against addrlib.
					 */
					if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					/* GFX9 retile needs RB/pipe counts in the modifier. */
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				/* Convert byte-based block bits to a pixel-based pitch alignment. */
				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}
840
841 /* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
842 static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
843 {
844         u64 micro_tile_mode;
845
846         /* Zero swizzle mode means linear */
847         if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
848                 return 0;
849
850         micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
851         switch (micro_tile_mode) {
852         case 0: /* DISPLAY */
853         case 3: /* RENDER */
854                 return 0;
855         default:
856                 drm_dbg_kms(afb->base.dev,
857                             "Micro tile mode %llu not supported for scanout\n",
858                             micro_tile_mode);
859                 return -EINVAL;
860         }
861 }
862
/*
 * Split a tiling block of 2^block_log2 bytes into a near-square
 * width x height footprint in pixels of @cpp bytes each; the extra
 * bit for odd pixel exponents goes to the width.
 */
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int pixel_log2 = block_log2 - ilog2(cpp);
	unsigned int width_log2 = (pixel_log2 + 1) >> 1;

	*width = 1u << width_log2;
	*height = 1u << (pixel_log2 - width_log2);
}
874
875 static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
876                                        bool pipe_aligned)
877 {
878         unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
879
880         switch (ver) {
881         case AMD_FMT_MOD_TILE_VER_GFX9: {
882                 /*
883                  * TODO: for pipe aligned we may need to check the alignment of the
884                  * total size of the surface, which may need to be bigger than the
885                  * natural alignment due to some HW workarounds
886                  */
887                 return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
888         }
889         case AMD_FMT_MOD_TILE_VER_GFX10:
890         case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
891                 int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
892
893                 if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
894                     AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
895                         ++pipes_log2;
896
897                 return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
898         }
899         default:
900                 return 0;
901         }
902 }
903
904 static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
905                                        const struct drm_format_info *format,
906                                        unsigned int block_width, unsigned int block_height,
907                                        unsigned int block_size_log2)
908 {
909         unsigned int width = rfb->base.width /
910                 ((plane && plane < format->num_planes) ? format->hsub : 1);
911         unsigned int height = rfb->base.height /
912                 ((plane && plane < format->num_planes) ? format->vsub : 1);
913         unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
914         unsigned int block_pitch = block_width * cpp;
915         unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
916         unsigned int block_size = 1 << block_size_log2;
917         uint64_t size;
918
919         if (rfb->base.pitches[plane] % block_pitch) {
920                 drm_dbg_kms(rfb->base.dev,
921                             "pitch %d for plane %d is not a multiple of block pitch %d\n",
922                             rfb->base.pitches[plane], plane, block_pitch);
923                 return -EINVAL;
924         }
925         if (rfb->base.pitches[plane] < min_pitch) {
926                 drm_dbg_kms(rfb->base.dev,
927                             "pitch %d for plane %d is less than minimum pitch %d\n",
928                             rfb->base.pitches[plane], plane, min_pitch);
929                 return -EINVAL;
930         }
931
932         /* Force at least natural alignment. */
933         if (rfb->base.offsets[plane] % block_size) {
934                 drm_dbg_kms(rfb->base.dev,
935                             "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
936                             rfb->base.offsets[plane], plane, block_size);
937                 return -EINVAL;
938         }
939
940         size = rfb->base.offsets[plane] +
941                 (uint64_t)rfb->base.pitches[plane] / block_pitch *
942                 block_size * DIV_ROUND_UP(height, block_height);
943
944         if (rfb->base.obj[0]->size < size) {
945                 drm_dbg_kms(rfb->base.dev,
946                             "BO size 0x%zx is less than 0x%llx required for plane %d\n",
947                             rfb->base.obj[0]->size, size, plane);
948                 return -EINVAL;
949         }
950
951         return 0;
952 }
953
954
/*
 * amdgpu_display_verify_sizes - validate every plane of the framebuffer,
 * including the extra DCC metadata plane(s) implied by the modifier.
 *
 * Returns 0 if all plane layouts fit the backing BO, -EINVAL otherwise.
 * A no-op (returns 0) when the device does not use format modifiers.
 */
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (!rfb->base.dev->mode_config.allow_fb_modifiers)
		return 0;

	/* First validate the color planes described by the format itself. */
	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			/* Linear is checked in 256-byte groups of pixels. */
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			/*
			 * NOTE(review): the DC_SW_* enum values appear to be
			 * offset by one from the modifier TILE encoding, hence
			 * the "+ 1" — confirm against the DC swizzle enum.
			 */
			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	/* After the loop, i indexes the first DCC metadata plane (if any). */
	if (AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			/* Retiled DCC: check the displayable DCC plane first... */
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			/* +8: block dimensions are computed on the covered data size. */
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			/* ...then fall through to check the pipe/RB-aligned DCC plane. */
			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}
1030
1031 static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1032                                       uint64_t *tiling_flags, bool *tmz_surface)
1033 {
1034         struct amdgpu_bo *rbo;
1035         int r;
1036
1037         if (!amdgpu_fb) {
1038                 *tiling_flags = 0;
1039                 *tmz_surface = false;
1040                 return 0;
1041         }
1042
1043         rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
1044         r = amdgpu_bo_reserve(rbo, false);
1045
1046         if (unlikely(r)) {
1047                 /* Don't show error message when returning -ERESTARTSYS */
1048                 if (r != -ERESTARTSYS)
1049                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
1050                 return r;
1051         }
1052
1053         if (tiling_flags)
1054                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1055
1056         if (tmz_surface)
1057                 *tmz_surface = amdgpu_bo_encrypted(rbo);
1058
1059         amdgpu_bo_unreserve(rbo);
1060
1061         return r;
1062 }
1063
1064 int amdgpu_display_gem_fb_init(struct drm_device *dev,
1065                                struct amdgpu_framebuffer *rfb,
1066                                const struct drm_mode_fb_cmd2 *mode_cmd,
1067                                struct drm_gem_object *obj)
1068 {
1069         int ret;
1070
1071         rfb->base.obj[0] = obj;
1072         drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
1073
1074         ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
1075         if (ret)
1076                 goto err;
1077
1078         ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
1079         if (ret)
1080                 goto err;
1081
1082         return 0;
1083 err:
1084         drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
1085         rfb->base.obj[0] = NULL;
1086         return ret;
1087 }
1088
1089 int amdgpu_display_gem_fb_verify_and_init(
1090         struct drm_device *dev, struct amdgpu_framebuffer *rfb,
1091         struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
1092         struct drm_gem_object *obj)
1093 {
1094         int ret;
1095
1096         rfb->base.obj[0] = obj;
1097         drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
1098         /* Verify that the modifier is supported. */
1099         if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
1100                                       mode_cmd->modifier[0])) {
1101                 drm_dbg_kms(dev,
1102                             "unsupported pixel format %p4cc / modifier 0x%llx\n",
1103                             &mode_cmd->pixel_format, mode_cmd->modifier[0]);
1104
1105                 ret = -EINVAL;
1106                 goto err;
1107         }
1108
1109         ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
1110         if (ret)
1111                 goto err;
1112
1113         ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
1114         if (ret)
1115                 goto err;
1116
1117         return 0;
1118 err:
1119         drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
1120         rfb->base.obj[0] = NULL;
1121         return ret;
1122 }
1123
1124 int amdgpu_display_framebuffer_init(struct drm_device *dev,
1125                                     struct amdgpu_framebuffer *rfb,
1126                                     const struct drm_mode_fb_cmd2 *mode_cmd,
1127                                     struct drm_gem_object *obj)
1128 {
1129         struct amdgpu_device *adev = drm_to_adev(dev);
1130         int ret, i;
1131
1132         /*
1133          * This needs to happen before modifier conversion as that might change
1134          * the number of planes.
1135          */
1136         for (i = 1; i < rfb->base.format->num_planes; ++i) {
1137                 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
1138                         drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
1139                                     i, mode_cmd->handles[0], mode_cmd->handles[i]);
1140                         ret = -EINVAL;
1141                         return ret;
1142                 }
1143         }
1144
1145         ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
1146         if (ret)
1147                 return ret;
1148
1149         if (!dev->mode_config.allow_fb_modifiers) {
1150                 drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
1151                               "GFX9+ requires FB check based on format modifier\n");
1152                 ret = check_tiling_flags_gfx6(rfb);
1153                 if (ret)
1154                         return ret;
1155         }
1156
1157         if (dev->mode_config.allow_fb_modifiers &&
1158             !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
1159                 ret = convert_tiling_flags_to_modifier(rfb);
1160                 if (ret) {
1161                         drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
1162                                     rfb->tiling_flags);
1163                         return ret;
1164                 }
1165         }
1166
1167         ret = amdgpu_display_verify_sizes(rfb);
1168         if (ret)
1169                 return ret;
1170
1171         for (i = 0; i < rfb->base.format->num_planes; ++i) {
1172                 drm_gem_object_get(rfb->base.obj[0]);
1173                 rfb->base.obj[i] = rfb->base.obj[0];
1174         }
1175
1176         return 0;
1177 }
1178
1179 struct drm_framebuffer *
1180 amdgpu_display_user_framebuffer_create(struct drm_device *dev,
1181                                        struct drm_file *file_priv,
1182                                        const struct drm_mode_fb_cmd2 *mode_cmd)
1183 {
1184         struct amdgpu_framebuffer *amdgpu_fb;
1185         struct drm_gem_object *obj;
1186         struct amdgpu_bo *bo;
1187         uint32_t domains;
1188         int ret;
1189
1190         obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
1191         if (obj ==  NULL) {
1192                 drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
1193                             "can't create framebuffer\n", mode_cmd->handles[0]);
1194                 return ERR_PTR(-ENOENT);
1195         }
1196
1197         /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
1198         bo = gem_to_amdgpu_bo(obj);
1199         domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
1200         if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
1201                 drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
1202                 drm_gem_object_put(obj);
1203                 return ERR_PTR(-EINVAL);
1204         }
1205
1206         amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
1207         if (amdgpu_fb == NULL) {
1208                 drm_gem_object_put(obj);
1209                 return ERR_PTR(-ENOMEM);
1210         }
1211
1212         ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
1213                                                     mode_cmd, obj);
1214         if (ret) {
1215                 kfree(amdgpu_fb);
1216                 drm_gem_object_put(obj);
1217                 return ERR_PTR(ret);
1218         }
1219
1220         drm_gem_object_put(obj);
1221         return &amdgpu_fb->base;
1222 }
1223
/* Mode-config callbacks used by the non-DC display path. */
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};
1228
/* Values for the "underscan" connector property. */
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

/* Values for the "audio" connector property. */
static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
/* Values for the "dither" connector property. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
1246
1247 int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
1248 {
1249         int sz;
1250
1251         adev->mode_info.coherent_mode_property =
1252                 drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
1253         if (!adev->mode_info.coherent_mode_property)
1254                 return -ENOMEM;
1255
1256         adev->mode_info.load_detect_property =
1257                 drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
1258         if (!adev->mode_info.load_detect_property)
1259                 return -ENOMEM;
1260
1261         drm_mode_create_scaling_mode_property(adev_to_drm(adev));
1262
1263         sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
1264         adev->mode_info.underscan_property =
1265                 drm_property_create_enum(adev_to_drm(adev), 0,
1266                                          "underscan",
1267                                          amdgpu_underscan_enum_list, sz);
1268
1269         adev->mode_info.underscan_hborder_property =
1270                 drm_property_create_range(adev_to_drm(adev), 0,
1271                                           "underscan hborder", 0, 128);
1272         if (!adev->mode_info.underscan_hborder_property)
1273                 return -ENOMEM;
1274
1275         adev->mode_info.underscan_vborder_property =
1276                 drm_property_create_range(adev_to_drm(adev), 0,
1277                                           "underscan vborder", 0, 128);
1278         if (!adev->mode_info.underscan_vborder_property)
1279                 return -ENOMEM;
1280
1281         sz = ARRAY_SIZE(amdgpu_audio_enum_list);
1282         adev->mode_info.audio_property =
1283                 drm_property_create_enum(adev_to_drm(adev), 0,
1284                                          "audio",
1285                                          amdgpu_audio_enum_list, sz);
1286
1287         sz = ARRAY_SIZE(amdgpu_dither_enum_list);
1288         adev->mode_info.dither_property =
1289                 drm_property_create_enum(adev_to_drm(adev), 0,
1290                                          "dither",
1291                                          amdgpu_dither_enum_list, sz);
1292
1293         if (amdgpu_device_has_dc_support(adev)) {
1294                 adev->mode_info.abm_level_property =
1295                         drm_property_create_range(adev_to_drm(adev), 0,
1296                                                   "abm level", 0, 4);
1297                 if (!adev->mode_info.abm_level_property)
1298                         return -ENOMEM;
1299         }
1300
1301         return 0;
1302 }
1303
1304 void amdgpu_display_update_priority(struct amdgpu_device *adev)
1305 {
1306         /* adjustment options for the display watermarks */
1307         if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
1308                 adev->mode_info.disp_priority = 0;
1309         else
1310                 adev->mode_info.disp_priority = amdgpu_disp_priority;
1311
1312 }
1313
1314 static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
1315 {
1316         /* try and guess if this is a tv or a monitor */
1317         if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
1318             (mode->vdisplay == 576) || /* 576p */
1319             (mode->vdisplay == 720) || /* 720p */
1320             (mode->vdisplay == 1080)) /* 1080p */
1321                 return true;
1322         else
1323                 return false;
1324 }
1325
/*
 * amdgpu_display_crtc_scaling_mode_fixup - derive the CRTC's RMX scaling
 * type, underscan borders and h/v scaling ratios from the encoders driving
 * it. Always returns true (the mode is never rejected here).
 */
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	/* Scaling defaults to 1:1 if no encoder is attached to this CRTC. */
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	/* Walk all encoders attached to this CRTC; the last one wins. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			/* Explicit border if set, otherwise ~3% of the mode + 16. */
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			/* Underscan scales the full mode into the bordered area. */
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	/* Compute fixed-point src/dst scaling ratios (1.0 when not scaling). */
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
1399
1400 /*
1401  * Retrieve current video scanout position of crtc on a given gpu, and
1402  * an optional accurate timestamp of when query happened.
1403  *
1404  * \param dev Device to query.
1405  * \param pipe Crtc to query.
1406  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
1407  *              For driver internal use only also supports these flags:
1408  *
1409  *              USE_REAL_VBLANKSTART to use the real start of vblank instead
1410  *              of a fudged earlier start of vblank.
1411  *
1412  *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
1413  *              fudged earlier start of vblank in *vpos and the distance
1414  *              to true start of vblank in *hpos.
1415  *
1416  * \param *vpos Location where vertical scanout position should be stored.
1417  * \param *hpos Location where horizontal scanout position should go.
1418  * \param *stime Target location for timestamp taken immediately before
1419  *               scanout position query. Can be NULL to skip timestamp.
1420  * \param *etime Target location for timestamp taken immediately after
1421  *               scanout position query. Can be NULL to skip timestamp.
1422  *
1423  * Returns vpos as a positive number while in active scanout area.
1424  * Returns vpos as a negative number inside vblank, counting the number
1425  * of scanlines to go until end of vblank, e.g., -1 means "one scanline
1426  * until start of active scanout / end of vblank."
1427  *
1428  * \return Flags, or'ed together as follows:
1429  *
1430  * DRM_SCANOUTPOS_VALID = Query successful.
1431  * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
1432  * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
1433  * this flag means that returned position may be offset by a constant but
1434  * unknown small number of scanlines wrt. real scanout position.
1435  *
1436  */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = drm_to_adev(dev);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	/* Hardware query; on success the position is considered valid.
	 * stime/etime bracket this call so callers can bound the query latency.
	 */
	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position.
	 * 'position' packs vpos in bits 0-12 and hpos in bits 16-28.
	 */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. 'vbl' packs vblank start in bits 0-12 and
		 * vblank end in bits 16-28.
		 */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	}
	else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
	    /* Caller wants distance from real vbl_start in *hpos */
	    *hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
	    ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}
1536
1537 int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
1538 {
1539         if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
1540                 return AMDGPU_CRTC_IRQ_NONE;
1541
1542         switch (crtc) {
1543         case 0:
1544                 return AMDGPU_CRTC_IRQ_VBLANK1;
1545         case 1:
1546                 return AMDGPU_CRTC_IRQ_VBLANK2;
1547         case 2:
1548                 return AMDGPU_CRTC_IRQ_VBLANK3;
1549         case 3:
1550                 return AMDGPU_CRTC_IRQ_VBLANK4;
1551         case 4:
1552                 return AMDGPU_CRTC_IRQ_VBLANK5;
1553         case 5:
1554                 return AMDGPU_CRTC_IRQ_VBLANK6;
1555         default:
1556                 return AMDGPU_CRTC_IRQ_NONE;
1557         }
1558 }
1559
1560 bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
1561                         bool in_vblank_irq, int *vpos,
1562                         int *hpos, ktime_t *stime, ktime_t *etime,
1563                         const struct drm_display_mode *mode)
1564 {
1565         struct drm_device *dev = crtc->dev;
1566         unsigned int pipe = crtc->index;
1567
1568         return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
1569                                                   stime, etime, mode);
1570 }
1571
1572 int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
1573 {
1574         struct drm_device *dev = adev_to_drm(adev);
1575         struct drm_crtc *crtc;
1576         struct drm_connector *connector;
1577         struct drm_connector_list_iter iter;
1578         int r;
1579
1580         /* turn off display hw */
1581         drm_modeset_lock_all(dev);
1582         drm_connector_list_iter_begin(dev, &iter);
1583         drm_for_each_connector_iter(connector, &iter)
1584                 drm_helper_connector_dpms(connector,
1585                                           DRM_MODE_DPMS_OFF);
1586         drm_connector_list_iter_end(&iter);
1587         drm_modeset_unlock_all(dev);
1588         /* unpin the front buffers and cursors */
1589         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1590                 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1591                 struct drm_framebuffer *fb = crtc->primary->fb;
1592                 struct amdgpu_bo *robj;
1593
1594                 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
1595                         struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1596                         r = amdgpu_bo_reserve(aobj, true);
1597                         if (r == 0) {
1598                                 amdgpu_bo_unpin(aobj);
1599                                 amdgpu_bo_unreserve(aobj);
1600                         }
1601                 }
1602
1603                 if (fb == NULL || fb->obj[0] == NULL) {
1604                         continue;
1605                 }
1606                 robj = gem_to_amdgpu_bo(fb->obj[0]);
1607                 r = amdgpu_bo_reserve(robj, true);
1608                 if (r == 0) {
1609                         amdgpu_bo_unpin(robj);
1610                         amdgpu_bo_unreserve(robj);
1611                 }
1612         }
1613         return 0;
1614 }
1615
1616 int amdgpu_display_resume_helper(struct amdgpu_device *adev)
1617 {
1618         struct drm_device *dev = adev_to_drm(adev);
1619         struct drm_connector *connector;
1620         struct drm_connector_list_iter iter;
1621         struct drm_crtc *crtc;
1622         int r;
1623
1624         /* pin cursors */
1625         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1626                 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1627
1628                 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
1629                         struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1630                         r = amdgpu_bo_reserve(aobj, true);
1631                         if (r == 0) {
1632                                 r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
1633                                 if (r != 0)
1634                                         dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
1635                                 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
1636                                 amdgpu_bo_unreserve(aobj);
1637                         }
1638                 }
1639         }
1640
1641         drm_helper_resume_force_mode(dev);
1642
1643         /* turn on display hw */
1644         drm_modeset_lock_all(dev);
1645
1646         drm_connector_list_iter_begin(dev, &iter);
1647         drm_for_each_connector_iter(connector, &iter)
1648                 drm_helper_connector_dpms(connector,
1649                                           DRM_MODE_DPMS_ON);
1650         drm_connector_list_iter_end(&iter);
1651
1652         drm_modeset_unlock_all(dev);
1653
1654         return 0;
1655 }
1656