2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
47 #include "amdgpu_pm.h"
49 #include "amd_shared.h"
50 #include "amdgpu_dm_irq.h"
51 #include "dm_helpers.h"
52 #include "amdgpu_dm_mst_types.h"
53 #if defined(CONFIG_DEBUG_FS)
54 #include "amdgpu_dm_debugfs.h"
57 #include "ivsrcid/ivsrcid_vislands30.h"
59 #include <linux/module.h>
60 #include <linux/moduleparam.h>
61 #include <linux/version.h>
62 #include <linux/types.h>
63 #include <linux/pm_runtime.h>
64 #include <linux/pci.h>
65 #include <linux/firmware.h>
66 #include <linux/component.h>
68 #include <drm/drm_atomic.h>
69 #include <drm/drm_atomic_uapi.h>
70 #include <drm/drm_atomic_helper.h>
71 #include <drm/drm_dp_mst_helper.h>
72 #include <drm/drm_fb_helper.h>
73 #include <drm/drm_fourcc.h>
74 #include <drm/drm_edid.h>
75 #include <drm/drm_vblank.h>
76 #include <drm/drm_audio_component.h>
77 #include <drm/drm_hdcp.h>
79 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
80 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 #include "dcn/dcn_1_0_offset.h"
83 #include "dcn/dcn_1_0_sh_mask.h"
84 #include "soc15_hw_ip.h"
85 #include "vega10_ip_offset.h"
87 #include "soc15_common.h"
90 #include "modules/inc/mod_freesync.h"
91 #include "modules/power/power_helpers.h"
92 #include "modules/inc/mod_info_packet.h"
94 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
95 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
98 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
103 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
104 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
105 * requests into DC requests, and DC responses into DRM responses.
107 * The root control structure is &struct amdgpu_display_manager.
110 /* basic init/fini API */
111 static int amdgpu_dm_init(struct amdgpu_device *adev);
112 static void amdgpu_dm_fini(struct amdgpu_device *adev);
115 * initializes drm_device display related structures, based on the information
116 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
117 * drm_encoder, drm_mode_config
119 * Returns 0 on success
121 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
122 /* Removes and deallocates the drm structures created by the above function */
123 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
126 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
128 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
129 struct drm_plane *plane,
130 unsigned long possible_crtcs,
131 const struct dc_plane_cap *plane_cap);
132 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
133 struct drm_plane *plane,
134 uint32_t link_index);
135 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
136 struct amdgpu_dm_connector *amdgpu_dm_connector,
138 struct amdgpu_encoder *amdgpu_encoder);
139 static int amdgpu_dm_encoder_init(struct drm_device *dev,
140 struct amdgpu_encoder *aencoder,
141 uint32_t link_index);
143 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
145 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
146 struct drm_atomic_state *state,
149 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
151 static int amdgpu_dm_atomic_check(struct drm_device *dev,
152 struct drm_atomic_state *state);
154 static void handle_cursor_update(struct drm_plane *plane,
155 struct drm_plane_state *old_plane_state);
157 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
158 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
159 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
160 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
164 * dm_vblank_get_counter
167 * Get counter for number of vertical blanks
170 * struct amdgpu_device *adev - [in] desired amdgpu device
171 * int crtc - [in] which CRTC to get the counter from
174 * Counter for vertical blanks
176 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
178 if (crtc >= adev->mode_info.num_crtc)
181 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
186 if (acrtc_state->stream == NULL) {
187 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
192 return dc_stream_get_vblank_counter(acrtc_state->stream);
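/*
 * dm_crtc_get_scanoutpos() - Report the current scanout position for a CRTC.
 *
 * Queries DC for the vblank start/end and the horizontal/vertical scanout
 * position, then packs them into the legacy register-style format expected
 * by the base driver.
 */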
196 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
197 u32 *vbl, u32 *position)
199 uint32_t v_blank_start, v_blank_end, h_position, v_position;
201 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
204 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
205 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
208 if (acrtc_state->stream == NULL) {
209 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
215 * TODO rework base driver to use values directly.
216 * For now, parse it back into reg-format.
218 dc_stream_get_scanoutpos(acrtc_state->stream,
224 *position = v_position | (h_position << 16);
225 *vbl = v_blank_start | (v_blank_end << 16);
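/*
 * Stub implementations of the amd_ip_funcs idle/reset hooks; DM needs no
 * special handling here, so these effectively do nothing.
 */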
231 static bool dm_is_idle(void *handle)
237 static int dm_wait_for_idle(void *handle)
243 static bool dm_check_soft_reset(void *handle)
248 static int dm_soft_reset(void *handle)
254 static struct amdgpu_crtc *
255 get_crtc_by_otg_inst(struct amdgpu_device *adev,
258 struct drm_device *dev = adev->ddev;
259 struct drm_crtc *crtc;
260 struct amdgpu_crtc *amdgpu_crtc;
262 if (otg_inst == -1) {
264 return adev->mode_info.crtcs[0];
267 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
268 amdgpu_crtc = to_amdgpu_crtc(crtc);
270 if (amdgpu_crtc->otg_inst == otg_inst)
277 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
279 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
280 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
284 * dm_pflip_high_irq() - Handle pageflip interrupt
285 * @interrupt_params: common IRQ parameters for the pageflip interrupt source
287 * Handles the pageflip interrupt by notifying all interested parties
288 * that the pageflip has been completed.
290 static void dm_pflip_high_irq(void *interrupt_params)
292 struct amdgpu_crtc *amdgpu_crtc;
293 struct common_irq_params *irq_params = interrupt_params;
294 struct amdgpu_device *adev = irq_params->adev;
296 struct drm_pending_vblank_event *e;
297 struct dm_crtc_state *acrtc_state;
298 uint32_t vpos, hpos, v_blank_start, v_blank_end;
301 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
303 /* IRQ could occur when in initial stage */
304 /* TODO work and BO cleanup */
305 if (amdgpu_crtc == NULL) {
306 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
310 spin_lock_irqsave(&adev->ddev->event_lock, flags);
312 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
313 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
314 amdgpu_crtc->pflip_status,
315 AMDGPU_FLIP_SUBMITTED,
316 amdgpu_crtc->crtc_id,
318 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
322 /* page flip completed. */
323 e = amdgpu_crtc->event;
324 amdgpu_crtc->event = NULL;
329 acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
330 vrr_active = amdgpu_dm_vrr_active(acrtc_state);
332 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
334 !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
335 &v_blank_end, &hpos, &vpos) ||
336 (vpos < v_blank_start)) {
337 /* Update to correct count and vblank timestamp if racing with
338 * vblank irq. This also updates to the correct vblank timestamp
339 * even in VRR mode, as scanout is past the front-porch at the moment.
341 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
343 /* Wake up userspace by sending the pageflip event with proper
344 * count and timestamp of vblank of flip completion.
347 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
349 /* Event sent, so done with vblank for this flip */
350 drm_crtc_vblank_put(&amdgpu_crtc->base);
353 /* VRR active and inside front-porch: vblank count and
354 * timestamp for pageflip event will only be up to date after
355 * drm_crtc_handle_vblank() has been executed from late vblank
356 * irq handler after start of back-porch (vline 0). We queue the
357 * pageflip event for send-out by drm_crtc_handle_vblank() with
358 * updated timestamp and count, once it runs after us.
360 * We need to open-code this instead of using the helper
361 * drm_crtc_arm_vblank_event(), as that helper would
362 * call drm_crtc_accurate_vblank_count(), which we must
363 * not call in VRR mode while we are in front-porch!
366 /* sequence will be replaced by real count during send-out. */
367 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
368 e->pipe = amdgpu_crtc->crtc_id;
370 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
374 /* Keep track of the vblank of this flip for flip throttling. We use the
375 * cooked hw counter, as it is incremented at the start of this vblank
376 * of pageflip completion, so last_flip_vblank is the forbidden count
377 * for queueing new pageflips if vsync + VRR is enabled.
379 amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
380 amdgpu_crtc->crtc_id);
382 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
383 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
385 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
386 amdgpu_crtc->crtc_id, amdgpu_crtc,
387 vrr_active, (int) !e);
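/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: common IRQ parameters for the VUPDATE interrupt source
 *
 * In VRR mode the core vblank handling is deferred to this handler, which
 * runs after the end of the front-porch, so that vblank timestamps and any
 * queued pageflip completion events are delivered with valid values.
 */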
390 static void dm_vupdate_high_irq(void *interrupt_params)
392 struct common_irq_params *irq_params = interrupt_params;
393 struct amdgpu_device *adev = irq_params->adev;
394 struct amdgpu_crtc *acrtc;
395 struct dm_crtc_state *acrtc_state;
398 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
401 acrtc_state = to_dm_crtc_state(acrtc->base.state);
403 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
404 amdgpu_dm_vrr_active(acrtc_state));
406 /* Core vblank handling is done here after the end of front-porch in
407 * vrr mode, as vblank timestamping only gives valid results once we
408 * are past the front-porch. This will also deliver any
409 * page-flip completion events that have been queued to us,
410 * if a pageflip happened inside front-porch.
412 if (amdgpu_dm_vrr_active(acrtc_state)) {
413 drm_crtc_handle_vblank(&acrtc->base);
415 /* BTR processing for pre-DCE12 ASICs */
416 if (acrtc_state->stream &&
417 adev->family < AMDGPU_FAMILY_AI) {
418 spin_lock_irqsave(&adev->ddev->event_lock, flags);
419 mod_freesync_handle_v_update(
420 adev->dm.freesync_module,
422 &acrtc_state->vrr_params);
424 dc_stream_adjust_vmin_vmax(
427 &acrtc_state->vrr_params.adjust);
428 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
435 * dm_crtc_high_irq() - Handles CRTC interrupt
436 * @interrupt_params: common IRQ parameters for the CRTC/VSYNC interrupt source
438 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
441 static void dm_crtc_high_irq(void *interrupt_params)
443 struct common_irq_params *irq_params = interrupt_params;
444 struct amdgpu_device *adev = irq_params->adev;
445 struct amdgpu_crtc *acrtc;
446 struct dm_crtc_state *acrtc_state;
449 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
452 acrtc_state = to_dm_crtc_state(acrtc->base.state);
454 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
455 amdgpu_dm_vrr_active(acrtc_state));
457 /* Core vblank handling at the start of front-porch is only possible
458 * in non-vrr mode, as only then does vblank timestamping give
459 * valid results while inside the front-porch. Otherwise defer it
460 * to dm_vupdate_high_irq after the end of front-porch.
462 if (!amdgpu_dm_vrr_active(acrtc_state))
463 drm_crtc_handle_vblank(&acrtc->base);
465 /* The following must happen at the start of vblank, for crc
466 * computation and below-the-range (btr) support in vrr mode.
468 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
470 if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
471 acrtc_state->vrr_params.supported &&
472 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
473 spin_lock_irqsave(&adev->ddev->event_lock, flags);
474 mod_freesync_handle_v_update(
475 adev->dm.freesync_module,
477 &acrtc_state->vrr_params);
479 dc_stream_adjust_vmin_vmax(
482 &acrtc_state->vrr_params.adjust);
483 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
488 static int dm_set_clockgating_state(void *handle,
489 enum amd_clockgating_state state)
494 static int dm_set_powergating_state(void *handle,
495 enum amd_powergating_state state)
500 /* Prototypes of private functions */
501 static int dm_early_init(void* handle);
503 /* Allocate memory for FBC compressed data */
504 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
506 struct drm_device *dev = connector->dev;
507 struct amdgpu_device *adev = dev->dev_private;
508 struct dm_comressor_info *compressor = &adev->dm.compressor;
509 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
510 struct drm_display_mode *mode;
511 unsigned long max_size = 0;
513 if (adev->dm.dc->fbc_compressor == NULL)
516 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
519 if (compressor->bo_ptr)
523 list_for_each_entry(mode, &connector->modes, head) {
524 if (max_size < mode->htotal * mode->vtotal)
525 max_size = mode->htotal * mode->vtotal;
529 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
530 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
531 &compressor->gpu_addr, &compressor->cpu_addr);
534 DRM_ERROR("DM: Failed to initialize FBC\n");
536 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
537 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
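/*
 * Audio component glue: the HDA audio driver calls back into DM through
 * these ops to fetch the ELD for a given audio pin (port).
 */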
544 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
545 int pipe, bool *enabled,
546 unsigned char *buf, int max_bytes)
548 struct drm_device *dev = dev_get_drvdata(kdev);
549 struct amdgpu_device *adev = dev->dev_private;
550 struct drm_connector *connector;
551 struct drm_connector_list_iter conn_iter;
552 struct amdgpu_dm_connector *aconnector;
557 mutex_lock(&adev->dm.audio_lock);
559 drm_connector_list_iter_begin(dev, &conn_iter);
560 drm_for_each_connector_iter(connector, &conn_iter) {
561 aconnector = to_amdgpu_dm_connector(connector);
562 if (aconnector->audio_inst != port)
566 ret = drm_eld_size(connector->eld);
567 memcpy(buf, connector->eld, min(max_bytes, ret));
571 drm_connector_list_iter_end(&conn_iter);
573 mutex_unlock(&adev->dm.audio_lock);
575 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
580 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
581 .get_eld = amdgpu_dm_audio_component_get_eld,
584 static int amdgpu_dm_audio_component_bind(struct device *kdev,
585 struct device *hda_kdev, void *data)
587 struct drm_device *dev = dev_get_drvdata(kdev);
588 struct amdgpu_device *adev = dev->dev_private;
589 struct drm_audio_component *acomp = data;
591 acomp->ops = &amdgpu_dm_audio_component_ops;
593 adev->dm.audio_component = acomp;
598 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
599 struct device *hda_kdev, void *data)
601 struct drm_device *dev = dev_get_drvdata(kdev);
602 struct amdgpu_device *adev = dev->dev_private;
603 struct drm_audio_component *acomp = data;
607 adev->dm.audio_component = NULL;
610 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
611 .bind = amdgpu_dm_audio_component_bind,
612 .unbind = amdgpu_dm_audio_component_unbind,
615 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
622 adev->mode_info.audio.enabled = true;
624 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
626 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
627 adev->mode_info.audio.pin[i].channels = -1;
628 adev->mode_info.audio.pin[i].rate = -1;
629 adev->mode_info.audio.pin[i].bits_per_sample = -1;
630 adev->mode_info.audio.pin[i].status_bits = 0;
631 adev->mode_info.audio.pin[i].category_code = 0;
632 adev->mode_info.audio.pin[i].connected = false;
633 adev->mode_info.audio.pin[i].id =
634 adev->dm.dc->res_pool->audios[i]->inst;
635 adev->mode_info.audio.pin[i].offset = 0;
638 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
642 adev->dm.audio_registered = true;
647 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
652 if (!adev->mode_info.audio.enabled)
655 if (adev->dm.audio_registered) {
656 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
657 adev->dm.audio_registered = false;
660 /* TODO: Disable audio? */
662 adev->mode_info.audio.enabled = false;
665 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
667 struct drm_audio_component *acomp = adev->dm.audio_component;
669 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
670 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
672 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
677 static int dm_dmub_hw_init(struct amdgpu_device *adev)
679 const unsigned int psp_header_bytes = 0x100;
680 const unsigned int psp_footer_bytes = 0x100;
681 const struct dmcub_firmware_header_v1_0 *hdr;
682 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
683 const struct firmware *dmub_fw = adev->dm.dmub_fw;
684 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
685 struct abm *abm = adev->dm.dc->res_pool->abm;
686 struct dmub_srv_region_params region_params;
687 struct dmub_srv_region_info region_info;
688 struct dmub_srv_fb_params fb_params;
689 struct dmub_srv_fb_info fb_info;
690 struct dmub_srv_hw_params hw_params;
691 enum dmub_status status;
692 const unsigned char *fw_inst_const, *fw_bss_data;
698 /* DMUB isn't supported on the ASIC. */
702 /* Firmware required for DMUB support. */
703 DRM_ERROR("No firmware provided for DMUB.\n");
707 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
708 if (status != DMUB_STATUS_OK) {
709 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
713 if (!has_hw_support) {
714 DRM_INFO("DMUB unsupported on ASIC\n");
718 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
720 /* Calculate the size of all the regions for the DMUB service. */
721 memset(&region_params, 0, sizeof(region_params));
723 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
724 psp_header_bytes - psp_footer_bytes;
725 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
726 region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size;
728 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
731 if (status != DMUB_STATUS_OK) {
732 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
737 * Allocate a framebuffer based on the total size of all the regions.
738 * TODO: Move this into GART.
740 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
741 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
742 &adev->dm.dmub_bo_gpu_addr,
743 &adev->dm.dmub_bo_cpu_addr);
747 /* Rebase the regions on the framebuffer address. */
748 memset(&fb_params, 0, sizeof(fb_params));
749 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
750 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
751 fb_params.region_info = &region_info;
753 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info);
754 if (status != DMUB_STATUS_OK) {
755 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
759 fw_inst_const = dmub_fw->data +
760 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
763 fw_bss_data = dmub_fw->data +
764 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765 le32_to_cpu(hdr->inst_const_bytes);
767 /* Copy firmware and bios info into FB memory. */
768 memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
769 region_params.inst_const_size);
770 memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
771 region_params.bss_data_size);
772 memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr,
773 adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size);
775 /* Initialize hardware. */
776 memset(&hw_params, 0, sizeof(hw_params));
777 hw_params.fb_base = adev->gmc.fb_start;
778 hw_params.fb_offset = adev->gmc.aper_base;
781 hw_params.psp_version = dmcu->psp_version;
783 for (i = 0; i < fb_info.num_fb; ++i)
784 hw_params.fb[i] = &fb_info.fb[i];
786 status = dmub_srv_hw_init(dmub_srv, &hw_params);
787 if (status != DMUB_STATUS_OK) {
788 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
792 /* Wait for firmware load to finish. */
793 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
794 if (status != DMUB_STATUS_OK)
795 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
797 /* Init DMCU and ABM if available. */
799 dmcu->funcs->dmcu_init(dmcu);
800 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
803 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
804 if (!adev->dm.dc->ctx->dmub_srv) {
805 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
809 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
810 adev->dm.dmcub_fw_version);
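/**
 * amdgpu_dm_init() - Create and initialize the display manager
 * @adev: the base driver device
 *
 * Creates DC, the freesync and color management modules, registers the DM
 * IRQ handlers and builds the DRM display structures (crtcs, connectors,
 * encoders and planes). On failure, amdgpu_dm_fini() tears down whatever
 * was set up.
 */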
815 static int amdgpu_dm_init(struct amdgpu_device *adev)
817 struct dc_init_data init_data;
818 #ifdef CONFIG_DRM_AMD_DC_HDCP
819 struct dc_callback_init init_params;
823 adev->dm.ddev = adev->ddev;
824 adev->dm.adev = adev;
826 /* Zero all the fields */
827 memset(&init_data, 0, sizeof(init_data));
828 #ifdef CONFIG_DRM_AMD_DC_HDCP
829 memset(&init_params, 0, sizeof(init_params));
832 mutex_init(&adev->dm.dc_lock);
833 mutex_init(&adev->dm.audio_lock);
835 if (amdgpu_dm_irq_init(adev)) {
836 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
840 init_data.asic_id.chip_family = adev->family;
842 init_data.asic_id.pci_revision_id = adev->rev_id;
843 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
845 init_data.asic_id.vram_width = adev->gmc.vram_width;
846 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
847 init_data.asic_id.atombios_base_address =
848 adev->mode_info.atom_context->bios;
850 init_data.driver = adev;
852 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
854 if (!adev->dm.cgs_device) {
855 DRM_ERROR("amdgpu: failed to create cgs device.\n");
859 init_data.cgs_device = adev->dm.cgs_device;
861 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
864 * TODO debug why this doesn't work on Raven
866 if (adev->flags & AMD_IS_APU &&
867 adev->asic_type >= CHIP_CARRIZO &&
868 adev->asic_type <= CHIP_RAVEN)
869 init_data.flags.gpu_vm_support = true;
871 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
872 init_data.flags.fbc_support = true;
874 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
875 init_data.flags.multi_mon_pp_mclk_switch = true;
877 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
878 init_data.flags.disable_fractional_pwm = true;
880 init_data.flags.power_down_display_on_boot = true;
882 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
883 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
886 /* Display Core create. */
887 adev->dm.dc = dc_create(&init_data);
890 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
892 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
896 dc_hardware_init(adev->dm.dc);
898 r = dm_dmub_hw_init(adev);
900 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
904 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
905 if (!adev->dm.freesync_module) {
907 "amdgpu: failed to initialize freesync_module.\n");
909 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
910 adev->dm.freesync_module);
912 amdgpu_dm_init_color_mod();
914 #ifdef CONFIG_DRM_AMD_DC_HDCP
915 if (adev->asic_type >= CHIP_RAVEN) {
916 adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
918 if (!adev->dm.hdcp_workqueue)
919 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
921 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
923 dc_init_callbacks(adev->dm.dc, &init_params);
926 if (amdgpu_dm_initialize_drm_device(adev)) {
928 "amdgpu: failed to initialize sw for display support.\n");
932 /* Update the actual number of crtcs used */
933 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
935 /* TODO: Add_display_info? */
937 /* TODO use dynamic cursor width */
938 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
939 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
941 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
943 "amdgpu: failed to initialize sw for display support.\n");
947 #if defined(CONFIG_DEBUG_FS)
948 if (dtn_debugfs_init(adev))
949 DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
952 DRM_DEBUG_DRIVER("KMS initialized.\n");
956 amdgpu_dm_fini(adev);
961 static void amdgpu_dm_fini(struct amdgpu_device *adev)
963 amdgpu_dm_audio_fini(adev);
965 amdgpu_dm_destroy_drm_device(&adev->dm);
967 #ifdef CONFIG_DRM_AMD_DC_HDCP
968 if (adev->dm.hdcp_workqueue) {
969 hdcp_destroy(adev->dm.hdcp_workqueue);
970 adev->dm.hdcp_workqueue = NULL;
974 dc_deinit_callbacks(adev->dm.dc);
976 if (adev->dm.dc->ctx->dmub_srv) {
977 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
978 adev->dm.dc->ctx->dmub_srv = NULL;
981 if (adev->dm.dmub_bo)
982 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
983 &adev->dm.dmub_bo_gpu_addr,
984 &adev->dm.dmub_bo_cpu_addr);
986 /* DC Destroy TODO: Replace destroy DAL */
988 dc_destroy(&adev->dm.dc);
990 * TODO: pageflip, vblank interrupt
992 * amdgpu_dm_irq_fini(adev);
995 if (adev->dm.cgs_device) {
996 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
997 adev->dm.cgs_device = NULL;
999 if (adev->dm.freesync_module) {
1000 mod_freesync_destroy(adev->dm.freesync_module);
1001 adev->dm.freesync_module = NULL;
1004 mutex_destroy(&adev->dm.audio_lock);
1005 mutex_destroy(&adev->dm.dc_lock);
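/*
 * Request and validate the DMCU firmware for ASICs that ship one, and
 * register its ERAM and INTV sections for loading through PSP. ASICs
 * without DMCU firmware are silently skipped.
 */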
1010 static int load_dmcu_fw(struct amdgpu_device *adev)
1012 const char *fw_name_dmcu = NULL;
1014 const struct dmcu_firmware_header_v1_0 *hdr;
1016 switch (adev->asic_type) {
1026 case CHIP_POLARIS11:
1027 case CHIP_POLARIS10:
1028 case CHIP_POLARIS12:
1039 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1040 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1041 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1042 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1047 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1051 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1052 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1056 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1058 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1059 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1060 adev->dm.fw_dmcu = NULL;
1064 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1069 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1071 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1073 release_firmware(adev->dm.fw_dmcu);
1074 adev->dm.fw_dmcu = NULL;
1078 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1079 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1080 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1081 adev->firmware.fw_size +=
1082 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1084 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1085 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1086 adev->firmware.fw_size +=
1087 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1089 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1091 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1096 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1098 struct amdgpu_device *adev = ctx;
1100 return dm_read_reg(adev->dm.dc->ctx, address);
1103 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1106 struct amdgpu_device *adev = ctx;
1108 return dm_write_reg(adev->dm.dc->ctx, address, value);
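/*
 * Software-side DMUB setup: allocate and create the DMUB service, fetch the
 * DMCUB firmware and register it for loading through PSP.
 */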
1111 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1113 struct dmub_srv_create_params create_params;
1114 const struct dmcub_firmware_header_v1_0 *hdr;
1115 const char *fw_name_dmub;
1116 enum dmub_asic dmub_asic;
1117 enum dmub_status status;
1120 switch (adev->asic_type) {
1122 dmub_asic = DMUB_ASIC_DCN21;
1123 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1127 /* ASIC doesn't support DMUB. */
1131 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1132 if (!adev->dm.dmub_srv) {
1133 DRM_ERROR("Failed to allocate DMUB service!\n");
1137 memset(&create_params, 0, sizeof(create_params));
1138 create_params.user_ctx = adev;
1139 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1140 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1141 create_params.asic = dmub_asic;
1143 status = dmub_srv_create(adev->dm.dmub_srv, &create_params);
1144 if (status != DMUB_STATUS_OK) {
1145 DRM_ERROR("Error creating DMUB service: %d\n", status);
1149 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1151 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1155 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1157 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1161 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1162 DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
1166 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1167 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1168 AMDGPU_UCODE_ID_DMCUB;
1169 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
1170 adev->firmware.fw_size +=
1171 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1173 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1175 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1176 adev->dm.dmcub_fw_version);
1181 static int dm_sw_init(void *handle)
1183 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1186 r = dm_dmub_sw_init(adev);
1190 return load_dmcu_fw(adev);
1193 static int dm_sw_fini(void *handle)
1195 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1197 if (adev->dm.dmub_srv) {
1198 dmub_srv_destroy(adev->dm.dmub_srv);
1199 adev->dm.dmub_srv = NULL;
1202 if (adev->dm.dmub_fw) {
1203 release_firmware(adev->dm.dmub_fw);
1204 adev->dm.dmub_fw = NULL;
1207 if (adev->dm.fw_dmcu) {
1208 release_firmware(adev->dm.fw_dmcu);
1209 adev->dm.fw_dmcu = NULL;
1215 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1217 struct amdgpu_dm_connector *aconnector;
1218 struct drm_connector *connector;
1219 struct drm_connector_list_iter iter;
1222 drm_connector_list_iter_begin(dev, &iter);
1223 drm_for_each_connector_iter(connector, &iter) {
1224 aconnector = to_amdgpu_dm_connector(connector);
1225 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1226 aconnector->mst_mgr.aux) {
1227 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1229 aconnector->base.base.id);
1231 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1233 DRM_ERROR("DM_MST: Failed to start MST\n");
1234 aconnector->dc_link->type =
1235 dc_connection_single;
1240 drm_connector_list_iter_end(&iter);
1245 static int dm_late_init(void *handle)
1247 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1249 struct dmcu_iram_parameters params;
1250 unsigned int linear_lut[16];
1252 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1255 for (i = 0; i < 16; i++)
1256 linear_lut[i] = 0xFFFF * i / 15;
1259 params.backlight_ramping_start = 0xCCCC;
1260 params.backlight_ramping_reduction = 0xCCCCCCCC;
1261 params.backlight_lut_array_size = 16;
1262 params.backlight_lut_array = linear_lut;
1264 /* Min backlight level after ABM reduction; don't allow it to go below 1%:
1265 * 0xFFFF * 0.01 = 0x28F
1267 params.min_abm_backlight = 0x28F;
1269 /* todo will enable for navi10 */
1270 if (adev->asic_type <= CHIP_RAVEN) {
1271 ret = dmcu_load_iram(dmcu, params);
1277 return detect_mst_link_for_all_connectors(adev->ddev);
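/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * Topologies that fail to resume are torn down and a hotplug event is sent
 * so userspace can re-probe the affected connectors.
 */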
1280 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1282 struct amdgpu_dm_connector *aconnector;
1283 struct drm_connector *connector;
1284 struct drm_connector_list_iter iter;
1285 struct drm_dp_mst_topology_mgr *mgr;
1287 bool need_hotplug = false;
1289 drm_connector_list_iter_begin(dev, &iter);
1290 drm_for_each_connector_iter(connector, &iter) {
1291 aconnector = to_amdgpu_dm_connector(connector);
1292 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1293 aconnector->mst_port)
1296 mgr = &aconnector->mst_mgr;
1299 drm_dp_mst_topology_mgr_suspend(mgr);
1301 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1303 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1304 need_hotplug = true;
1308 drm_connector_list_iter_end(&iter);
1311 drm_kms_helper_hotplug_event(dev);
1315 * dm_hw_init() - Initialize DC device
1316 * @handle: The base driver device containing the amdgpu_dm device.
1318 * Initialize the &struct amdgpu_display_manager device. This involves calling
1319 * the initializers of each DM component, then populating the struct with them.
1321 * Although the function implies hardware initialization, both hardware and
1322 * software are initialized here. Splitting them out to their relevant init
1323 * hooks is a future TODO item.
1325 * Some notable things that are initialized here:
1327 * - Display Core, both software and hardware
1328 * - DC modules that we need (freesync and color management)
1329 * - DRM software states
1330 * - Interrupt sources and handlers
1332 * - Debug FS entries, if enabled
1334 static int dm_hw_init(void *handle)
1336 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1337 /* Create DAL display manager */
1338 amdgpu_dm_init(adev);
1339 amdgpu_dm_hpd_init(adev);
1345 * dm_hw_fini() - Teardown DC device
1346 * @handle: The base driver device containing the amdgpu_dm device.
1348 * Teardown components within &struct amdgpu_display_manager that require
1349 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1350 * were loaded. Also flush IRQ workqueues and disable them.
1352 static int dm_hw_fini(void *handle)
1354 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1356 amdgpu_dm_hpd_fini(adev);
1358 amdgpu_dm_irq_fini(adev);
1359 amdgpu_dm_fini(adev);
1363 static int dm_suspend(void *handle)
1365 struct amdgpu_device *adev = handle;
1366 struct amdgpu_display_manager *dm = &adev->dm;
1369 WARN_ON(adev->dm.cached_state);
1370 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1372 s3_handle_mst(adev->ddev, true);
1374 amdgpu_dm_irq_suspend(adev);
1377 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1382 static struct amdgpu_dm_connector *
1383 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1384 struct drm_crtc *crtc)
1387 struct drm_connector_state *new_con_state;
1388 struct drm_connector *connector;
1389 struct drm_crtc *crtc_from_state;
1391 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1392 crtc_from_state = new_con_state->crtc;
1394 if (crtc_from_state == crtc)
1395 return to_amdgpu_dm_connector(connector);
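/*
 * Create an emulated sink for a link that is forced on but reports no
 * physical connection, so that a stream can still be constructed for it.
 */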
1401 static void emulated_link_detect(struct dc_link *link)
1403 struct dc_sink_init_data sink_init_data = { 0 };
1404 struct display_sink_capability sink_caps = { 0 };
1405 enum dc_edid_status edid_status;
1406 struct dc_context *dc_ctx = link->ctx;
1407 struct dc_sink *sink = NULL;
1408 struct dc_sink *prev_sink = NULL;
1410 link->type = dc_connection_none;
1411 prev_sink = link->local_sink;
1413 if (prev_sink != NULL)
1414 dc_sink_retain(prev_sink);
1416 switch (link->connector_signal) {
1417 case SIGNAL_TYPE_HDMI_TYPE_A: {
1418 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1419 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1423 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1424 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1425 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1429 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1430 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1431 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1435 case SIGNAL_TYPE_LVDS: {
1436 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1437 sink_caps.signal = SIGNAL_TYPE_LVDS;
1441 case SIGNAL_TYPE_EDP: {
1442 sink_caps.transaction_type =
1443 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1444 sink_caps.signal = SIGNAL_TYPE_EDP;
1448 case SIGNAL_TYPE_DISPLAY_PORT: {
1449 sink_caps.transaction_type =
1450 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1451 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1456 DC_ERROR("Invalid connector type! signal:%d\n",
1457 link->connector_signal);
1461 sink_init_data.link = link;
1462 sink_init_data.sink_signal = sink_caps.signal;
1464 sink = dc_sink_create(&sink_init_data);
1466 DC_ERROR("Failed to create sink!\n");
1470 /* dc_sink_create returns a new reference */
1471 link->local_sink = sink;
1473 edid_status = dm_helpers_read_local_edid(
1478 if (edid_status != EDID_OK)
1479 DC_ERROR("Failed to read EDID");
1483 static int dm_resume(void *handle)
1485 struct amdgpu_device *adev = handle;
1486 struct drm_device *ddev = adev->ddev;
1487 struct amdgpu_display_manager *dm = &adev->dm;
1488 struct amdgpu_dm_connector *aconnector;
1489 struct drm_connector *connector;
1490 struct drm_connector_list_iter iter;
1491 struct drm_crtc *crtc;
1492 struct drm_crtc_state *new_crtc_state;
1493 struct dm_crtc_state *dm_new_crtc_state;
1494 struct drm_plane *plane;
1495 struct drm_plane_state *new_plane_state;
1496 struct dm_plane_state *dm_new_plane_state;
1497 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1498 enum dc_connection_type new_connection_type = dc_connection_none;
1501 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1502 dc_release_state(dm_state->context);
1503 dm_state->context = dc_create_state(dm->dc);
1504 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1505 dc_resource_state_construct(dm->dc, dm_state->context);
1507 /* power on hardware */
1508 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1510 /* program HPD filter */
1514 * Early enable HPD Rx IRQ; this should be done before setting the mode, as short
1515 * pulse interrupts are used for MST
1517 amdgpu_dm_irq_resume_early(adev);
1519 /* On resume we need to rewrite the MSTM control bits to enable MST */
1520 s3_handle_mst(ddev, false);
1523 drm_connector_list_iter_begin(ddev, &iter);
1524 drm_for_each_connector_iter(connector, &iter) {
1525 aconnector = to_amdgpu_dm_connector(connector);
1528 * This is the case when traversing through already created
1529 * MST connectors; they should be skipped.
1531 if (aconnector->mst_port)
1534 mutex_lock(&aconnector->hpd_lock);
1535 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1536 DRM_ERROR("KMS: Failed to detect connector\n");
1538 if (aconnector->base.force && new_connection_type == dc_connection_none)
1539 emulated_link_detect(aconnector->dc_link);
1541 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1543 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1544 aconnector->fake_enable = false;
1546 if (aconnector->dc_sink)
1547 dc_sink_release(aconnector->dc_sink);
1548 aconnector->dc_sink = NULL;
1549 amdgpu_dm_update_connector_after_detect(aconnector);
1550 mutex_unlock(&aconnector->hpd_lock);
1552 drm_connector_list_iter_end(&iter);
1554 /* Force mode set in atomic commit */
1555 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1556 new_crtc_state->active_changed = true;
1559 * atomic_check is expected to create the dc states. We need to release
1560 * them here, since they were duplicated as part of the suspend procedure.
1563 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1564 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1565 if (dm_new_crtc_state->stream) {
1566 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1567 dc_stream_release(dm_new_crtc_state->stream);
1568 dm_new_crtc_state->stream = NULL;
1572 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1573 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1574 if (dm_new_plane_state->dc_state) {
1575 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1576 dc_plane_state_release(dm_new_plane_state->dc_state);
1577 dm_new_plane_state->dc_state = NULL;
1581 drm_atomic_helper_resume(ddev, dm->cached_state);
1583 dm->cached_state = NULL;
1585 amdgpu_dm_irq_resume_late(adev);
1593 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1594 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1595 * the base driver's device list to be initialized and torn down accordingly.
1597 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1600 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1602 .early_init = dm_early_init,
1603 .late_init = dm_late_init,
1604 .sw_init = dm_sw_init,
1605 .sw_fini = dm_sw_fini,
1606 .hw_init = dm_hw_init,
1607 .hw_fini = dm_hw_fini,
1608 .suspend = dm_suspend,
1609 .resume = dm_resume,
1610 .is_idle = dm_is_idle,
1611 .wait_for_idle = dm_wait_for_idle,
1612 .check_soft_reset = dm_check_soft_reset,
1613 .soft_reset = dm_soft_reset,
1614 .set_clockgating_state = dm_set_clockgating_state,
1615 .set_powergating_state = dm_set_powergating_state,
1618 const struct amdgpu_ip_block_version dm_ip_block =
1620 .type = AMD_IP_BLOCK_TYPE_DCE,
1624 .funcs = &amdgpu_dm_funcs,
1634 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1635 .fb_create = amdgpu_display_user_framebuffer_create,
1636 .output_poll_changed = drm_fb_helper_output_poll_changed,
1637 .atomic_check = amdgpu_dm_atomic_check,
1638 .atomic_commit = amdgpu_dm_atomic_commit,
1641 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1642 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1646 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1648 struct drm_connector *connector = &aconnector->base;
1649 struct drm_device *dev = connector->dev;
1650 struct dc_sink *sink;
1652 /* MST handled by drm_mst framework */
1653 if (aconnector->mst_mgr.mst_state)
1657 sink = aconnector->dc_link->local_sink;
1659 dc_sink_retain(sink);
1662 * Edid mgmt connector gets first update only in mode_valid hook and then
1663 * the connector sink is set to either a fake or physical sink, depending on link status.
1664 * Skip if already done during boot.
1666 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1667 && aconnector->dc_em_sink) {
1670 * For S3 resume with headless, use the emulated sink (dc_em_sink) to fake a stream,
1671 * because on resume connector->sink is set to NULL
1673 mutex_lock(&dev->mode_config.mutex);
1676 if (aconnector->dc_sink) {
1677 amdgpu_dm_update_freesync_caps(connector, NULL);
1679 * retain and release below are used to
1680 * bump up the refcount for the sink because the link doesn't point
1681 * to it anymore after disconnect, so on the next crtc-to-connector
1682 * reshuffle by UMD we would otherwise run into an unwanted dc_sink release
1684 dc_sink_release(aconnector->dc_sink);
1686 aconnector->dc_sink = sink;
1687 dc_sink_retain(aconnector->dc_sink);
1688 amdgpu_dm_update_freesync_caps(connector,
1691 amdgpu_dm_update_freesync_caps(connector, NULL);
1692 if (!aconnector->dc_sink) {
1693 aconnector->dc_sink = aconnector->dc_em_sink;
1694 dc_sink_retain(aconnector->dc_sink);
1698 mutex_unlock(&dev->mode_config.mutex);
1701 dc_sink_release(sink);
1706 * TODO: temporary guard to look for proper fix
1707 * if this sink is MST sink, we should not do anything
1709 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1710 dc_sink_release(sink);
1714 if (aconnector->dc_sink == sink) {
1716 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1719 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1720 aconnector->connector_id);
1722 dc_sink_release(sink);
1726 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1727 aconnector->connector_id, aconnector->dc_sink, sink);
1729 mutex_lock(&dev->mode_config.mutex);
1732 * 1. Update status of the drm connector
1733 * 2. Send an event and let userspace tell us what to do
1737 * TODO: check if we still need the S3 mode update workaround.
1738 * If yes, put it here.
1740 if (aconnector->dc_sink)
1741 amdgpu_dm_update_freesync_caps(connector, NULL);
1743 aconnector->dc_sink = sink;
1744 dc_sink_retain(aconnector->dc_sink);
1745 if (sink->dc_edid.length == 0) {
1746 aconnector->edid = NULL;
1747 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1750 (struct edid *) sink->dc_edid.raw_edid;
1753 drm_connector_update_edid_property(connector,
1755 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1758 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1761 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1762 amdgpu_dm_update_freesync_caps(connector, NULL);
1763 drm_connector_update_edid_property(connector, NULL);
1764 aconnector->num_modes = 0;
1765 dc_sink_release(aconnector->dc_sink);
1766 aconnector->dc_sink = NULL;
1767 aconnector->edid = NULL;
1768 #ifdef CONFIG_DRM_AMD_DC_HDCP
1769 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1770 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1771 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1775 mutex_unlock(&dev->mode_config.mutex);
1778 dc_sink_release(sink);
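/*
 * Handler for long HPD pulses: re-detect the link, update the connector
 * state and notify userspace through a hotplug event.
 */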
1781 static void handle_hpd_irq(void *param)
1783 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1784 struct drm_connector *connector = &aconnector->base;
1785 struct drm_device *dev = connector->dev;
1786 enum dc_connection_type new_connection_type = dc_connection_none;
1787 #ifdef CONFIG_DRM_AMD_DC_HDCP
1788 struct amdgpu_device *adev = dev->dev_private;
1792 * In case of failure or MST there is no need to update the connector status or notify the OS,
1793 * since (in the MST case) MST does this in its own context.
1795 mutex_lock(&aconnector->hpd_lock);
1797 #ifdef CONFIG_DRM_AMD_DC_HDCP
1798 if (adev->asic_type >= CHIP_RAVEN)
1799 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
1801 if (aconnector->fake_enable)
1802 aconnector->fake_enable = false;
1804 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1805 DRM_ERROR("KMS: Failed to detect connector\n");
1807 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1808 emulated_link_detect(aconnector->dc_link);
1811 drm_modeset_lock_all(dev);
1812 dm_restore_drm_connector_state(dev, connector);
1813 drm_modeset_unlock_all(dev);
1815 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1816 drm_kms_helper_hotplug_event(dev);
1818 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
1819 amdgpu_dm_update_connector_after_detect(aconnector);
1822 drm_modeset_lock_all(dev);
1823 dm_restore_drm_connector_state(dev, connector);
1824 drm_modeset_unlock_all(dev);
1826 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1827 drm_kms_helper_hotplug_event(dev);
1829 mutex_unlock(&aconnector->hpd_lock);
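/*
 * Service MST sideband messages signalled through the ESI/DPCD registers,
 * acknowledging each handled IRQ and re-reading until no new IRQ is
 * reported (bounded by max_process_count).
 */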
1833 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
1835 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
1837 bool new_irq_handled = false;
1839 int dpcd_bytes_to_read;
1841 const int max_process_count = 30;
1842 int process_count = 0;
1844 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
1846 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
1847 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
1848 /* DPCD 0x200 - 0x201 for downstream IRQ */
1849 dpcd_addr = DP_SINK_COUNT;
1851 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
1852 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
1853 dpcd_addr = DP_SINK_COUNT_ESI;
1856 dret = drm_dp_dpcd_read(
1857 &aconnector->dm_dp_aux.aux,
1860 dpcd_bytes_to_read);
1862 while (dret == dpcd_bytes_to_read &&
1863 process_count < max_process_count) {
1869 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
1870 /* handle HPD short pulse irq */
1871 if (aconnector->mst_mgr.mst_state)
1873 &aconnector->mst_mgr,
1877 if (new_irq_handled) {
1878 /* ACK at DPCD to notify downstream */
1879 const int ack_dpcd_bytes_to_write =
1880 dpcd_bytes_to_read - 1;
1882 for (retry = 0; retry < 3; retry++) {
1885 wret = drm_dp_dpcd_write(
1886 &aconnector->dm_dp_aux.aux,
1889 ack_dpcd_bytes_to_write);
1890 if (wret == ack_dpcd_bytes_to_write)
1894 /* check if there is new irq to be handled */
1895 dret = drm_dp_dpcd_read(
1896 &aconnector->dm_dp_aux.aux,
1899 dpcd_bytes_to_read);
1901 new_irq_handled = false;
1907 if (process_count == max_process_count)
1908 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
1911 static void handle_hpd_rx_irq(void *param)
1913 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1914 struct drm_connector *connector = &aconnector->base;
1915 struct drm_device *dev = connector->dev;
1916 struct dc_link *dc_link = aconnector->dc_link;
1917 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1918 enum dc_connection_type new_connection_type = dc_connection_none;
1919 #ifdef CONFIG_DRM_AMD_DC_HDCP
1920 union hpd_irq_data hpd_irq_data;
1921 struct amdgpu_device *adev = dev->dev_private;
1923 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
1927 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
1928 * conflicts; after an i2c helper is implemented, this mutex should be retired.
1931 if (dc_link->type != dc_connection_mst_branch)
1932 mutex_lock(&aconnector->hpd_lock);
1935 #ifdef CONFIG_DRM_AMD_DC_HDCP
1936 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
1938 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
1940 !is_mst_root_connector) {
1941 /* Downstream Port status changed. */
1942 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1943 DRM_ERROR("KMS: Failed to detect connector\n");
1945 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1946 emulated_link_detect(dc_link);
1948 if (aconnector->fake_enable)
1949 aconnector->fake_enable = false;
1951 amdgpu_dm_update_connector_after_detect(aconnector);
1954 drm_modeset_lock_all(dev);
1955 dm_restore_drm_connector_state(dev, connector);
1956 drm_modeset_unlock_all(dev);
1958 drm_kms_helper_hotplug_event(dev);
1959 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1961 if (aconnector->fake_enable)
1962 aconnector->fake_enable = false;
1964 amdgpu_dm_update_connector_after_detect(aconnector);
1967 drm_modeset_lock_all(dev);
1968 dm_restore_drm_connector_state(dev, connector);
1969 drm_modeset_unlock_all(dev);
1971 drm_kms_helper_hotplug_event(dev);
1974 #ifdef CONFIG_DRM_AMD_DC_HDCP
1975 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
1976 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
1978 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1979 (dc_link->type == dc_connection_mst_branch))
1980 dm_handle_hpd_rx_irq(aconnector);
1982 if (dc_link->type != dc_connection_mst_branch) {
1983 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
1984 mutex_unlock(&aconnector->hpd_lock);
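/*
 * Register the per-connector HPD and HPD_RX (short pulse) interrupt sources
 * with the DM IRQ service, wiring them to handle_hpd_irq() and
 * handle_hpd_rx_irq().
 */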
1988 static void register_hpd_handlers(struct amdgpu_device *adev)
1990 struct drm_device *dev = adev->ddev;
1991 struct drm_connector *connector;
1992 struct amdgpu_dm_connector *aconnector;
1993 const struct dc_link *dc_link;
1994 struct dc_interrupt_params int_params = {0};
1996 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1997 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1999 list_for_each_entry(connector,
2000 &dev->mode_config.connector_list, head) {
2002 aconnector = to_amdgpu_dm_connector(connector);
2003 dc_link = aconnector->dc_link;
2005 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2006 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2007 int_params.irq_source = dc_link->irq_source_hpd;
2009 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2011 (void *) aconnector);
2014 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2016 /* Also register for DP short pulse (hpd_rx). */
2017 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2018 int_params.irq_source = dc_link->irq_source_hpd_rx;
2020 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2022 (void *) aconnector);
2027 /* Register IRQ sources and initialize IRQ callbacks */
2028 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2030 struct dc *dc = adev->dm.dc;
2031 struct common_irq_params *c_irq_params;
2032 struct dc_interrupt_params int_params = {0};
2035 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2037 if (adev->asic_type >= CHIP_VEGA10)
2038 client_id = SOC15_IH_CLIENTID_DCE;
2040 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2041 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2044 * Actions of amdgpu_irq_add_id():
2045 * 1. Register a set() function with base driver.
2046 * Base driver will call set() function to enable/disable an
2047 * interrupt in DC hardware.
2048 * 2. Register amdgpu_dm_irq_handler().
2049 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2050 * coming from DC hardware.
2051 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2052 * for acknowledging and handling. */
2054 /* Use VBLANK interrupt */
2055 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2056 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2058 DRM_ERROR("Failed to add crtc irq id!\n");
2062 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2063 int_params.irq_source =
2064 dc_interrupt_to_irq_source(dc, i, 0);
2066 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2068 c_irq_params->adev = adev;
2069 c_irq_params->irq_src = int_params.irq_source;
2071 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2072 dm_crtc_high_irq, c_irq_params);
2075 /* Use VUPDATE interrupt */
2076 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2077 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2079 DRM_ERROR("Failed to add vupdate irq id!\n");
2083 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2084 int_params.irq_source =
2085 dc_interrupt_to_irq_source(dc, i, 0);
2087 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2089 c_irq_params->adev = adev;
2090 c_irq_params->irq_src = int_params.irq_source;
2092 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2093 dm_vupdate_high_irq, c_irq_params);
2096 /* Use GRPH_PFLIP interrupt */
2097 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2098 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2099 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2101 DRM_ERROR("Failed to add page flip irq id!\n");
2105 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2106 int_params.irq_source =
2107 dc_interrupt_to_irq_source(dc, i, 0);
2109 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2111 c_irq_params->adev = adev;
2112 c_irq_params->irq_src = int_params.irq_source;
2114 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2115 dm_pflip_high_irq, c_irq_params);
2120 r = amdgpu_irq_add_id(adev, client_id,
2121 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2123 DRM_ERROR("Failed to add hpd irq id!\n");
2127 register_hpd_handlers(adev);
2132 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2133 /* Register IRQ sources and initialize IRQ callbacks */
2134 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2136 struct dc *dc = adev->dm.dc;
2137 struct common_irq_params *c_irq_params;
2138 struct dc_interrupt_params int_params = {0};
2142 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2143 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2146 * Actions of amdgpu_irq_add_id():
2147 * 1. Register a set() function with base driver.
2148 * Base driver will call set() function to enable/disable an
2149 * interrupt in DC hardware.
2150 * 2. Register amdgpu_dm_irq_handler().
2151 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2152 * coming from DC hardware.
2153 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2154 * for acknowledging and handling.
2157 /* Use VSTARTUP interrupt */
2158 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2159 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2161 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2164 DRM_ERROR("Failed to add crtc irq id!\n");
2168 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2169 int_params.irq_source =
2170 dc_interrupt_to_irq_source(dc, i, 0);
2172 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2174 c_irq_params->adev = adev;
2175 c_irq_params->irq_src = int_params.irq_source;
2177 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2178 dm_crtc_high_irq, c_irq_params);
2181 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2182 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2183 * to trigger at end of each vblank, regardless of state of the lock,
2184 * matching DCE behaviour.
2186 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2187 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2189 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2192 DRM_ERROR("Failed to add vupdate irq id!\n");
2196 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2197 int_params.irq_source =
2198 dc_interrupt_to_irq_source(dc, i, 0);
2200 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2202 c_irq_params->adev = adev;
2203 c_irq_params->irq_src = int_params.irq_source;
2205 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2206 dm_vupdate_high_irq, c_irq_params);
2209 /* Use GRPH_PFLIP interrupt */
2210 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2211 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2213 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2215 DRM_ERROR("Failed to add page flip irq id!\n");
2219 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2220 int_params.irq_source =
2221 dc_interrupt_to_irq_source(dc, i, 0);
2223 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2225 c_irq_params->adev = adev;
2226 c_irq_params->irq_src = int_params.irq_source;
2228 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2229 dm_pflip_high_irq, c_irq_params);
2234 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2237 DRM_ERROR("Failed to add hpd irq id!\n");
2241 register_hpd_handlers(adev);
2248 * Acquires the lock for the atomic state object and returns
2249 * the new atomic state.
2251 * This should only be called during atomic check.
2253 static int dm_atomic_get_state(struct drm_atomic_state *state,
2254 struct dm_atomic_state **dm_state)
2256 struct drm_device *dev = state->dev;
2257 struct amdgpu_device *adev = dev->dev_private;
2258 struct amdgpu_display_manager *dm = &adev->dm;
2259 struct drm_private_state *priv_state;
2264 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2265 if (IS_ERR(priv_state))
2266 return PTR_ERR(priv_state);
2268 *dm_state = to_dm_atomic_state(priv_state);
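/* Return the DM private object's new state from @state, if it was added. */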
2273 struct dm_atomic_state *
2274 dm_atomic_get_new_state(struct drm_atomic_state *state)
2276 struct drm_device *dev = state->dev;
2277 struct amdgpu_device *adev = dev->dev_private;
2278 struct amdgpu_display_manager *dm = &adev->dm;
2279 struct drm_private_obj *obj;
2280 struct drm_private_state *new_obj_state;
2283 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2284 if (obj->funcs == dm->atomic_obj.funcs)
2285 return to_dm_atomic_state(new_obj_state);
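/* Return the DM private object's old state from @state, if it was added. */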
2291 struct dm_atomic_state *
2292 dm_atomic_get_old_state(struct drm_atomic_state *state)
2294 struct drm_device *dev = state->dev;
2295 struct amdgpu_device *adev = dev->dev_private;
2296 struct amdgpu_display_manager *dm = &adev->dm;
2297 struct drm_private_obj *obj;
2298 struct drm_private_state *old_obj_state;
2301 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2302 if (obj->funcs == dm->atomic_obj.funcs)
2303 return to_dm_atomic_state(old_obj_state);
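/*
 * Duplicate the DM private state for a new commit. The DC state context is
 * deep-copied so the new commit can be validated without touching the
 * currently committed state.
 */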
2309 static struct drm_private_state *
2310 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2312 struct dm_atomic_state *old_state, *new_state;
2314 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2318 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2320 old_state = to_dm_atomic_state(obj->state);
2322 if (old_state && old_state->context)
2323 new_state->context = dc_copy_state(old_state->context);
2325 if (!new_state->context) {
2330 return &new_state->base;
2333 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2334 struct drm_private_state *state)
2336 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2338 if (dm_state && dm_state->context)
2339 dc_release_state(dm_state->context);
2344 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2345 .atomic_duplicate_state = dm_atomic_duplicate_state,
2346 .atomic_destroy_state = dm_atomic_destroy_state,
2349 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2351 struct dm_atomic_state *state;
2354 adev->mode_info.mode_config_initialized = true;
2356 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2357 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2359 adev->ddev->mode_config.max_width = 16384;
2360 adev->ddev->mode_config.max_height = 16384;
2362 adev->ddev->mode_config.preferred_depth = 24;
2363 adev->ddev->mode_config.prefer_shadow = 1;
2364 /* indicates support for immediate flip */
2365 adev->ddev->mode_config.async_page_flip = true;
2367 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2369 state = kzalloc(sizeof(*state), GFP_KERNEL);
2373 state->context = dc_create_state(adev->dm.dc);
2374 if (!state->context) {
2379 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2381 drm_atomic_private_obj_init(adev->ddev,
2382 &adev->dm.atomic_obj,
2384 &dm_atomic_state_funcs);
2386 r = amdgpu_display_modeset_create_props(adev);
2390 r = amdgpu_dm_audio_init(adev);
2397 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2398 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2400 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2401 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
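/*
 * Read the panel's min/max backlight input signal from ACPI, falling back
 * to the driver defaults above when no valid caps are reported.
 */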
2403 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2405 #if defined(CONFIG_ACPI)
2406 struct amdgpu_dm_backlight_caps caps;
2408 if (dm->backlight_caps.caps_valid)
2411 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2412 if (caps.caps_valid) {
2413 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2414 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2415 dm->backlight_caps.caps_valid = true;
2417 dm->backlight_caps.min_input_signal =
2418 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2419 dm->backlight_caps.max_input_signal =
2420 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2423 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2424 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2428 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2430 struct amdgpu_display_manager *dm = bl_get_data(bd);
2431 struct amdgpu_dm_backlight_caps caps;
2432 uint32_t brightness = bd->props.brightness;
2434 amdgpu_dm_update_backlight_caps(dm);
2435 caps = dm->backlight_caps;
2437 * The brightness input is in the range 0-255
2438 * It needs to be rescaled to be between the
2439 * requested min and max input signal
2441 * It also needs to be scaled up by 0x101 to
2442 * match the DC interface which has a range of
2448 * (caps.max_input_signal - caps.min_input_signal)
2449 / AMDGPU_MAX_BL_LEVEL
2450 + caps.min_input_signal * 0x101;
2452 if (dc_link_set_backlight_level(dm->backlight_link,
2459 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2461 struct amdgpu_display_manager *dm = bl_get_data(bd);
2462 int ret = dc_link_get_backlight_level(dm->backlight_link);
2464 if (ret == DC_ERROR_UNEXPECTED)
2465 return bd->props.brightness;
2469 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2470 .options = BL_CORE_SUSPENDRESUME,
2471 .get_brightness = amdgpu_dm_backlight_get_brightness,
2472 .update_status = amdgpu_dm_backlight_update_status,
2476 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2479 struct backlight_properties props = { 0 };
2481 amdgpu_dm_update_backlight_caps(dm);
2483 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2484 props.brightness = AMDGPU_MAX_BL_LEVEL;
2485 props.type = BACKLIGHT_RAW;
2487 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2488 dm->adev->ddev->primary->index);
2490 dm->backlight_dev = backlight_device_register(bl_name,
2491 dm->adev->ddev->dev,
2493 &amdgpu_dm_backlight_ops,
2496 if (IS_ERR(dm->backlight_dev))
2497 DRM_ERROR("DM: Backlight registration failed!\n");
2499 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
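/*
 * Allocate and initialize one DRM plane of the requested type. Primary
 * planes are tied to their matching CRTC (see the IGT note below), while
 * overlay/underlay planes may be placed on any CRTC.
 */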
2504 static int initialize_plane(struct amdgpu_display_manager *dm,
2505 struct amdgpu_mode_info *mode_info, int plane_id,
2506 enum drm_plane_type plane_type,
2507 const struct dc_plane_cap *plane_cap)
2509 struct drm_plane *plane;
2510 unsigned long possible_crtcs;
2513 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2515 DRM_ERROR("KMS: Failed to allocate plane\n");
2518 plane->type = plane_type;
2521 * HACK: IGT tests expect that the primary plane for a CRTC
2522 * can only have one possible CRTC. Only expose support for
2523 * any CRTC if they're not going to be used as a primary plane
2524 * for a CRTC - like overlay or underlay planes.
2526 possible_crtcs = 1 << plane_id;
2527 if (plane_id >= dm->dc->caps.max_streams)
2528 possible_crtcs = 0xff;
2530 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2533 DRM_ERROR("KMS: Failed to initialize plane\n");
2539 mode_info->planes[plane_id] = plane;
2545 static void register_backlight_device(struct amdgpu_display_manager *dm,
2546 struct dc_link *link)
2548 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2549 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2551 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2552 link->type != dc_connection_none) {
2554 * Even if registration fails, we should continue with
2555 * DM initialization because not having a backlight control
2556 * is better than a black screen.
2558 amdgpu_dm_register_backlight_device(dm);
2560 if (dm->backlight_dev)
2561 dm->backlight_link = link;
2568 * In this architecture, the association
2569 * connector -> encoder -> crtc
2570 * is not really required. The crtc and connector will hold the
2571 * display_index as an abstraction to use with the DAL component.
2573 * Returns 0 on success
2575 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2577 struct amdgpu_display_manager *dm = &adev->dm;
2579 struct amdgpu_dm_connector *aconnector = NULL;
2580 struct amdgpu_encoder *aencoder = NULL;
2581 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2583 int32_t primary_planes;
2584 enum dc_connection_type new_connection_type = dc_connection_none;
2585 const struct dc_plane_cap *plane;
2587 link_cnt = dm->dc->caps.max_links;
2588 if (amdgpu_dm_mode_config_init(dm->adev)) {
2589 DRM_ERROR("DM: Failed to initialize mode config\n");
2593 /* There is one primary plane per CRTC */
2594 primary_planes = dm->dc->caps.max_streams;
2595 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2598 * Initialize primary planes, implicit planes for legacy IOCTLS.
2599 * Order is reversed to match iteration order in atomic check.
2601 for (i = (primary_planes - 1); i >= 0; i--) {
2602 plane = &dm->dc->caps.planes[i];
2604 if (initialize_plane(dm, mode_info, i,
2605 DRM_PLANE_TYPE_PRIMARY, plane)) {
2606 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2612 * Initialize overlay planes, index starting after primary planes.
2613 * These planes have a higher DRM index than the primary planes since
2614 * they should be considered as having a higher z-order.
2615 * Order is reversed to match iteration order in atomic check.
2617 * Only support DCN for now, and only expose one so we don't encourage
2618 * userspace to use up all the pipes.
2620 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2621 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2623 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2626 if (!plane->blends_with_above || !plane->blends_with_below)
2629 if (!plane->pixel_format_support.argb8888)
2632 if (initialize_plane(dm, NULL, primary_planes + i,
2633 DRM_PLANE_TYPE_OVERLAY, plane)) {
2634 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2638 /* Only create one overlay plane. */
2642 for (i = 0; i < dm->dc->caps.max_streams; i++)
2643 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2644 DRM_ERROR("KMS: Failed to initialize crtc\n");
2648 dm->display_indexes_num = dm->dc->caps.max_streams;
2650 /* Loop over all connectors on the board */
2651 for (i = 0; i < link_cnt; i++) {
2652 struct dc_link *link = NULL;
2654 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2656 "KMS: Cannot support more than %d display indexes\n",
2657 AMDGPU_DM_MAX_DISPLAY_INDEX);
2661 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2665 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2669 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2670 DRM_ERROR("KMS: Failed to initialize encoder\n");
2674 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2675 DRM_ERROR("KMS: Failed to initialize connector\n");
2679 link = dc_get_link_at_index(dm->dc, i);
2681 if (!dc_link_detect_sink(link, &new_connection_type))
2682 DRM_ERROR("KMS: Failed to detect connector\n");
2684 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2685 emulated_link_detect(link);
2686 amdgpu_dm_update_connector_after_detect(aconnector);
2688 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2689 amdgpu_dm_update_connector_after_detect(aconnector);
2690 register_backlight_device(dm, link);
2691 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2692 amdgpu_dm_set_psr_caps(link);
2698 /* Software is initialized. Now we can register interrupt handlers. */
2699 switch (adev->asic_type) {
2709 case CHIP_POLARIS11:
2710 case CHIP_POLARIS10:
2711 case CHIP_POLARIS12:
2716 if (dce110_register_irq_handlers(dm->adev)) {
2717 DRM_ERROR("DM: Failed to initialize IRQ\n");
2721 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2723 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2728 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2731 if (dcn10_register_irq_handlers(dm->adev)) {
2732 DRM_ERROR("DM: Failed to initialize IRQ\n");
2738 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2742 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2743 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2753 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2755 drm_mode_config_cleanup(dm->ddev);
2756 drm_atomic_private_obj_fini(&dm->atomic_obj);
2760 /******************************************************************************
2761 * amdgpu_display_funcs functions
2762 *****************************************************************************/
2765 * dm_bandwidth_update - program display watermarks
2767 * @adev: amdgpu_device pointer
2769 * Calculate and program the display watermarks and line buffer allocation.
2771 static void dm_bandwidth_update(struct amdgpu_device *adev)
2773 /* TODO: implement later */
2776 static const struct amdgpu_display_funcs dm_display_funcs = {
2777 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2778 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
2779 .backlight_set_level = NULL, /* never called for DC */
2780 .backlight_get_level = NULL, /* never called for DC */
2781 .hpd_sense = NULL,/* called unconditionally */
2782 .hpd_set_polarity = NULL, /* called unconditionally */
2783 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2784 .page_flip_get_scanoutpos =
2785 dm_crtc_get_scanoutpos,/* called unconditionally */
2786 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2787 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
2790 #if defined(CONFIG_DEBUG_KERNEL_DC)
2792 static ssize_t s3_debug_store(struct device *device,
2793 struct device_attribute *attr,
2799 struct drm_device *drm_dev = dev_get_drvdata(device);
2800 struct amdgpu_device *adev = drm_dev->dev_private;
2802 ret = kstrtoint(buf, 0, &s3_state);
2807 drm_kms_helper_hotplug_event(adev->ddev);
2812 return ret == 0 ? count : 0;
2815 DEVICE_ATTR_WO(s3_debug);
2819 static int dm_early_init(void *handle)
2821 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2823 switch (adev->asic_type) {
2826 adev->mode_info.num_crtc = 6;
2827 adev->mode_info.num_hpd = 6;
2828 adev->mode_info.num_dig = 6;
2831 adev->mode_info.num_crtc = 4;
2832 adev->mode_info.num_hpd = 6;
2833 adev->mode_info.num_dig = 7;
2837 adev->mode_info.num_crtc = 2;
2838 adev->mode_info.num_hpd = 6;
2839 adev->mode_info.num_dig = 6;
2843 adev->mode_info.num_crtc = 6;
2844 adev->mode_info.num_hpd = 6;
2845 adev->mode_info.num_dig = 7;
2848 adev->mode_info.num_crtc = 3;
2849 adev->mode_info.num_hpd = 6;
2850 adev->mode_info.num_dig = 9;
2853 adev->mode_info.num_crtc = 2;
2854 adev->mode_info.num_hpd = 6;
2855 adev->mode_info.num_dig = 9;
2857 case CHIP_POLARIS11:
2858 case CHIP_POLARIS12:
2859 adev->mode_info.num_crtc = 5;
2860 adev->mode_info.num_hpd = 5;
2861 adev->mode_info.num_dig = 5;
2863 case CHIP_POLARIS10:
2865 adev->mode_info.num_crtc = 6;
2866 adev->mode_info.num_hpd = 6;
2867 adev->mode_info.num_dig = 6;
2872 adev->mode_info.num_crtc = 6;
2873 adev->mode_info.num_hpd = 6;
2874 adev->mode_info.num_dig = 6;
2876 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2878 adev->mode_info.num_crtc = 4;
2879 adev->mode_info.num_hpd = 4;
2880 adev->mode_info.num_dig = 4;
2883 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2886 adev->mode_info.num_crtc = 6;
2887 adev->mode_info.num_hpd = 6;
2888 adev->mode_info.num_dig = 6;
2891 adev->mode_info.num_crtc = 5;
2892 adev->mode_info.num_hpd = 5;
2893 adev->mode_info.num_dig = 5;
2896 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2898 adev->mode_info.num_crtc = 4;
2899 adev->mode_info.num_hpd = 4;
2900 adev->mode_info.num_dig = 4;
2904 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2908 amdgpu_dm_set_irq_funcs(adev);
2910 if (adev->mode_info.funcs == NULL)
2911 adev->mode_info.funcs = &dm_display_funcs;
2914 * Note: Do NOT change adev->audio_endpt_rreg and
2915 * adev->audio_endpt_wreg because they are initialised in
2916 * amdgpu_device_init()
2918 #if defined(CONFIG_DEBUG_KERNEL_DC)
2921 &dev_attr_s3_debug);
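/*
 * A new stream is programmed only when DRM flags the CRTC for a modeset
 * and the CRTC ends up enabled and active; conversely, a mode reset tears
 * the stream down when a flagged CRTC ends up disabled or inactive.
 */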
2927 static bool modeset_required(struct drm_crtc_state *crtc_state,
2928 struct dc_stream_state *new_stream,
2929 struct dc_stream_state *old_stream)
2931 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2934 if (!crtc_state->enable)
2937 return crtc_state->active;
2940 static bool modereset_required(struct drm_crtc_state *crtc_state)
2942 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2945 return !crtc_state->enable || !crtc_state->active;
2948 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2950 drm_encoder_cleanup(encoder);
2954 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2955 .destroy = amdgpu_dm_encoder_destroy,
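/*
 * Convert the DRM plane state's 16.16 fixed-point source rect and integer
 * destination rect into DC scaling info; scaling factors outside the
 * 0.25x-16x range are rejected.
 */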
2959 static int fill_dc_scaling_info(const struct drm_plane_state *state,
2960 struct dc_scaling_info *scaling_info)
2962 int scale_w, scale_h;
2964 memset(scaling_info, 0, sizeof(*scaling_info));
2966 /* Source is fixed 16.16 but we ignore mantissa for now... */
2967 scaling_info->src_rect.x = state->src_x >> 16;
2968 scaling_info->src_rect.y = state->src_y >> 16;
2970 scaling_info->src_rect.width = state->src_w >> 16;
2971 if (scaling_info->src_rect.width == 0)
2974 scaling_info->src_rect.height = state->src_h >> 16;
2975 if (scaling_info->src_rect.height == 0)
2978 scaling_info->dst_rect.x = state->crtc_x;
2979 scaling_info->dst_rect.y = state->crtc_y;
2981 if (state->crtc_w == 0)
2984 scaling_info->dst_rect.width = state->crtc_w;
2986 if (state->crtc_h == 0)
2989 scaling_info->dst_rect.height = state->crtc_h;
2991 /* DRM doesn't specify clipping on destination output. */
2992 scaling_info->clip_rect = scaling_info->dst_rect;
2994 /* TODO: Validate scaling per-format with DC plane caps */
2995 scale_w = scaling_info->dst_rect.width * 1000 /
2996 scaling_info->src_rect.width;
2998 if (scale_w < 250 || scale_w > 16000)
3001 scale_h = scaling_info->dst_rect.height * 1000 /
3002 scaling_info->src_rect.height;
3004 if (scale_h < 250 || scale_h > 16000)
3008 * The "scaling_quality" can be ignored for now; quality = 0 lets DC
3009 * assume reasonable defaults based on the format.
3015 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3016 uint64_t *tiling_flags)
3018 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3019 int r = amdgpu_bo_reserve(rbo, false);
3022 /* Don't show error message when returning -ERESTARTSYS */
3023 if (r != -ERESTARTSYS)
3024 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3029 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3031 amdgpu_bo_unreserve(rbo);
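/*
 * The DCC metadata offset is encoded in the tiling flags in units of 256
 * bytes; return the absolute metadata address, or 0 if the surface has no
 * DCC.
 */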
3036 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3038 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3040 return offset ? (address + offset * 256) : 0;
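/*
 * Ask DC whether this surface can be scanned out with DCC enabled and, if
 * so, fill in the DCC pitch/independent-64B-block parameters and the
 * metadata surface address.
 */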
3044 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3045 const struct amdgpu_framebuffer *afb,
3046 const enum surface_pixel_format format,
3047 const enum dc_rotation_angle rotation,
3048 const struct plane_size *plane_size,
3049 const union dc_tiling_info *tiling_info,
3050 const uint64_t info,
3051 struct dc_plane_dcc_param *dcc,
3052 struct dc_plane_address *address)
3054 struct dc *dc = adev->dm.dc;
3055 struct dc_dcc_surface_param input;
3056 struct dc_surface_dcc_cap output;
3057 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3058 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3059 uint64_t dcc_address;
3061 memset(&input, 0, sizeof(input));
3062 memset(&output, 0, sizeof(output));
3067 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3070 if (!dc->cap_funcs.get_dcc_compression_cap)
3073 input.format = format;
3074 input.surface_size.width = plane_size->surface_size.width;
3075 input.surface_size.height = plane_size->surface_size.height;
3076 input.swizzle_mode = tiling_info->gfx9.swizzle;
3078 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3079 input.scan = SCAN_DIRECTION_HORIZONTAL;
3080 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3081 input.scan = SCAN_DIRECTION_VERTICAL;
3083 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3086 if (!output.capable)
3089 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3094 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3095 dcc->independent_64b_blks = i64b;
3097 dcc_address = get_dcc_address(afb->address, info);
3098 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3099 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
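/*
 * Fill the plane size, tiling info (GFX8 macro-tile fields or GFX9+
 * swizzle mode) and scanout addresses for either a single-plane graphics
 * surface or a two-plane luma/chroma video surface, then add the DCC
 * attributes.
 */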
3105 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3106 const struct amdgpu_framebuffer *afb,
3107 const enum surface_pixel_format format,
3108 const enum dc_rotation_angle rotation,
3109 const uint64_t tiling_flags,
3110 union dc_tiling_info *tiling_info,
3111 struct plane_size *plane_size,
3112 struct dc_plane_dcc_param *dcc,
3113 struct dc_plane_address *address)
3115 const struct drm_framebuffer *fb = &afb->base;
3118 memset(tiling_info, 0, sizeof(*tiling_info));
3119 memset(plane_size, 0, sizeof(*plane_size));
3120 memset(dcc, 0, sizeof(*dcc));
3121 memset(address, 0, sizeof(*address));
3123 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3124 plane_size->surface_size.x = 0;
3125 plane_size->surface_size.y = 0;
3126 plane_size->surface_size.width = fb->width;
3127 plane_size->surface_size.height = fb->height;
3128 plane_size->surface_pitch =
3129 fb->pitches[0] / fb->format->cpp[0];
3131 address->type = PLN_ADDR_TYPE_GRAPHICS;
3132 address->grph.addr.low_part = lower_32_bits(afb->address);
3133 address->grph.addr.high_part = upper_32_bits(afb->address);
3134 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3135 uint64_t chroma_addr = afb->address + fb->offsets[1];
3137 plane_size->surface_size.x = 0;
3138 plane_size->surface_size.y = 0;
3139 plane_size->surface_size.width = fb->width;
3140 plane_size->surface_size.height = fb->height;
3141 plane_size->surface_pitch =
3142 fb->pitches[0] / fb->format->cpp[0];
3144 plane_size->chroma_size.x = 0;
3145 plane_size->chroma_size.y = 0;
3146 /* TODO: set these based on surface format */
3147 plane_size->chroma_size.width = fb->width / 2;
3148 plane_size->chroma_size.height = fb->height / 2;
3150 plane_size->chroma_pitch =
3151 fb->pitches[1] / fb->format->cpp[1];
3153 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3154 address->video_progressive.luma_addr.low_part =
3155 lower_32_bits(afb->address);
3156 address->video_progressive.luma_addr.high_part =
3157 upper_32_bits(afb->address);
3158 address->video_progressive.chroma_addr.low_part =
3159 lower_32_bits(chroma_addr);
3160 address->video_progressive.chroma_addr.high_part =
3161 upper_32_bits(chroma_addr);
3164 /* Fill GFX8 params */
3165 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3166 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3168 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3169 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3170 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3171 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3172 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3174 /* XXX fix me for VI */
3175 tiling_info->gfx8.num_banks = num_banks;
3176 tiling_info->gfx8.array_mode =
3177 DC_ARRAY_2D_TILED_THIN1;
3178 tiling_info->gfx8.tile_split = tile_split;
3179 tiling_info->gfx8.bank_width = bankw;
3180 tiling_info->gfx8.bank_height = bankh;
3181 tiling_info->gfx8.tile_aspect = mtaspect;
3182 tiling_info->gfx8.tile_mode =
3183 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3184 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3185 == DC_ARRAY_1D_TILED_THIN1) {
3186 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3189 tiling_info->gfx8.pipe_config =
3190 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3192 if (adev->asic_type == CHIP_VEGA10 ||
3193 adev->asic_type == CHIP_VEGA12 ||
3194 adev->asic_type == CHIP_VEGA20 ||
3195 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
3196 adev->asic_type == CHIP_NAVI10 ||
3197 adev->asic_type == CHIP_NAVI14 ||
3198 adev->asic_type == CHIP_NAVI12 ||
3200 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
3201 adev->asic_type == CHIP_RENOIR ||
3203 adev->asic_type == CHIP_RAVEN) {
3204 /* Fill GFX9 params */
3205 tiling_info->gfx9.num_pipes =
3206 adev->gfx.config.gb_addr_config_fields.num_pipes;
3207 tiling_info->gfx9.num_banks =
3208 adev->gfx.config.gb_addr_config_fields.num_banks;
3209 tiling_info->gfx9.pipe_interleave =
3210 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3211 tiling_info->gfx9.num_shader_engines =
3212 adev->gfx.config.gb_addr_config_fields.num_se;
3213 tiling_info->gfx9.max_compressed_frags =
3214 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3215 tiling_info->gfx9.num_rb_per_se =
3216 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3217 tiling_info->gfx9.swizzle =
3218 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3219 tiling_info->gfx9.shaderEnable = 1;
3221 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3222 plane_size, tiling_info,
3223 tiling_flags, dcc, address);
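/*
 * Derive blending settings from the DRM plane state: per-pixel alpha for
 * premultiplied ARGB/ABGR overlay formats, and global plane alpha whenever
 * the plane's alpha property is below 0xffff.
 */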
3232 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3233 bool *per_pixel_alpha, bool *global_alpha,
3234 int *global_alpha_value)
3236 *per_pixel_alpha = false;
3237 *global_alpha = false;
3238 *global_alpha_value = 0xff;
3240 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3243 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3244 static const uint32_t alpha_formats[] = {
3245 DRM_FORMAT_ARGB8888,
3246 DRM_FORMAT_RGBA8888,
3247 DRM_FORMAT_ABGR8888,
3249 uint32_t format = plane_state->fb->format->format;
3252 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3253 if (format == alpha_formats[i]) {
3254 *per_pixel_alpha = true;
3260 if (plane_state->alpha < 0xffff) {
3261 *global_alpha = true;
3262 *global_alpha_value = plane_state->alpha >> 8;
3267 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3268 const enum surface_pixel_format format,
3269 enum dc_color_space *color_space)
3273 *color_space = COLOR_SPACE_SRGB;
3275 /* DRM color properties only affect non-RGB formats. */
3276 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3279 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3281 switch (plane_state->color_encoding) {
3282 case DRM_COLOR_YCBCR_BT601:
3284 *color_space = COLOR_SPACE_YCBCR601;
3286 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3289 case DRM_COLOR_YCBCR_BT709:
3291 *color_space = COLOR_SPACE_YCBCR709;
3293 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3296 case DRM_COLOR_YCBCR_BT2020:
3298 *color_space = COLOR_SPACE_2020_YCBCR;
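/*
 * Translate the framebuffer format, plane rotation and tiling flags into
 * DC's plane_info and the surface address; unsupported formats are
 * rejected with an error.
 */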
3311 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3312 const struct drm_plane_state *plane_state,
3313 const uint64_t tiling_flags,
3314 struct dc_plane_info *plane_info,
3315 struct dc_plane_address *address)
3317 const struct drm_framebuffer *fb = plane_state->fb;
3318 const struct amdgpu_framebuffer *afb =
3319 to_amdgpu_framebuffer(plane_state->fb);
3320 struct drm_format_name_buf format_name;
3323 memset(plane_info, 0, sizeof(*plane_info));
3325 switch (fb->format->format) {
3327 plane_info->format =
3328 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3330 case DRM_FORMAT_RGB565:
3331 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3333 case DRM_FORMAT_XRGB8888:
3334 case DRM_FORMAT_ARGB8888:
3335 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3337 case DRM_FORMAT_XRGB2101010:
3338 case DRM_FORMAT_ARGB2101010:
3339 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3341 case DRM_FORMAT_XBGR2101010:
3342 case DRM_FORMAT_ABGR2101010:
3343 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3345 case DRM_FORMAT_XBGR8888:
3346 case DRM_FORMAT_ABGR8888:
3347 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3349 case DRM_FORMAT_NV21:
3350 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3352 case DRM_FORMAT_NV12:
3353 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3357 "Unsupported screen format %s\n",
3358 drm_get_format_name(fb->format->format, &format_name));
3362 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3363 case DRM_MODE_ROTATE_0:
3364 plane_info->rotation = ROTATION_ANGLE_0;
3366 case DRM_MODE_ROTATE_90:
3367 plane_info->rotation = ROTATION_ANGLE_90;
3369 case DRM_MODE_ROTATE_180:
3370 plane_info->rotation = ROTATION_ANGLE_180;
3372 case DRM_MODE_ROTATE_270:
3373 plane_info->rotation = ROTATION_ANGLE_270;
3376 plane_info->rotation = ROTATION_ANGLE_0;
3380 plane_info->visible = true;
3381 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3383 plane_info->layer_index = 0;
3385 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3386 &plane_info->color_space);
3390 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3391 plane_info->rotation, tiling_flags,
3392 &plane_info->tiling_info,
3393 &plane_info->plane_size,
3394 &plane_info->dcc, address);
3398 fill_blending_from_plane_state(
3399 plane_state, &plane_info->per_pixel_alpha,
3400 &plane_info->global_alpha, &plane_info->global_alpha_value);
3405 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3406 struct dc_plane_state *dc_plane_state,
3407 struct drm_plane_state *plane_state,
3408 struct drm_crtc_state *crtc_state)
3410 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3411 const struct amdgpu_framebuffer *amdgpu_fb =
3412 to_amdgpu_framebuffer(plane_state->fb);
3413 struct dc_scaling_info scaling_info;
3414 struct dc_plane_info plane_info;
3415 uint64_t tiling_flags;
3418 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3422 dc_plane_state->src_rect = scaling_info.src_rect;
3423 dc_plane_state->dst_rect = scaling_info.dst_rect;
3424 dc_plane_state->clip_rect = scaling_info.clip_rect;
3425 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3427 ret = get_fb_info(amdgpu_fb, &tiling_flags);
3431 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3433 &dc_plane_state->address);
3437 dc_plane_state->format = plane_info.format;
3438 dc_plane_state->color_space = plane_info.color_space;
3440 dc_plane_state->plane_size = plane_info.plane_size;
3441 dc_plane_state->rotation = plane_info.rotation;
3442 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3443 dc_plane_state->stereo_format = plane_info.stereo_format;
3444 dc_plane_state->tiling_info = plane_info.tiling_info;
3445 dc_plane_state->visible = plane_info.visible;
3446 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3447 dc_plane_state->global_alpha = plane_info.global_alpha;
3448 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3449 dc_plane_state->dcc = plane_info.dcc;
3450 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3453 * Always set input transfer function, since plane state is refreshed
3456 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
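/*
 * Compute the stream's source (viewport) and destination rectangles from
 * the connector's RMX scaling mode, preserving aspect ratio or centering
 * as requested, and apply any underscan borders.
 */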
3463 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3464 const struct dm_connector_state *dm_state,
3465 struct dc_stream_state *stream)
3467 enum amdgpu_rmx_type rmx_type;
3469 struct rect src = { 0 }; /* viewport in composition space */
3470 struct rect dst = { 0 }; /* stream addressable area */
3472 /* no mode. nothing to be done */
3476 /* Full screen scaling by default */
3477 src.width = mode->hdisplay;
3478 src.height = mode->vdisplay;
3479 dst.width = stream->timing.h_addressable;
3480 dst.height = stream->timing.v_addressable;
3483 rmx_type = dm_state->scaling;
3484 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3485 if (src.width * dst.height <
3486 src.height * dst.width) {
3487 /* height needs less upscaling/more downscaling */
3488 dst.width = src.width *
3489 dst.height / src.height;
3491 /* width needs less upscaling/more downscaling */
3492 dst.height = src.height *
3493 dst.width / src.width;
3495 } else if (rmx_type == RMX_CENTER) {
3499 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3500 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3502 if (dm_state->underscan_enable) {
3503 dst.x += dm_state->underscan_hborder / 2;
3504 dst.y += dm_state->underscan_vborder / 2;
3505 dst.width -= dm_state->underscan_hborder;
3506 dst.height -= dm_state->underscan_vborder;
3513 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3514 dst.x, dst.y, dst.width, dst.height);
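/*
 * Map the sink-reported bpc, capped by the connector's max_requested_bpc
 * and rounded down to an even value, onto a DC color depth.
 */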
3518 static enum dc_color_depth
3519 convert_color_depth_from_display_info(const struct drm_connector *connector,
3520 const struct drm_connector_state *state)
3522 uint8_t bpc = (uint8_t)connector->display_info.bpc;
3524 /* Assume 8 bpc by default if no bpc is specified. */
3525 bpc = bpc ? bpc : 8;
3528 state = connector->state;
3532 * Cap display bpc based on the user requested value.
3534 * The value for state->max_bpc may not be correctly updated
3535 * depending on when the connector gets added to the state
3536 * or if this was called outside of atomic check, so it
3537 * can't be used directly.
3539 bpc = min(bpc, state->max_requested_bpc);
3541 /* Round down to the nearest even number. */
3542 bpc = bpc - (bpc & 1);
3548 * Temporary workaround: DRM doesn't parse color depth for
3549 * EDID revisions before 1.4.
3550 * TODO: Fix EDID parsing
3552 return COLOR_DEPTH_888;
3554 return COLOR_DEPTH_666;
3556 return COLOR_DEPTH_888;
3558 return COLOR_DEPTH_101010;
3560 return COLOR_DEPTH_121212;
3562 return COLOR_DEPTH_141414;
3564 return COLOR_DEPTH_161616;
3566 return COLOR_DEPTH_UNDEFINED;
3570 static enum dc_aspect_ratio
3571 get_aspect_ratio(const struct drm_display_mode *mode_in)
3573 /* 1-1 mapping, since both enums follow the HDMI spec. */
3574 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
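/*
 * Pick the output color space from the timing: YCbCr709 for pixel clocks
 * above 27.03 MHz, YCbCr601 below (limited range when Y_ONLY is set), and
 * sRGB for RGB pixel encoding.
 */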
3577 static enum dc_color_space
3578 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3580 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3582 switch (dc_crtc_timing->pixel_encoding) {
3583 case PIXEL_ENCODING_YCBCR422:
3584 case PIXEL_ENCODING_YCBCR444:
3585 case PIXEL_ENCODING_YCBCR420:
3588 * 27030khz is the separation point between HDTV and SDTV
3589 * according to HDMI spec, we use YCbCr709 and YCbCr601
3592 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3593 if (dc_crtc_timing->flags.Y_ONLY)
3595 COLOR_SPACE_YCBCR709_LIMITED;
3597 color_space = COLOR_SPACE_YCBCR709;
3599 if (dc_crtc_timing->flags.Y_ONLY)
3601 COLOR_SPACE_YCBCR601_LIMITED;
3603 color_space = COLOR_SPACE_YCBCR601;
3608 case PIXEL_ENCODING_RGB:
3609 color_space = COLOR_SPACE_SRGB;
3620 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
3622 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3625 timing_out->display_color_depth--;
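/*
 * For HDMI, scale the pixel clock by the colour depth (halved again for
 * YCbCr 4:2:0) and keep reducing the depth until the result fits within
 * the sink's max TMDS clock, stopping at 8 bpc.
 */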
3628 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
3629 const struct drm_display_info *info)
3632 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3635 normalized_clk = timing_out->pix_clk_100hz / 10;
3636 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3637 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3638 normalized_clk /= 2;
3639 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
3640 switch (timing_out->display_color_depth) {
3641 case COLOR_DEPTH_101010:
3642 normalized_clk = (normalized_clk * 30) / 24;
3644 case COLOR_DEPTH_121212:
3645 normalized_clk = (normalized_clk * 36) / 24;
3647 case COLOR_DEPTH_161616:
3648 normalized_clk = (normalized_clk * 48) / 24;
3653 if (normalized_clk <= info->max_tmds_clock)
3655 reduce_mode_colour_depth(timing_out);
3657 } while (timing_out->display_color_depth > COLOR_DEPTH_888);
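/*
 * Populate the DC CRTC timing from the DRM display mode: pixel encoding,
 * color depth, VIC, sync polarities and the h/v timing fields. When an
 * old stream is supplied, its VIC and polarities are reused so that a
 * scaling-only change does not look like a new mode.
 */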
3661 static void fill_stream_properties_from_drm_display_mode(
3662 struct dc_stream_state *stream,
3663 const struct drm_display_mode *mode_in,
3664 const struct drm_connector *connector,
3665 const struct drm_connector_state *connector_state,
3666 const struct dc_stream_state *old_stream)
3668 struct dc_crtc_timing *timing_out = &stream->timing;
3669 const struct drm_display_info *info = &connector->display_info;
3670 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3671 struct hdmi_vendor_infoframe hv_frame;
3672 struct hdmi_avi_infoframe avi_frame;
3674 memset(&hv_frame, 0, sizeof(hv_frame));
3675 memset(&avi_frame, 0, sizeof(avi_frame));
3677 timing_out->h_border_left = 0;
3678 timing_out->h_border_right = 0;
3679 timing_out->v_border_top = 0;
3680 timing_out->v_border_bottom = 0;
3681 /* TODO: un-hardcode */
3682 if (drm_mode_is_420_only(info, mode_in)
3683 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3684 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3685 else if (drm_mode_is_420_also(info, mode_in)
3686 && aconnector->force_yuv420_output)
3687 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3688 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3689 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3690 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3692 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3694 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3695 timing_out->display_color_depth = convert_color_depth_from_display_info(
3696 connector, connector_state);
3697 timing_out->scan_type = SCANNING_TYPE_NODATA;
3698 timing_out->hdmi_vic = 0;
3701 timing_out->vic = old_stream->timing.vic;
3702 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3703 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3705 timing_out->vic = drm_match_cea_mode(mode_in);
3706 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3707 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3708 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3709 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3712 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3713 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
3714 timing_out->vic = avi_frame.video_code;
3715 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
3716 timing_out->hdmi_vic = hv_frame.vic;
3719 timing_out->h_addressable = mode_in->crtc_hdisplay;
3720 timing_out->h_total = mode_in->crtc_htotal;
3721 timing_out->h_sync_width =
3722 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3723 timing_out->h_front_porch =
3724 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3725 timing_out->v_total = mode_in->crtc_vtotal;
3726 timing_out->v_addressable = mode_in->crtc_vdisplay;
3727 timing_out->v_front_porch =
3728 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3729 timing_out->v_sync_width =
3730 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
3731 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
3732 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
3734 stream->output_color_space = get_output_color_space(timing_out);
3736 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3737 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
3738 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3739 adjust_colour_depth_from_display_info(timing_out, info);
3742 static void fill_audio_info(struct audio_info *audio_info,
3743 const struct drm_connector *drm_connector,
3744 const struct dc_sink *dc_sink)
3747 int cea_revision = 0;
3748 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3750 audio_info->manufacture_id = edid_caps->manufacturer_id;
3751 audio_info->product_id = edid_caps->product_id;
3753 cea_revision = drm_connector->display_info.cea_rev;
3755 strscpy(audio_info->display_name,
3756 edid_caps->display_name,
3757 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3759 if (cea_revision >= 3) {
3760 audio_info->mode_count = edid_caps->audio_mode_count;
3762 for (i = 0; i < audio_info->mode_count; ++i) {
3763 audio_info->modes[i].format_code =
3764 (enum audio_format_code)
3765 (edid_caps->audio_modes[i].format_code);
3766 audio_info->modes[i].channel_count =
3767 edid_caps->audio_modes[i].channel_count;
3768 audio_info->modes[i].sample_rates.all =
3769 edid_caps->audio_modes[i].sample_rate;
3770 audio_info->modes[i].sample_size =
3771 edid_caps->audio_modes[i].sample_size;
3775 audio_info->flags.all = edid_caps->speaker_flags;
3777 /* TODO: We only check for the progressive mode, check for interlace mode too */
3778 if (drm_connector->latency_present[0]) {
3779 audio_info->video_latency = drm_connector->video_latency[0];
3780 audio_info->audio_latency = drm_connector->audio_latency[0];
3783 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3788 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3789 struct drm_display_mode *dst_mode)
3791 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3792 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3793 dst_mode->crtc_clock = src_mode->crtc_clock;
3794 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3795 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3796 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
3797 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3798 dst_mode->crtc_htotal = src_mode->crtc_htotal;
3799 dst_mode->crtc_hskew = src_mode->crtc_hskew;
3800 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3801 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3802 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3803 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3804 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3808 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3809 const struct drm_display_mode *native_mode,
3812 if (scale_enabled) {
3813 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3814 } else if (native_mode->clock == drm_mode->clock &&
3815 native_mode->htotal == drm_mode->htotal &&
3816 native_mode->vtotal == drm_mode->vtotal) {
3817 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3819 /* no scaling and no amdgpu-inserted mode; no need to patch */
3823 static struct dc_sink *
3824 create_fake_sink(struct amdgpu_dm_connector *aconnector)
3826 struct dc_sink_init_data sink_init_data = { 0 };
3827 struct dc_sink *sink = NULL;
3828 sink_init_data.link = aconnector->dc_link;
3829 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3831 sink = dc_sink_create(&sink_init_data);
3833 DRM_ERROR("Failed to create sink!\n");
3836 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
3841 static void set_multisync_trigger_params(
3842 struct dc_stream_state *stream)
3844 if (stream->triggered_crtc_reset.enabled) {
3845 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3846 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
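/*
 * Among the streams that enable CRTC reset triggering, make the one with
 * the highest refresh rate the master and point the other streams' reset
 * event source at it.
 */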
3850 static void set_master_stream(struct dc_stream_state *stream_set[],
3853 int j, highest_rfr = 0, master_stream = 0;
3855 for (j = 0; j < stream_count; j++) {
3856 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
3857 int refresh_rate = 0;
3859 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
3860 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
3861 if (refresh_rate > highest_rfr) {
3862 highest_rfr = refresh_rate;
3867 for (j = 0; j < stream_count; j++) {
3869 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
3873 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3877 if (context->stream_count < 2)
3879 for (i = 0; i < context->stream_count ; i++) {
3880 if (!context->streams[i])
3883 * TODO: add a function to read AMD VSDB bits and set
3884 * crtc_sync_master.multi_sync_enabled flag
3885 * For now it's set to false
3887 set_multisync_trigger_params(context->streams[i]);
3889 set_master_stream(context->streams, context->stream_count);
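/*
 * Build a dc_stream_state for the connector's sink (or a fake sink when
 * nothing is attached), fill its timing, scaling and audio properties from
 * the DRM mode, and enable DSC and PSR/VSC infopackets where supported.
 */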
3892 static struct dc_stream_state *
3893 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3894 const struct drm_display_mode *drm_mode,
3895 const struct dm_connector_state *dm_state,
3896 const struct dc_stream_state *old_stream)
3898 struct drm_display_mode *preferred_mode = NULL;
3899 struct drm_connector *drm_connector;
3900 const struct drm_connector_state *con_state =
3901 dm_state ? &dm_state->base : NULL;
3902 struct dc_stream_state *stream = NULL;
3903 struct drm_display_mode mode = *drm_mode;
3904 bool native_mode_found = false;
3905 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
3907 int preferred_refresh = 0;
3908 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3909 struct dsc_dec_dpcd_caps dsc_caps;
3910 uint32_t link_bandwidth_kbps;
3913 struct dc_sink *sink = NULL;
3914 if (aconnector == NULL) {
3915 DRM_ERROR("aconnector is NULL!\n");
3919 drm_connector = &aconnector->base;
3921 if (!aconnector->dc_sink) {
3922 sink = create_fake_sink(aconnector);
3926 sink = aconnector->dc_sink;
3927 dc_sink_retain(sink);
3930 stream = dc_create_stream_for_sink(sink);
3932 if (stream == NULL) {
3933 DRM_ERROR("Failed to create stream for sink!\n");
3937 stream->dm_stream_context = aconnector;
3939 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
3940 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
3942 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
3943 /* Search for preferred mode */
3944 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
3945 native_mode_found = true;
3949 if (!native_mode_found)
3950 preferred_mode = list_first_entry_or_null(
3951 &aconnector->base.modes,
3952 struct drm_display_mode,
3955 mode_refresh = drm_mode_vrefresh(&mode);
3957 if (preferred_mode == NULL) {
3959 * This may not be an error; the use case is when we have no
3960 * usermode calls to reset and set mode upon hotplug. In this
3961 * case, we call set mode ourselves to restore the previous mode
3962 * and the mode list may not be filled in yet.
3964 DRM_DEBUG_DRIVER("No preferred mode found\n");
3966 decide_crtc_timing_for_drm_display_mode(
3967 &mode, preferred_mode,
3968 dm_state ? (dm_state->scaling != RMX_OFF) : false);
3969 preferred_refresh = drm_mode_vrefresh(preferred_mode);
3973 drm_mode_set_crtcinfo(&mode, 0);
3976 * If scaling is enabled and refresh rate didn't change
3977 * we copy the vic and polarities of the old timings
3979 if (!scale || mode_refresh != preferred_refresh)
3980 fill_stream_properties_from_drm_display_mode(stream,
3981 &mode, &aconnector->base, con_state, NULL);
3983 fill_stream_properties_from_drm_display_mode(stream,
3984 &mode, &aconnector->base, con_state, old_stream);
3986 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3987 stream->timing.flags.DSC = 0;
3989 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3990 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
3991 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
3993 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
3994 dc_link_get_link_cap(aconnector->dc_link));
3996 if (dsc_caps.is_dsc_supported)
3997 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
3999 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4000 link_bandwidth_kbps,
4002 &stream->timing.dsc_cfg))
4003 stream->timing.flags.DSC = 1;
4007 update_stream_scaling_settings(&mode, dm_state, stream);
4010 &stream->audio_info,
4014 update_stream_signal(stream, sink);
4016 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4017 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4018 if (stream->link->psr_feature_enabled) {
4019 struct dc *core_dc = stream->link->ctx->dc;
4021 if (dc_is_dmcu_initialized(core_dc)) {
4022 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4024 stream->psr_version = dmcu->dmcu_version.psr_version;
4025 mod_build_vsc_infopacket(stream,
4026 &stream->vsc_infopacket,
4027 &stream->use_vsc_sdp_for_colorimetry);
4031 dc_sink_release(sink);
4036 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4038 drm_crtc_cleanup(crtc);
4042 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4043 struct drm_crtc_state *state)
4045 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4047 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4049 dc_stream_release(cur->stream);
4052 __drm_atomic_helper_crtc_destroy_state(state);
4058 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4060 struct dm_crtc_state *state;
4063 dm_crtc_destroy_state(crtc, crtc->state);
4065 state = kzalloc(sizeof(*state), GFP_KERNEL);
4066 if (WARN_ON(!state))
4069 crtc->state = &state->base;
4070 crtc->state->crtc = crtc;
4074 static struct drm_crtc_state *
4075 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4077 struct dm_crtc_state *state, *cur;
4079 cur = to_dm_crtc_state(crtc->state);
4081 if (WARN_ON(!crtc->state))
4084 state = kzalloc(sizeof(*state), GFP_KERNEL);
4088 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4091 state->stream = cur->stream;
4092 dc_stream_retain(state->stream);
4095 state->active_planes = cur->active_planes;
4096 state->interrupts_enabled = cur->interrupts_enabled;
4097 state->vrr_params = cur->vrr_params;
4098 state->vrr_infopacket = cur->vrr_infopacket;
4099 state->abm_level = cur->abm_level;
4100 state->vrr_supported = cur->vrr_supported;
4101 state->freesync_config = cur->freesync_config;
4102 state->crc_src = cur->crc_src;
4103 state->cm_has_degamma = cur->cm_has_degamma;
4104 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4106 /* TODO: Duplicate dc_stream once the stream object is flattened */
4108 return &state->base;
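/*
 * Enable or disable the VUPDATE interrupt for this CRTC's OTG instance
 * through DC; only needed alongside vblank when VRR is active.
 */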
4111 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4113 enum dc_irq_source irq_source;
4114 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4115 struct amdgpu_device *adev = crtc->dev->dev_private;
4118 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4120 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4122 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4123 acrtc->crtc_id, enable ? "en" : "dis", rc);
4127 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4129 enum dc_irq_source irq_source;
4130 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4131 struct amdgpu_device *adev = crtc->dev->dev_private;
4132 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4136 /* vblank irq on -> Only need vupdate irq in vrr mode */
4137 if (amdgpu_dm_vrr_active(acrtc_state))
4138 rc = dm_set_vupdate_irq(crtc, true);
4140 /* vblank irq off -> vupdate irq off */
4141 rc = dm_set_vupdate_irq(crtc, false);
4147 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4148 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4151 static int dm_enable_vblank(struct drm_crtc *crtc)
4153 return dm_set_vblank(crtc, true);
4156 static void dm_disable_vblank(struct drm_crtc *crtc)
4158 dm_set_vblank(crtc, false);
4161 /* Implement only the options currently available for the driver */
4162 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4163 .reset = dm_crtc_reset_state,
4164 .destroy = amdgpu_dm_crtc_destroy,
4165 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4166 .set_config = drm_atomic_helper_set_config,
4167 .page_flip = drm_atomic_helper_page_flip,
4168 .atomic_duplicate_state = dm_crtc_duplicate_state,
4169 .atomic_destroy_state = dm_crtc_destroy_state,
4170 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4171 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4172 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4173 .enable_vblank = dm_enable_vblank,
4174 .disable_vblank = dm_disable_vblank,
4177 static enum drm_connector_status
4178 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4181 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4185 * 1. This interface is NOT called in context of HPD irq.
4186 * 2. This interface *is called* in the context of a user-mode ioctl, which
4187 * makes it a bad place for *any* MST-related activity.
4190 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4191 !aconnector->fake_enable)
4192 connected = (aconnector->dc_sink != NULL);
4194 connected = (aconnector->base.force == DRM_FORCE_ON);
4196 return (connected ? connector_status_connected :
4197 connector_status_disconnected);
4200 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4201 struct drm_connector_state *connector_state,
4202 struct drm_property *property,
4205 struct drm_device *dev = connector->dev;
4206 struct amdgpu_device *adev = dev->dev_private;
4207 struct dm_connector_state *dm_old_state =
4208 to_dm_connector_state(connector->state);
4209 struct dm_connector_state *dm_new_state =
4210 to_dm_connector_state(connector_state);
4214 if (property == dev->mode_config.scaling_mode_property) {
4215 enum amdgpu_rmx_type rmx_type;
4218 case DRM_MODE_SCALE_CENTER:
4219 rmx_type = RMX_CENTER;
4221 case DRM_MODE_SCALE_ASPECT:
4222 rmx_type = RMX_ASPECT;
4224 case DRM_MODE_SCALE_FULLSCREEN:
4225 rmx_type = RMX_FULL;
4227 case DRM_MODE_SCALE_NONE:
4233 if (dm_old_state->scaling == rmx_type)
4236 dm_new_state->scaling = rmx_type;
4238 } else if (property == adev->mode_info.underscan_hborder_property) {
4239 dm_new_state->underscan_hborder = val;
4241 } else if (property == adev->mode_info.underscan_vborder_property) {
4242 dm_new_state->underscan_vborder = val;
4244 } else if (property == adev->mode_info.underscan_property) {
4245 dm_new_state->underscan_enable = val;
4247 } else if (property == adev->mode_info.abm_level_property) {
4248 dm_new_state->abm_level = val;
4255 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4256 const struct drm_connector_state *state,
4257 struct drm_property *property,
4260 struct drm_device *dev = connector->dev;
4261 struct amdgpu_device *adev = dev->dev_private;
4262 struct dm_connector_state *dm_state =
4263 to_dm_connector_state(state);
4266 if (property == dev->mode_config.scaling_mode_property) {
4267 switch (dm_state->scaling) {
4269 *val = DRM_MODE_SCALE_CENTER;
4272 *val = DRM_MODE_SCALE_ASPECT;
4275 *val = DRM_MODE_SCALE_FULLSCREEN;
4279 *val = DRM_MODE_SCALE_NONE;
4283 } else if (property == adev->mode_info.underscan_hborder_property) {
4284 *val = dm_state->underscan_hborder;
4286 } else if (property == adev->mode_info.underscan_vborder_property) {
4287 *val = dm_state->underscan_vborder;
4289 } else if (property == adev->mode_info.underscan_property) {
4290 *val = dm_state->underscan_enable;
4292 } else if (property == adev->mode_info.abm_level_property) {
4293 *val = dm_state->abm_level;
4300 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4302 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4304 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4307 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4309 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4310 const struct dc_link *link = aconnector->dc_link;
4311 struct amdgpu_device *adev = connector->dev->dev_private;
4312 struct amdgpu_display_manager *dm = &adev->dm;
4314 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4315 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4317 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4318 link->type != dc_connection_none &&
4319 dm->backlight_dev) {
4320 backlight_device_unregister(dm->backlight_dev);
4321 dm->backlight_dev = NULL;
4325 if (aconnector->dc_em_sink)
4326 dc_sink_release(aconnector->dc_em_sink);
4327 aconnector->dc_em_sink = NULL;
4328 if (aconnector->dc_sink)
4329 dc_sink_release(aconnector->dc_sink);
4330 aconnector->dc_sink = NULL;
4332 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4333 drm_connector_unregister(connector);
4334 drm_connector_cleanup(connector);
4335 if (aconnector->i2c) {
4336 i2c_del_adapter(&aconnector->i2c->base);
4337 kfree(aconnector->i2c);
4343 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4345 struct dm_connector_state *state =
4346 to_dm_connector_state(connector->state);
4348 if (connector->state)
4349 __drm_atomic_helper_connector_destroy_state(connector->state);
4353 state = kzalloc(sizeof(*state), GFP_KERNEL);
4356 state->scaling = RMX_OFF;
4357 state->underscan_enable = false;
4358 state->underscan_hborder = 0;
4359 state->underscan_vborder = 0;
4360 state->base.max_requested_bpc = 8;
4361 state->vcpi_slots = 0;
4363 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4364 state->abm_level = amdgpu_dm_abm_level;
4366 __drm_atomic_helper_connector_reset(connector, &state->base);
4370 struct drm_connector_state *
4371 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4373 struct dm_connector_state *state =
4374 to_dm_connector_state(connector->state);
4376 struct dm_connector_state *new_state =
4377 kmemdup(state, sizeof(*state), GFP_KERNEL);
4382 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4384 new_state->freesync_capable = state->freesync_capable;
4385 new_state->abm_level = state->abm_level;
4386 new_state->scaling = state->scaling;
4387 new_state->underscan_enable = state->underscan_enable;
4388 new_state->underscan_hborder = state->underscan_hborder;
4389 new_state->underscan_vborder = state->underscan_vborder;
4390 new_state->vcpi_slots = state->vcpi_slots;
4391 new_state->pbn = state->pbn;
4392 return &new_state->base;
4395 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4396 .reset = amdgpu_dm_connector_funcs_reset,
4397 .detect = amdgpu_dm_connector_detect,
4398 .fill_modes = drm_helper_probe_single_connector_modes,
4399 .destroy = amdgpu_dm_connector_destroy,
4400 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4401 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4402 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4403 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4404 .early_unregister = amdgpu_dm_connector_unregister
4407 static int get_modes(struct drm_connector *connector)
4409 return amdgpu_dm_connector_get_modes(connector);
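/*
 * Create an emulated sink for a forced connector: take the EDID override
 * blob attached to the connector and register it with DC as a remote sink
 * of SIGNAL_TYPE_VIRTUAL. If no override EDID is present, the connector is
 * forced OFF instead.
 */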
4412 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4414 struct dc_sink_init_data init_params = {
4415 .link = aconnector->dc_link,
4416 .sink_signal = SIGNAL_TYPE_VIRTUAL
4420 if (!aconnector->base.edid_blob_ptr) {
4421 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4422 aconnector->base.name);
4424 aconnector->base.force = DRM_FORCE_OFF;
4425 aconnector->base.override_edid = false;
4429 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4431 aconnector->edid = edid;
4433 aconnector->dc_em_sink = dc_link_add_remote_sink(
4434 aconnector->dc_link,
4436 (edid->extensions + 1) * EDID_LENGTH,
4439 if (aconnector->base.force == DRM_FORCE_ON) {
4440 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4441 aconnector->dc_link->local_sink :
4442 aconnector->dc_em_sink;
4443 dc_sink_retain(aconnector->dc_sink);
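/*
 * EDID management for forced connectors. For DP links, pretend the link
 * trained at LANE_COUNT_FOUR / LINK_RATE_HIGH2 so an initial modeset can
 * succeed on a headless boot, then build an emulated sink from the
 * override EDID.
 */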
4447 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4449 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4452 * In case of a headless boot with force on for a DP managed connector,
4453 * those settings have to be != 0 to get an initial modeset.
4455 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4456 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4457 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4461 aconnector->base.override_edid = true;
4462 create_eml_sink(aconnector);
4465 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4466 struct drm_display_mode *mode)
4468 int result = MODE_ERROR;
4469 struct dc_sink *dc_sink;
4470 struct amdgpu_device *adev = connector->dev->dev_private;
4471 /* TODO: Unhardcode stream count */
4472 struct dc_stream_state *stream;
4473 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4474 enum dc_status dc_result = DC_OK;
4476 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4477 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4481 * Only run this the first time mode_valid is called to initialize
4484 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4485 !aconnector->dc_em_sink)
4486 handle_edid_mgmt(aconnector);
4488 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4490 if (dc_sink == NULL) {
4491 DRM_ERROR("dc_sink is NULL!\n");
4495 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4496 if (stream == NULL) {
4497 DRM_ERROR("Failed to create stream for sink!\n");
4501 dc_result = dc_validate_stream(adev->dm.dc, stream);
4503 if (dc_result == DC_OK)
4506 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4512 dc_stream_release(stream);
4515 /* TODO: error handling */
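/*
 * Pack the connector's HDR output metadata into a DC info packet. The DRM
 * core builds the HDMI Dynamic Range and Mastering infoframe; for HDMI the
 * packet header is filled in directly, while for DP/eDP the payload is
 * wrapped in an SDP header before handing the 26 bytes of static metadata
 * to DC.
 */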
4519 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4520 struct dc_info_packet *out)
4522 struct hdmi_drm_infoframe frame;
4523 unsigned char buf[30]; /* 26 + 4 */
4527 memset(out, 0, sizeof(*out));
4529 if (!state->hdr_output_metadata)
4532 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4536 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4540 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4544 /* Prepare the infopacket for DC. */
4545 switch (state->connector->connector_type) {
4546 case DRM_MODE_CONNECTOR_HDMIA:
4547 out->hb0 = 0x87; /* type */
4548 out->hb1 = 0x01; /* version */
4549 out->hb2 = 0x1A; /* length */
4550 out->sb[0] = buf[3]; /* checksum */
4554 case DRM_MODE_CONNECTOR_DisplayPort:
4555 case DRM_MODE_CONNECTOR_eDP:
4556 out->hb0 = 0x00; /* sdp id, zero */
4557 out->hb1 = 0x87; /* type */
4558 out->hb2 = 0x1D; /* payload len - 1 */
4559 out->hb3 = (0x13 << 2); /* sdp version */
4560 out->sb[0] = 0x01; /* version */
4561 out->sb[1] = 0x1A; /* length */
4569 memcpy(&out->sb[i], &buf[4], 26);
4572 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4573 sizeof(out->sb), false);
4579 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4580 const struct drm_connector_state *new_state)
4582 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4583 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4585 if (old_blob != new_blob) {
4586 if (old_blob && new_blob &&
4587 old_blob->length == new_blob->length)
4588 return memcmp(old_blob->data, new_blob->data,
4598 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4599 struct drm_atomic_state *state)
4601 struct drm_connector_state *new_con_state =
4602 drm_atomic_get_new_connector_state(state, conn);
4603 struct drm_connector_state *old_con_state =
4604 drm_atomic_get_old_connector_state(state, conn);
4605 struct drm_crtc *crtc = new_con_state->crtc;
4606 struct drm_crtc_state *new_crtc_state;
4612 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4613 struct dc_info_packet hdr_infopacket;
4615 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4619 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4620 if (IS_ERR(new_crtc_state))
4621 return PTR_ERR(new_crtc_state);
4624 * DC considers the stream backends changed if the
4625 * static metadata changes. Forcing the modeset also
4626 * gives a simple way for userspace to switch from
4627 * 8bpc to 10bpc when setting the metadata to enter or exit HDR.
4630 * Changing the static metadata after it's been
4631 * set is permissible, however. So only force a
4632 * modeset if we're entering or exiting HDR.
4634 new_crtc_state->mode_changed =
4635 !old_con_state->hdr_output_metadata ||
4636 !new_con_state->hdr_output_metadata;
4642 static const struct drm_connector_helper_funcs
4643 amdgpu_dm_connector_helper_funcs = {
4645 * If hotplugging a second bigger display in FB Con mode, bigger resolution
4646 * modes will be filtered by drm_mode_validate_size(), and those modes
4647 * are missing after the user starts lightdm. So we need to renew the modes
4648 * list in the get_modes callback, not just return the modes count.
4650 .get_modes = get_modes,
4651 .mode_valid = amdgpu_dm_connector_mode_valid,
4652 .atomic_check = amdgpu_dm_connector_atomic_check,
4655 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4659 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4661 struct drm_device *dev = new_crtc_state->crtc->dev;
4662 struct drm_plane *plane;
4664 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4665 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4672 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4674 struct drm_atomic_state *state = new_crtc_state->state;
4675 struct drm_plane *plane;
4678 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4679 struct drm_plane_state *new_plane_state;
4681 /* Cursor planes are "fake". */
4682 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4685 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4687 if (!new_plane_state) {
4689 * The plane is enabled on the CRTC and hasn't changed
4690 * state. This means that it previously passed
4691 * validation and is therefore enabled.
4697 /* We need a framebuffer to be considered enabled. */
4698 num_active += (new_plane_state->fb != NULL);
4705 * Sets whether interrupts should be enabled on a specific CRTC.
4706 * We require that the stream be enabled and that there exist active
4707 * DC planes on the stream.
4710 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4711 struct drm_crtc_state *new_crtc_state)
4713 struct dm_crtc_state *dm_new_crtc_state =
4714 to_dm_crtc_state(new_crtc_state);
4716 dm_new_crtc_state->active_planes = 0;
4717 dm_new_crtc_state->interrupts_enabled = false;
4719 if (!dm_new_crtc_state->stream)
4722 dm_new_crtc_state->active_planes =
4723 count_crtc_active_planes(new_crtc_state);
4725 dm_new_crtc_state->interrupts_enabled =
4726 dm_new_crtc_state->active_planes > 0;
4729 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4730 struct drm_crtc_state *state)
4732 struct amdgpu_device *adev = crtc->dev->dev_private;
4733 struct dc *dc = adev->dm.dc;
4734 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4738 * Update interrupt state for the CRTC. This needs to happen whenever
4739 * the CRTC has changed or whenever any of its planes have changed.
4740 * Atomic check satisfies both of these requirements since the CRTC
4741 * is added to the state by DRM during drm_atomic_helper_check_planes.
4743 dm_update_crtc_interrupt_state(crtc, state);
4745 if (unlikely(!dm_crtc_state->stream &&
4746 modeset_required(state, NULL, dm_crtc_state->stream))) {
4751 /* In some use cases, like reset, no stream is attached */
4752 if (!dm_crtc_state->stream)
4756 * We want at least one hardware plane enabled to use
4757 * the stream with a cursor enabled.
4759 if (state->enable && state->active &&
4760 does_crtc_have_active_cursor(state) &&
4761 dm_crtc_state->active_planes == 0)
4764 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
4770 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4771 const struct drm_display_mode *mode,
4772 struct drm_display_mode *adjusted_mode)
4777 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4778 .disable = dm_crtc_helper_disable,
4779 .atomic_check = dm_crtc_helper_atomic_check,
4780 .mode_fixup = dm_crtc_helper_mode_fixup
4783 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
4788 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
4790 switch (display_color_depth) {
4791 case COLOR_DEPTH_666:
4793 case COLOR_DEPTH_888:
4795 case COLOR_DEPTH_101010:
4797 case COLOR_DEPTH_121212:
4799 case COLOR_DEPTH_141414:
4801 case COLOR_DEPTH_161616:
4809 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
4810 struct drm_crtc_state *crtc_state,
4811 struct drm_connector_state *conn_state)
4813 struct drm_atomic_state *state = crtc_state->state;
4814 struct drm_connector *connector = conn_state->connector;
4815 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4816 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
4817 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
4818 struct drm_dp_mst_topology_mgr *mst_mgr;
4819 struct drm_dp_mst_port *mst_port;
4820 enum dc_color_depth color_depth;
4823 if (!aconnector->port || !aconnector->dc_sink)
4826 mst_port = aconnector->port;
4827 mst_mgr = &aconnector->mst_port->mst_mgr;
4829 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
4832 if (!state->duplicated) {
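/*
 * For non-duplicated states, recompute the MST payload: bpp follows from
 * the DC colour depth (three components per pixel), the PBN is derived
 * from the adjusted mode clock via drm_dp_calc_pbn_mode(), and the VCPI
 * slots are then reserved in the MST topology manager.
 */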
4833 color_depth = convert_color_depth_from_display_info(connector, conn_state);
4834 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
4835 clock = adjusted_mode->clock;
4836 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp);
4838 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
4841 dm_new_connector_state->pbn);
4842 if (dm_new_connector_state->vcpi_slots < 0) {
4843 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
4844 return dm_new_connector_state->vcpi_slots;
4849 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
4850 .disable = dm_encoder_helper_disable,
4851 .atomic_check = dm_encoder_helper_atomic_check
4854 static void dm_drm_plane_reset(struct drm_plane *plane)
4856 struct dm_plane_state *amdgpu_state = NULL;
4859 plane->funcs->atomic_destroy_state(plane, plane->state);
4861 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
4862 WARN_ON(amdgpu_state == NULL);
4865 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
4868 static struct drm_plane_state *
4869 dm_drm_plane_duplicate_state(struct drm_plane *plane)
4871 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
4873 old_dm_plane_state = to_dm_plane_state(plane->state);
4874 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
4875 if (!dm_plane_state)
4878 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
4880 if (old_dm_plane_state->dc_state) {
4881 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
4882 dc_plane_state_retain(dm_plane_state->dc_state);
4885 return &dm_plane_state->base;
4888 void dm_drm_plane_destroy_state(struct drm_plane *plane,
4889 struct drm_plane_state *state)
4891 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
4893 if (dm_plane_state->dc_state)
4894 dc_plane_state_release(dm_plane_state->dc_state);
4896 drm_atomic_helper_plane_destroy_state(plane, state);
4899 static const struct drm_plane_funcs dm_plane_funcs = {
4900 .update_plane = drm_atomic_helper_update_plane,
4901 .disable_plane = drm_atomic_helper_disable_plane,
4902 .destroy = drm_primary_helper_destroy,
4903 .reset = dm_drm_plane_reset,
4904 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
4905 .atomic_destroy_state = dm_drm_plane_destroy_state,
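/*
 * prepare_fb: reserve and pin the framebuffer BO (display-capable domains
 * for regular planes, VRAM for cursor planes), map it through GART, and
 * record the resulting GPU address and tiling flags in the DC plane state.
 */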
4908 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
4909 struct drm_plane_state *new_state)
4911 struct amdgpu_framebuffer *afb;
4912 struct drm_gem_object *obj;
4913 struct amdgpu_device *adev;
4914 struct amdgpu_bo *rbo;
4915 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
4916 struct list_head list;
4917 struct ttm_validate_buffer tv;
4918 struct ww_acquire_ctx ticket;
4919 uint64_t tiling_flags;
4923 dm_plane_state_old = to_dm_plane_state(plane->state);
4924 dm_plane_state_new = to_dm_plane_state(new_state);
4926 if (!new_state->fb) {
4927 DRM_DEBUG_DRIVER("No FB bound\n");
4931 afb = to_amdgpu_framebuffer(new_state->fb);
4932 obj = new_state->fb->obj[0];
4933 rbo = gem_to_amdgpu_bo(obj);
4934 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
4935 INIT_LIST_HEAD(&list);
4939 list_add(&tv.head, &list);
4941 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
4943 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
4947 if (plane->type != DRM_PLANE_TYPE_CURSOR)
4948 domain = amdgpu_display_supported_domains(adev, rbo->flags);
4950 domain = AMDGPU_GEM_DOMAIN_VRAM;
4952 r = amdgpu_bo_pin(rbo, domain);
4953 if (unlikely(r != 0)) {
4954 if (r != -ERESTARTSYS)
4955 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
4956 ttm_eu_backoff_reservation(&ticket, &list);
4960 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
4961 if (unlikely(r != 0)) {
4962 amdgpu_bo_unpin(rbo);
4963 ttm_eu_backoff_reservation(&ticket, &list);
4964 DRM_ERROR("%p bind failed\n", rbo);
4968 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
4970 ttm_eu_backoff_reservation(&ticket, &list);
4972 afb->address = amdgpu_bo_gpu_offset(rbo);
4976 if (dm_plane_state_new->dc_state &&
4977 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
4978 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
4980 fill_plane_buffer_attributes(
4981 adev, afb, plane_state->format, plane_state->rotation,
4982 tiling_flags, &plane_state->tiling_info,
4983 &plane_state->plane_size, &plane_state->dcc,
4984 &plane_state->address);
4990 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
4991 struct drm_plane_state *old_state)
4993 struct amdgpu_bo *rbo;
4999 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5000 r = amdgpu_bo_reserve(rbo, false);
5002 DRM_ERROR("failed to reserve rbo before unpin\n");
5006 amdgpu_bo_unpin(rbo);
5007 amdgpu_bo_unreserve(rbo);
5008 amdgpu_bo_unref(&rbo);
5011 static int dm_plane_atomic_check(struct drm_plane *plane,
5012 struct drm_plane_state *state)
5014 struct amdgpu_device *adev = plane->dev->dev_private;
5015 struct dc *dc = adev->dm.dc;
5016 struct dm_plane_state *dm_plane_state;
5017 struct dc_scaling_info scaling_info;
5020 dm_plane_state = to_dm_plane_state(state);
5022 if (!dm_plane_state->dc_state)
5025 ret = fill_dc_scaling_info(state, &scaling_info);
5029 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5035 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5036 struct drm_plane_state *new_plane_state)
5038 /* Only support async updates on cursor planes. */
5039 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5045 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5046 struct drm_plane_state *new_state)
5048 struct drm_plane_state *old_state =
5049 drm_atomic_get_old_plane_state(new_state->state, plane);
5051 swap(plane->state->fb, new_state->fb);
5053 plane->state->src_x = new_state->src_x;
5054 plane->state->src_y = new_state->src_y;
5055 plane->state->src_w = new_state->src_w;
5056 plane->state->src_h = new_state->src_h;
5057 plane->state->crtc_x = new_state->crtc_x;
5058 plane->state->crtc_y = new_state->crtc_y;
5059 plane->state->crtc_w = new_state->crtc_w;
5060 plane->state->crtc_h = new_state->crtc_h;
5062 handle_cursor_update(plane, old_state);
5065 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5066 .prepare_fb = dm_plane_helper_prepare_fb,
5067 .cleanup_fb = dm_plane_helper_cleanup_fb,
5068 .atomic_check = dm_plane_atomic_check,
5069 .atomic_async_check = dm_plane_atomic_async_check,
5070 .atomic_async_update = dm_plane_atomic_async_update
5074 * TODO: these are currently initialized to RGB formats only.
5075 * For future use cases we should either initialize them dynamically based on
5076 * plane capabilities, or initialize this array to all formats, so the internal
5077 * DRM check will succeed, and let DC implement the proper check.
5079 static const uint32_t rgb_formats[] = {
5080 DRM_FORMAT_XRGB8888,
5081 DRM_FORMAT_ARGB8888,
5082 DRM_FORMAT_RGBA8888,
5083 DRM_FORMAT_XRGB2101010,
5084 DRM_FORMAT_XBGR2101010,
5085 DRM_FORMAT_ARGB2101010,
5086 DRM_FORMAT_ABGR2101010,
5087 DRM_FORMAT_XBGR8888,
5088 DRM_FORMAT_ABGR8888,
5092 static const uint32_t overlay_formats[] = {
5093 DRM_FORMAT_XRGB8888,
5094 DRM_FORMAT_ARGB8888,
5095 DRM_FORMAT_RGBA8888,
5096 DRM_FORMAT_XBGR8888,
5097 DRM_FORMAT_ABGR8888,
5101 static const u32 cursor_formats[] = {
5105 static int get_plane_formats(const struct drm_plane *plane,
5106 const struct dc_plane_cap *plane_cap,
5107 uint32_t *formats, int max_formats)
5109 int i, num_formats = 0;
5112 * TODO: Query support for each group of formats directly from
5113 * DC plane caps. This will require adding more formats to the caps list.
5117 switch (plane->type) {
5118 case DRM_PLANE_TYPE_PRIMARY:
5119 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5120 if (num_formats >= max_formats)
5123 formats[num_formats++] = rgb_formats[i];
5126 if (plane_cap && plane_cap->pixel_format_support.nv12)
5127 formats[num_formats++] = DRM_FORMAT_NV12;
5130 case DRM_PLANE_TYPE_OVERLAY:
5131 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5132 if (num_formats >= max_formats)
5135 formats[num_formats++] = overlay_formats[i];
5139 case DRM_PLANE_TYPE_CURSOR:
5140 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5141 if (num_formats >= max_formats)
5144 formats[num_formats++] = cursor_formats[i];
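/*
 * Initialize a DRM plane with the formats reported by get_plane_formats().
 * Alpha/blend-mode properties are attached to overlays and YCbCr colour
 * properties to primaries when the DC plane caps advertise per-pixel alpha
 * and NV12 support, respectively.
 */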
5152 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5153 struct drm_plane *plane,
5154 unsigned long possible_crtcs,
5155 const struct dc_plane_cap *plane_cap)
5157 uint32_t formats[32];
5161 num_formats = get_plane_formats(plane, plane_cap, formats,
5162 ARRAY_SIZE(formats));
5164 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5165 &dm_plane_funcs, formats, num_formats,
5166 NULL, plane->type, NULL);
5170 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5171 plane_cap && plane_cap->per_pixel_alpha) {
5172 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5173 BIT(DRM_MODE_BLEND_PREMULTI);
5175 drm_plane_create_alpha_property(plane);
5176 drm_plane_create_blend_mode_property(plane, blend_caps);
5179 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5180 plane_cap && plane_cap->pixel_format_support.nv12) {
5181 /* This only affects YUV formats. */
5182 drm_plane_create_color_properties(
5184 BIT(DRM_COLOR_YCBCR_BT601) |
5185 BIT(DRM_COLOR_YCBCR_BT709),
5186 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5187 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5188 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5191 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5193 /* Create (reset) the plane state */
5194 if (plane->funcs->reset)
5195 plane->funcs->reset(plane);
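/*
 * Create an amdgpu CRTC together with its dedicated cursor plane, hook up
 * the DM CRTC funcs/helpers, and enable colour management with
 * MAX_COLOR_LUT_ENTRIES-sized LUTs.
 */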
5200 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5201 struct drm_plane *plane,
5202 uint32_t crtc_index)
5204 struct amdgpu_crtc *acrtc = NULL;
5205 struct drm_plane *cursor_plane;
5209 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5213 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5214 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5216 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5220 res = drm_crtc_init_with_planes(
5225 &amdgpu_dm_crtc_funcs, NULL);
5230 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5232 /* Create (reset) the plane state */
5233 if (acrtc->base.funcs->reset)
5234 acrtc->base.funcs->reset(&acrtc->base);
5236 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5237 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5239 acrtc->crtc_id = crtc_index;
5240 acrtc->base.enabled = false;
5241 acrtc->otg_inst = -1;
5243 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5244 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5245 true, MAX_COLOR_LUT_ENTRIES);
5246 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5252 kfree(cursor_plane);
5257 static int to_drm_connector_type(enum signal_type st)
5260 case SIGNAL_TYPE_HDMI_TYPE_A:
5261 return DRM_MODE_CONNECTOR_HDMIA;
5262 case SIGNAL_TYPE_EDP:
5263 return DRM_MODE_CONNECTOR_eDP;
5264 case SIGNAL_TYPE_LVDS:
5265 return DRM_MODE_CONNECTOR_LVDS;
5266 case SIGNAL_TYPE_RGB:
5267 return DRM_MODE_CONNECTOR_VGA;
5268 case SIGNAL_TYPE_DISPLAY_PORT:
5269 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5270 return DRM_MODE_CONNECTOR_DisplayPort;
5271 case SIGNAL_TYPE_DVI_DUAL_LINK:
5272 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5273 return DRM_MODE_CONNECTOR_DVID;
5274 case SIGNAL_TYPE_VIRTUAL:
5275 return DRM_MODE_CONNECTOR_VIRTUAL;
5278 return DRM_MODE_CONNECTOR_Unknown;
5282 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5284 struct drm_encoder *encoder;
5286 /* There is only one encoder per connector */
5287 drm_connector_for_each_possible_encoder(connector, encoder)
5293 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5295 struct drm_encoder *encoder;
5296 struct amdgpu_encoder *amdgpu_encoder;
5298 encoder = amdgpu_dm_connector_to_encoder(connector);
5300 if (encoder == NULL)
5303 amdgpu_encoder = to_amdgpu_encoder(encoder);
5305 amdgpu_encoder->native_mode.clock = 0;
5307 if (!list_empty(&connector->probed_modes)) {
5308 struct drm_display_mode *preferred_mode = NULL;
5310 list_for_each_entry(preferred_mode,
5311 &connector->probed_modes,
5313 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5314 amdgpu_encoder->native_mode = *preferred_mode;
5322 static struct drm_display_mode *
5323 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5325 int hdisplay, int vdisplay)
5327 struct drm_device *dev = encoder->dev;
5328 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5329 struct drm_display_mode *mode = NULL;
5330 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5332 mode = drm_mode_duplicate(dev, native_mode);
5337 mode->hdisplay = hdisplay;
5338 mode->vdisplay = vdisplay;
5339 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5340 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
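/*
 * Add a set of common lower-than-native modes, derived from the encoder's
 * native mode, that are not already present in the probed mode list, so
 * smaller resolutions are available even when the EDID does not list them.
 */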
5346 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5347 struct drm_connector *connector)
5349 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5350 struct drm_display_mode *mode = NULL;
5351 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5352 struct amdgpu_dm_connector *amdgpu_dm_connector =
5353 to_amdgpu_dm_connector(connector);
5357 char name[DRM_DISPLAY_MODE_LEN];
5360 } common_modes[] = {
5361 { "640x480", 640, 480},
5362 { "800x600", 800, 600},
5363 { "1024x768", 1024, 768},
5364 { "1280x720", 1280, 720},
5365 { "1280x800", 1280, 800},
5366 {"1280x1024", 1280, 1024},
5367 { "1440x900", 1440, 900},
5368 {"1680x1050", 1680, 1050},
5369 {"1600x1200", 1600, 1200},
5370 {"1920x1080", 1920, 1080},
5371 {"1920x1200", 1920, 1200}
5374 n = ARRAY_SIZE(common_modes);
5376 for (i = 0; i < n; i++) {
5377 struct drm_display_mode *curmode = NULL;
5378 bool mode_existed = false;
5380 if (common_modes[i].w > native_mode->hdisplay ||
5381 common_modes[i].h > native_mode->vdisplay ||
5382 (common_modes[i].w == native_mode->hdisplay &&
5383 common_modes[i].h == native_mode->vdisplay))
5386 list_for_each_entry(curmode, &connector->probed_modes, head) {
5387 if (common_modes[i].w == curmode->hdisplay &&
5388 common_modes[i].h == curmode->vdisplay) {
5389 mode_existed = true;
5397 mode = amdgpu_dm_create_common_mode(encoder,
5398 common_modes[i].name, common_modes[i].w,
5400 drm_mode_probed_add(connector, mode);
5401 amdgpu_dm_connector->num_modes++;
5405 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5408 struct amdgpu_dm_connector *amdgpu_dm_connector =
5409 to_amdgpu_dm_connector(connector);
5412 /* empty probed_modes */
5413 INIT_LIST_HEAD(&connector->probed_modes);
5414 amdgpu_dm_connector->num_modes =
5415 drm_add_edid_modes(connector, edid);
5417 /* Sort the probed modes before calling
5418 * amdgpu_dm_get_native_mode(), since the EDID can have
5419 * more than one preferred mode. Modes that appear
5420 * later in the probed mode list could be of a higher,
5421 * preferred resolution. For example, a 3840x2160
5422 * resolution in the base EDID preferred timing and a 4096x2160
5423 * preferred resolution in a DID extension block later.
5425 drm_mode_sort(&connector->probed_modes);
5426 amdgpu_dm_get_native_mode(connector);
5428 amdgpu_dm_connector->num_modes = 0;
5432 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5434 struct amdgpu_dm_connector *amdgpu_dm_connector =
5435 to_amdgpu_dm_connector(connector);
5436 struct drm_encoder *encoder;
5437 struct edid *edid = amdgpu_dm_connector->edid;
5439 encoder = amdgpu_dm_connector_to_encoder(connector);
5441 if (!edid || !drm_edid_is_valid(edid)) {
5442 amdgpu_dm_connector->num_modes =
5443 drm_add_modes_noedid(connector, 640, 480);
5445 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5446 amdgpu_dm_connector_add_common_modes(encoder, connector);
5448 amdgpu_dm_fbc_init(connector);
5450 return amdgpu_dm_connector->num_modes;
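/*
 * Common connector initialization: reset the connector state, record the
 * dc_link, set polling and YCbCr 4:2:0 capabilities per connector type,
 * and attach the scaling, underscan, max bpc, ABM, HDR metadata, VRR and
 * (optionally) content protection properties.
 */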
5453 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5454 struct amdgpu_dm_connector *aconnector,
5456 struct dc_link *link,
5459 struct amdgpu_device *adev = dm->ddev->dev_private;
5462 * Some of the properties below require access to state, like bpc.
5463 * Allocate some default initial connector state with our reset helper.
5465 if (aconnector->base.funcs->reset)
5466 aconnector->base.funcs->reset(&aconnector->base);
5468 aconnector->connector_id = link_index;
5469 aconnector->dc_link = link;
5470 aconnector->base.interlace_allowed = false;
5471 aconnector->base.doublescan_allowed = false;
5472 aconnector->base.stereo_allowed = false;
5473 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5474 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5475 aconnector->audio_inst = -1;
5476 mutex_init(&aconnector->hpd_lock);
5479 * Configure HPD hot plug support. The connector->polled default value is
5480 * 0, which means HPD hot plug is not supported.
5482 switch (connector_type) {
5483 case DRM_MODE_CONNECTOR_HDMIA:
5484 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5485 aconnector->base.ycbcr_420_allowed =
5486 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5488 case DRM_MODE_CONNECTOR_DisplayPort:
5489 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5490 aconnector->base.ycbcr_420_allowed =
5491 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5493 case DRM_MODE_CONNECTOR_DVID:
5494 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5500 drm_object_attach_property(&aconnector->base.base,
5501 dm->ddev->mode_config.scaling_mode_property,
5502 DRM_MODE_SCALE_NONE);
5504 drm_object_attach_property(&aconnector->base.base,
5505 adev->mode_info.underscan_property,
5507 drm_object_attach_property(&aconnector->base.base,
5508 adev->mode_info.underscan_hborder_property,
5510 drm_object_attach_property(&aconnector->base.base,
5511 adev->mode_info.underscan_vborder_property,
5514 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5516 /* This defaults to the max in the range, but we want 8bpc. */
5517 aconnector->base.state->max_bpc = 8;
5518 aconnector->base.state->max_requested_bpc = 8;
5520 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5521 dc_is_dmcu_initialized(adev->dm.dc)) {
5522 drm_object_attach_property(&aconnector->base.base,
5523 adev->mode_info.abm_level_property, 0);
5526 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5527 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5528 connector_type == DRM_MODE_CONNECTOR_eDP) {
5529 drm_object_attach_property(
5530 &aconnector->base.base,
5531 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5533 drm_connector_attach_vrr_capable_property(
5535 #ifdef CONFIG_DRM_AMD_DC_HDCP
5536 if (adev->asic_type >= CHIP_RAVEN)
5537 drm_connector_attach_content_protection_property(&aconnector->base, false);
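/*
 * i2c_algorithm master_xfer hook: translate the array of i2c_msg into DC
 * i2c payloads and submit the command to DC over the link's DDC channel.
 */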
5542 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5543 struct i2c_msg *msgs, int num)
5545 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5546 struct ddc_service *ddc_service = i2c->ddc_service;
5547 struct i2c_command cmd;
5551 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5556 cmd.number_of_payloads = num;
5557 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5560 for (i = 0; i < num; i++) {
5561 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5562 cmd.payloads[i].address = msgs[i].addr;
5563 cmd.payloads[i].length = msgs[i].len;
5564 cmd.payloads[i].data = msgs[i].buf;
5568 ddc_service->ctx->dc,
5569 ddc_service->ddc_pin->hw_info.ddc_channel,
5573 kfree(cmd.payloads);
5577 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5579 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5582 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5583 .master_xfer = amdgpu_dm_i2c_xfer,
5584 .functionality = amdgpu_dm_i2c_func,
5587 static struct amdgpu_i2c_adapter *
5588 create_i2c(struct ddc_service *ddc_service,
5592 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5593 struct amdgpu_i2c_adapter *i2c;
5595 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
5598 i2c->base.owner = THIS_MODULE;
5599 i2c->base.class = I2C_CLASS_DDC;
5600 i2c->base.dev.parent = &adev->pdev->dev;
5601 i2c->base.algo = &amdgpu_dm_i2c_algo;
5602 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
5603 i2c_set_adapdata(&i2c->base, i2c);
5604 i2c->ddc_service = ddc_service;
5605 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
5612 * Note: this function assumes that dc_link_detect() was called for the
5613 * dc_link which will be represented by this aconnector.
5615 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5616 struct amdgpu_dm_connector *aconnector,
5617 uint32_t link_index,
5618 struct amdgpu_encoder *aencoder)
5622 struct dc *dc = dm->dc;
5623 struct dc_link *link = dc_get_link_at_index(dc, link_index);
5624 struct amdgpu_i2c_adapter *i2c;
5626 link->priv = aconnector;
5628 DRM_DEBUG_DRIVER("%s()\n", __func__);
5630 i2c = create_i2c(link->ddc, link->link_index, &res);
5632 DRM_ERROR("Failed to create i2c adapter data\n");
5636 aconnector->i2c = i2c;
5637 res = i2c_add_adapter(&i2c->base);
5640 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
5644 connector_type = to_drm_connector_type(link->connector_signal);
5646 res = drm_connector_init(
5649 &amdgpu_dm_connector_funcs,
5653 DRM_ERROR("connector_init failed\n");
5654 aconnector->connector_id = -1;
5658 drm_connector_helper_add(
5660 &amdgpu_dm_connector_helper_funcs);
5662 amdgpu_dm_connector_init_helper(
5669 drm_connector_attach_encoder(
5670 &aconnector->base, &aencoder->base);
5672 drm_connector_register(&aconnector->base);
5673 #if defined(CONFIG_DEBUG_FS)
5674 connector_debugfs_init(aconnector);
5675 aconnector->debugfs_dpcd_address = 0;
5676 aconnector->debugfs_dpcd_size = 0;
5679 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
5680 || connector_type == DRM_MODE_CONNECTOR_eDP)
5681 amdgpu_dm_initialize_dp_connector(dm, aconnector);
5686 aconnector->i2c = NULL;
5691 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
5693 switch (adev->mode_info.num_crtc) {
5710 static int amdgpu_dm_encoder_init(struct drm_device *dev,
5711 struct amdgpu_encoder *aencoder,
5712 uint32_t link_index)
5714 struct amdgpu_device *adev = dev->dev_private;
5716 int res = drm_encoder_init(dev,
5718 &amdgpu_dm_encoder_funcs,
5719 DRM_MODE_ENCODER_TMDS,
5722 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
5725 aencoder->encoder_id = link_index;
5727 aencoder->encoder_id = -1;
5729 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
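/*
 * Turn the CRTC's pageflip interrupt and DRM vblank handling on or off
 * together, typically around enabling or disabling a stream on the CRTC.
 */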
5734 static void manage_dm_interrupts(struct amdgpu_device *adev,
5735 struct amdgpu_crtc *acrtc,
5739 * This is not a correct translation, but it works as long as the VBLANK
5740 * constant is the same as PFLIP.
5743 amdgpu_display_crtc_idx_to_irq_type(
5748 drm_crtc_vblank_on(&acrtc->base);
5751 &adev->pageflip_irq,
5757 &adev->pageflip_irq,
5759 drm_crtc_vblank_off(&acrtc->base);
5764 is_scaling_state_different(const struct dm_connector_state *dm_state,
5765 const struct dm_connector_state *old_dm_state)
5767 if (dm_state->scaling != old_dm_state->scaling)
5769 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
5770 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
5772 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
5773 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
5775 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
5776 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
5781 #ifdef CONFIG_DRM_AMD_DC_HDCP
5782 static bool is_content_protection_different(struct drm_connector_state *state,
5783 const struct drm_connector_state *old_state,
5784 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
5786 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5788 /* CP is being re-enabled, ignore this */
5789 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
5790 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
5791 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
5795 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
5796 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
5797 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
5798 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
5800 /* Check if something is connected/enabled; otherwise we would start HDCP
5801 * with nothing connected/enabled (hot-plug, headless S3, DPMS)
5803 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
5804 aconnector->dc_sink != NULL)
5807 if (old_state->content_protection == state->content_protection)
5810 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
5816 static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
5817 struct hdcp_workqueue *hdcp_w)
5819 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5821 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
5822 hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
5823 else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
5824 hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);
5828 static void remove_stream(struct amdgpu_device *adev,
5829 struct amdgpu_crtc *acrtc,
5830 struct dc_stream_state *stream)
5832 /* this is the update mode case */
5834 acrtc->otg_inst = -1;
5835 acrtc->enabled = false;
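/*
 * Compute the DC cursor position from the cursor plane state: reject
 * oversized cursors, offset by the primary plane's source origin, and fold
 * negative on-screen coordinates into the hotspot so the cursor can sit
 * partially off the top/left edge.
 */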
5838 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
5839 struct dc_cursor_position *position)
5841 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5843 int xorigin = 0, yorigin = 0;
5845 position->enable = false;
5849 if (!crtc || !plane->state->fb)
5852 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
5853 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
5854 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
5856 plane->state->crtc_w,
5857 plane->state->crtc_h);
5861 x = plane->state->crtc_x;
5862 y = plane->state->crtc_y;
5864 if (x <= -amdgpu_crtc->max_cursor_width ||
5865 y <= -amdgpu_crtc->max_cursor_height)
5868 if (crtc->primary->state) {
5869 /* avivo cursors are offset into the total surface */
5870 x += crtc->primary->state->src_x >> 16;
5871 y += crtc->primary->state->src_y >> 16;
5875 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
5879 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
5882 position->enable = true;
5885 position->x_hotspot = xorigin;
5886 position->y_hotspot = yorigin;
5891 static void handle_cursor_update(struct drm_plane *plane,
5892 struct drm_plane_state *old_plane_state)
5894 struct amdgpu_device *adev = plane->dev->dev_private;
5895 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
5896 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
5897 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
5898 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5899 uint64_t address = afb ? afb->address : 0;
5900 struct dc_cursor_position position;
5901 struct dc_cursor_attributes attributes;
5904 if (!plane->state->fb && !old_plane_state->fb)
5907 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
5909 amdgpu_crtc->crtc_id,
5910 plane->state->crtc_w,
5911 plane->state->crtc_h);
5913 ret = get_cursor_position(plane, crtc, &position);
5917 if (!position.enable) {
5918 /* turn off cursor */
5919 if (crtc_state && crtc_state->stream) {
5920 mutex_lock(&adev->dm.dc_lock);
5921 dc_stream_set_cursor_position(crtc_state->stream,
5923 mutex_unlock(&adev->dm.dc_lock);
5928 amdgpu_crtc->cursor_width = plane->state->crtc_w;
5929 amdgpu_crtc->cursor_height = plane->state->crtc_h;
5931 memset(&attributes, 0, sizeof(attributes));
5932 attributes.address.high_part = upper_32_bits(address);
5933 attributes.address.low_part = lower_32_bits(address);
5934 attributes.width = plane->state->crtc_w;
5935 attributes.height = plane->state->crtc_h;
5936 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
5937 attributes.rotation_angle = 0;
5938 attributes.attribute_flags.value = 0;
5940 attributes.pitch = attributes.width;
5942 if (crtc_state->stream) {
5943 mutex_lock(&adev->dm.dc_lock);
5944 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
5946 DRM_ERROR("DC failed to set cursor attributes\n");
5948 if (!dc_stream_set_cursor_position(crtc_state->stream,
5950 DRM_ERROR("DC failed to set cursor position\n");
5951 mutex_unlock(&adev->dm.dc_lock);
5955 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
5958 assert_spin_locked(&acrtc->base.dev->event_lock);
5959 WARN_ON(acrtc->event);
5961 acrtc->event = acrtc->base.state->event;
5963 /* Set the flip status */
5964 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
5966 /* Mark this event as consumed */
5967 acrtc->base.state->event = NULL;
5969 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
5973 static void update_freesync_state_on_stream(
5974 struct amdgpu_display_manager *dm,
5975 struct dm_crtc_state *new_crtc_state,
5976 struct dc_stream_state *new_stream,
5977 struct dc_plane_state *surface,
5978 u32 flip_timestamp_in_us)
5980 struct mod_vrr_params vrr_params;
5981 struct dc_info_packet vrr_infopacket = {0};
5982 struct amdgpu_device *adev = dm->adev;
5983 unsigned long flags;
5989 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5990 * For now it's sufficient to just guard against these conditions.
5993 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5996 spin_lock_irqsave(&adev->ddev->event_lock, flags);
5997 vrr_params = new_crtc_state->vrr_params;
6000 mod_freesync_handle_preflip(
6001 dm->freesync_module,
6004 flip_timestamp_in_us,
6007 if (adev->family < AMDGPU_FAMILY_AI &&
6008 amdgpu_dm_vrr_active(new_crtc_state)) {
6009 mod_freesync_handle_v_update(dm->freesync_module,
6010 new_stream, &vrr_params);
6012 /* Need to call this before the frame ends. */
6013 dc_stream_adjust_vmin_vmax(dm->dc,
6014 new_crtc_state->stream,
6015 &vrr_params.adjust);
6019 mod_freesync_build_vrr_infopacket(
6020 dm->freesync_module,
6024 TRANSFER_FUNC_UNKNOWN,
6027 new_crtc_state->freesync_timing_changed |=
6028 (memcmp(&new_crtc_state->vrr_params.adjust,
6030 sizeof(vrr_params.adjust)) != 0);
6032 new_crtc_state->freesync_vrr_info_changed |=
6033 (memcmp(&new_crtc_state->vrr_infopacket,
6035 sizeof(vrr_infopacket)) != 0);
6037 new_crtc_state->vrr_params = vrr_params;
6038 new_crtc_state->vrr_infopacket = vrr_infopacket;
6040 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6041 new_stream->vrr_infopacket = vrr_infopacket;
6043 if (new_crtc_state->freesync_vrr_info_changed)
6044 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6045 new_crtc_state->base.crtc->base.id,
6046 (int)new_crtc_state->base.vrr_enabled,
6047 (int)vrr_params.state);
6049 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
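/*
 * Build the VRR parameters for a stream ahead of the commit: when VRR is
 * supported and enabled on the CRTC, the freesync module computes the
 * variable-refresh adjust range from the min/max refresh config; otherwise
 * the state is marked unsupported. Timing changes are tracked for the
 * later stream update.
 */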
6052 static void pre_update_freesync_state_on_stream(
6053 struct amdgpu_display_manager *dm,
6054 struct dm_crtc_state *new_crtc_state)
6056 struct dc_stream_state *new_stream = new_crtc_state->stream;
6057 struct mod_vrr_params vrr_params;
6058 struct mod_freesync_config config = new_crtc_state->freesync_config;
6059 struct amdgpu_device *adev = dm->adev;
6060 unsigned long flags;
6066 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6067 * For now it's sufficient to just guard against these conditions.
6069 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6072 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6073 vrr_params = new_crtc_state->vrr_params;
6075 if (new_crtc_state->vrr_supported &&
6076 config.min_refresh_in_uhz &&
6077 config.max_refresh_in_uhz) {
6078 config.state = new_crtc_state->base.vrr_enabled ?
6079 VRR_STATE_ACTIVE_VARIABLE :
6082 config.state = VRR_STATE_UNSUPPORTED;
6085 mod_freesync_build_vrr_params(dm->freesync_module,
6087 &config, &vrr_params);
6089 new_crtc_state->freesync_timing_changed |=
6090 (memcmp(&new_crtc_state->vrr_params.adjust,
6092 sizeof(vrr_params.adjust)) != 0);
6094 new_crtc_state->vrr_params = vrr_params;
6095 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6098 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6099 struct dm_crtc_state *new_state)
6101 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6102 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6104 if (!old_vrr_active && new_vrr_active) {
6105 /* Transition VRR inactive -> active:
6106 * While VRR is active, we must not disable the vblank irq, as a
6107 * reenable after disable would compute bogus vblank/pflip
6108 * timestamps if it likely happened inside the display front porch.
6110 * We also need the vupdate irq for the actual core vblank handling at end of vblank.
6113 dm_set_vupdate_irq(new_state->base.crtc, true);
6114 drm_crtc_vblank_get(new_state->base.crtc);
6115 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6116 __func__, new_state->base.crtc->base.id);
6117 } else if (old_vrr_active && !new_vrr_active) {
6118 /* Transition VRR active -> inactive:
6119 * Allow vblank irq disable again for fixed refresh rate.
6121 dm_set_vupdate_irq(new_state->base.crtc, false);
6122 drm_crtc_vblank_put(new_state->base.crtc);
6123 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6124 __func__, new_state->base.crtc->base.id);
6128 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6130 struct drm_plane *plane;
6131 struct drm_plane_state *old_plane_state, *new_plane_state;
6135 * TODO: Make this per-stream so we don't issue redundant updates for
6136 * commits with multiple streams.
6138 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6140 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6141 handle_cursor_update(plane, old_plane_state);
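/*
 * Commit all plane updates for one CRTC: build a dc_surface_update bundle
 * (scaling info, plane info, flip addresses) for every non-cursor plane,
 * wait on the framebuffers' fences, throttle flips against the target
 * vblank, and hand everything to DC in a single
 * dc_commit_updates_for_stream() call along with any stream-level updates
 * (colour management, VRR infopacket, ABM level).
 */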
6144 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6145 struct dc_state *dc_state,
6146 struct drm_device *dev,
6147 struct amdgpu_display_manager *dm,
6148 struct drm_crtc *pcrtc,
6149 bool wait_for_vblank)
6152 uint64_t timestamp_ns;
6153 struct drm_plane *plane;
6154 struct drm_plane_state *old_plane_state, *new_plane_state;
6155 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6156 struct drm_crtc_state *new_pcrtc_state =
6157 drm_atomic_get_new_crtc_state(state, pcrtc);
6158 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6159 struct dm_crtc_state *dm_old_crtc_state =
6160 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6161 int planes_count = 0, vpos, hpos;
6163 unsigned long flags;
6164 struct amdgpu_bo *abo;
6165 uint64_t tiling_flags;
6166 uint32_t target_vblank, last_flip_vblank;
6167 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6168 bool pflip_present = false;
6169 bool swizzle = true;
6171 struct dc_surface_update surface_updates[MAX_SURFACES];
6172 struct dc_plane_info plane_infos[MAX_SURFACES];
6173 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6174 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6175 struct dc_stream_update stream_update;
6178 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6181 dm_error("Failed to allocate update bundle\n");
6186 * Disable the cursor first if we're disabling all the planes.
6187 * It'll remain on the screen after the planes are re-enabled if we don't.
6190 if (acrtc_state->active_planes == 0)
6191 amdgpu_dm_commit_cursors(state);
6193 /* update planes when needed */
6194 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6195 struct drm_crtc *crtc = new_plane_state->crtc;
6196 struct drm_crtc_state *new_crtc_state;
6197 struct drm_framebuffer *fb = new_plane_state->fb;
6198 bool plane_needs_flip;
6199 struct dc_plane_state *dc_plane;
6200 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6202 /* Cursor plane is handled after stream updates */
6203 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6206 if (!fb || !crtc || pcrtc != crtc)
6209 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6210 if (!new_crtc_state->active)
6213 dc_plane = dm_new_plane_state->dc_state;
6215 if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
6218 bundle->surface_updates[planes_count].surface = dc_plane;
6219 if (new_pcrtc_state->color_mgmt_changed) {
6220 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6221 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6224 fill_dc_scaling_info(new_plane_state,
6225 &bundle->scaling_infos[planes_count]);
6227 bundle->surface_updates[planes_count].scaling_info =
6228 &bundle->scaling_infos[planes_count];
6230 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6232 pflip_present = pflip_present || plane_needs_flip;
6234 if (!plane_needs_flip) {
6239 abo = gem_to_amdgpu_bo(fb->obj[0]);
6242 * Wait for all fences on this FB. Do a limited wait to avoid
6243 * deadlock during GPU reset when this fence will not signal
6244 * but we hold the reservation lock for the BO.
6246 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6248 msecs_to_jiffies(5000));
6249 if (unlikely(r <= 0))
6250 DRM_ERROR("Waiting for fences timed out!");
6253 * TODO: This might fail and hence is better not used; wait
6254 * explicitly on fences instead,
6255 * and in general it should only be called for a
6256 * blocking commit, as per the framework helpers.
6258 r = amdgpu_bo_reserve(abo, true);
6259 if (unlikely(r != 0))
6260 DRM_ERROR("failed to reserve buffer before flip\n");
6262 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6264 amdgpu_bo_unreserve(abo);
6266 fill_dc_plane_info_and_addr(
6267 dm->adev, new_plane_state, tiling_flags,
6268 &bundle->plane_infos[planes_count],
6269 &bundle->flip_addrs[planes_count].address);
6271 bundle->surface_updates[planes_count].plane_info =
6272 &bundle->plane_infos[planes_count];
6275 * Only allow immediate flips for fast updates that don't
6276 * change FB pitch, DCC state, rotation or mirroring.
6278 bundle->flip_addrs[planes_count].flip_immediate =
6279 crtc->state->async_flip &&
6280 acrtc_state->update_type == UPDATE_TYPE_FAST;
6282 timestamp_ns = ktime_get_ns();
6283 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6284 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6285 bundle->surface_updates[planes_count].surface = dc_plane;
6287 if (!bundle->surface_updates[planes_count].surface) {
6288 DRM_ERROR("No surface for CRTC: id=%d\n",
6289 acrtc_attach->crtc_id);
6293 if (plane == pcrtc->primary)
6294 update_freesync_state_on_stream(
6297 acrtc_state->stream,
6299 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6301 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6303 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6304 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6310 if (pflip_present) {
6312 /* Use old throttling in non-vrr fixed refresh rate mode
6313 * to keep flip scheduling based on target vblank counts
6314 * working in a backwards compatible way, e.g., for
6315 * clients using the GLX_OML_sync_control extension or
6316 * DRI3/Present extension with defined target_msc.
6318 last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
6321 /* For variable refresh rate mode only:
6322 * Get vblank of last completed flip to avoid > 1 vrr
6323 * flips per video frame by use of throttling, but allow
6324 * flip programming anywhere in the possibly large
6325 * variable vrr vblank interval for fine-grained flip
6326 * timing control and more opportunity to avoid stutter
6327 * on late submission of flips.
6329 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6330 last_flip_vblank = acrtc_attach->last_flip_vblank;
6331 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6334 target_vblank = last_flip_vblank + wait_for_vblank;
6337 * Wait until we're out of the vertical blank period before the one
6338 * targeted by the flip
6340 while ((acrtc_attach->enabled &&
6341 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6342 0, &vpos, &hpos, NULL,
6343 NULL, &pcrtc->hwmode)
6344 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6345 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6346 (int)(target_vblank -
6347 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
6348 usleep_range(1000, 1100);
6351 if (acrtc_attach->base.state->event) {
6352 drm_crtc_vblank_get(pcrtc);
6354 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6356 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6357 prepare_flip_isr(acrtc_attach);
6359 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6362 if (acrtc_state->stream) {
6363 if (acrtc_state->freesync_vrr_info_changed)
6364 bundle->stream_update.vrr_infopacket =
6365 &acrtc_state->stream->vrr_infopacket;
6369 /* Update the planes if changed or disable if we don't have any. */
6370 if ((planes_count || acrtc_state->active_planes == 0) &&
6371 acrtc_state->stream) {
6372 bundle->stream_update.stream = acrtc_state->stream;
6373 if (new_pcrtc_state->mode_changed) {
6374 bundle->stream_update.src = acrtc_state->stream->src;
6375 bundle->stream_update.dst = acrtc_state->stream->dst;
6378 if (new_pcrtc_state->color_mgmt_changed) {
6380 * TODO: This isn't fully correct since we've actually
6381 * already modified the stream in place.
6383 bundle->stream_update.gamut_remap =
6384 &acrtc_state->stream->gamut_remap_matrix;
6385 bundle->stream_update.output_csc_transform =
6386 &acrtc_state->stream->csc_color_matrix;
6387 bundle->stream_update.out_transfer_func =
6388 acrtc_state->stream->out_transfer_func;
6391 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6392 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6393 bundle->stream_update.abm_level = &acrtc_state->abm_level;
6396 * If FreeSync state on the stream has changed then we need to
6397 * re-adjust the min/max bounds now that DC doesn't handle this
6398 * as part of commit.
6400 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6401 amdgpu_dm_vrr_active(acrtc_state)) {
6402 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6403 dc_stream_adjust_vmin_vmax(
6404 dm->dc, acrtc_state->stream,
6405 &acrtc_state->vrr_params.adjust);
6406 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6408 mutex_lock(&dm->dc_lock);
6409 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6410 acrtc_state->stream->link->psr_allow_active)
6411 amdgpu_dm_psr_disable(acrtc_state->stream);
6413 dc_commit_updates_for_stream(dm->dc,
6414 bundle->surface_updates,
6416 acrtc_state->stream,
6417 &bundle->stream_update,
6420 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6421 acrtc_state->stream->psr_version &&
6422 !acrtc_state->stream->link->psr_feature_enabled)
6423 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6424 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6425 acrtc_state->stream->link->psr_feature_enabled &&
6426 !acrtc_state->stream->link->psr_allow_active &&
6428 amdgpu_dm_psr_enable(acrtc_state->stream);
6431 mutex_unlock(&dm->dc_lock);
6435 * Update cursor state *after* programming all the planes.
6436 * This avoids redundant programming in the case where we're going
6437 * to be disabling a single plane - those pipes are being disabled.
6439 if (acrtc_state->active_planes)
6440 amdgpu_dm_commit_cursors(state);
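/*
 * Notify the registered audio component about connectors whose audio
 * endpoint went away (CRTC removed or modeset) and about connectors that
 * now carry an active stream, so the ELD/audio instance mapping stays in
 * sync with the new display state.
 */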
6446 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6447 struct drm_atomic_state *state)
6449 struct amdgpu_device *adev = dev->dev_private;
6450 struct amdgpu_dm_connector *aconnector;
6451 struct drm_connector *connector;
6452 struct drm_connector_state *old_con_state, *new_con_state;
6453 struct drm_crtc_state *new_crtc_state;
6454 struct dm_crtc_state *new_dm_crtc_state;
6455 const struct dc_stream_status *status;
6458	/* Notify audio device removals. */
6459 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6460 if (old_con_state->crtc != new_con_state->crtc) {
6461 /* CRTC changes require notification. */
6465 if (!new_con_state->crtc)
6468 new_crtc_state = drm_atomic_get_new_crtc_state(
6469 state, new_con_state->crtc);
6471 if (!new_crtc_state)
6474 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6478 aconnector = to_amdgpu_dm_connector(connector);
6480 mutex_lock(&adev->dm.audio_lock);
6481 inst = aconnector->audio_inst;
6482 aconnector->audio_inst = -1;
6483 mutex_unlock(&adev->dm.audio_lock);
6485 amdgpu_dm_audio_eld_notify(adev, inst);
6488 /* Notify audio device additions. */
6489 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6490 if (!new_con_state->crtc)
6493 new_crtc_state = drm_atomic_get_new_crtc_state(
6494 state, new_con_state->crtc);
6496 if (!new_crtc_state)
6499 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6502 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6503 if (!new_dm_crtc_state->stream)
6506 status = dc_stream_get_status(new_dm_crtc_state->stream);
6510 aconnector = to_amdgpu_dm_connector(connector);
6512 mutex_lock(&adev->dm.audio_lock);
6513 inst = status->audio_inst;
6514 aconnector->audio_inst = inst;
6515 mutex_unlock(&adev->dm.audio_lock);
6517 amdgpu_dm_audio_eld_notify(adev, inst);
6522 * Enable interrupts on CRTCs that are newly active, have undergone
6523 * a modeset, or have active planes again.
6525 * Done in two passes, based on the for_modeset flag:
6526 * Pass 1: For CRTCs going through modeset
6527 * Pass 2: For CRTCs going from 0 to n active planes
6529 * Interrupts can only be enabled after the planes are programmed,
6530 * so this requires a two-pass approach since we don't want to
6531 * just defer the interrupts until after commit planes every time.
6533 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6534 struct drm_atomic_state *state,
6537 struct amdgpu_device *adev = dev->dev_private;
6538 struct drm_crtc *crtc;
6539 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6541 #ifdef CONFIG_DEBUG_FS
6542 enum amdgpu_dm_pipe_crc_source source;
6545 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6546 new_crtc_state, i) {
6547 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6548 struct dm_crtc_state *dm_new_crtc_state =
6549 to_dm_crtc_state(new_crtc_state);
6550 struct dm_crtc_state *dm_old_crtc_state =
6551 to_dm_crtc_state(old_crtc_state);
6552 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6555 run_pass = (for_modeset && modeset) ||
6556 (!for_modeset && !modeset &&
6557 !dm_old_crtc_state->interrupts_enabled);
6562 if (!dm_new_crtc_state->interrupts_enabled)
6565 manage_dm_interrupts(adev, acrtc, true);
6567 #ifdef CONFIG_DEBUG_FS
6568		/* The stream has changed so CRC capture needs to be re-enabled. */
6569 source = dm_new_crtc_state->crc_src;
6570 if (amdgpu_dm_is_valid_crc_source(source)) {
6571 amdgpu_dm_crtc_configure_crc_source(
6572 crtc, dm_new_crtc_state,
6573 dm_new_crtc_state->crc_src);
6580 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6581 * @crtc_state: the DRM CRTC state
6582 * @stream_state: the DC stream state.
6584 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6585 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6587 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6588 struct dc_stream_state *stream_state)
6590 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
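/*
 * amdgpu_dm_atomic_commit() - disable vblank/pflip interrupts on CRTCs that
 * are being turned off or modeset, then hand the commit off to
 * drm_atomic_helper_commit(). See the comment inside for why the interrupt
 * handling cannot yet live in commit tail.
 */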
6593 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6594 struct drm_atomic_state *state,
6597 struct drm_crtc *crtc;
6598 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6599 struct amdgpu_device *adev = dev->dev_private;
6603 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6604 * a modeset, being disabled, or have no active planes.
6606 * It's done in atomic commit rather than commit tail for now since
6607 * some of these interrupt handlers access the current CRTC state and
6608 * potentially the stream pointer itself.
6610	 * Since the atomic state is swapped within atomic commit and not within
6611	 * commit tail, this would lead to the new state (that hasn't been committed yet)
6612	 * being accessed from within the handlers.
6614	 * TODO: Fix this so we can do this in commit tail and not have to block in atomic check.
6617 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6618 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6619 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6620 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6622 if (dm_old_crtc_state->interrupts_enabled &&
6623 (!dm_new_crtc_state->interrupts_enabled ||
6624 drm_atomic_crtc_needs_modeset(new_crtc_state)))
6625 manage_dm_interrupts(adev, acrtc, false);
6628	 * Add a check here for SoCs that support a hardware cursor plane, to
6629	 * unset legacy_cursor_update
6632 return drm_atomic_helper_commit(dev, state, nonblock);
6634 /*TODO Handle EINTR, reenable IRQ*/
6638 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
6639 * @state: The atomic state to commit
6641 * This will tell DC to commit the constructed DC state from atomic_check,
6642 * programming the hardware. Any failure here implies a hardware failure, since
6643 * atomic check should have filtered anything non-kosher.
6645 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
6647 struct drm_device *dev = state->dev;
6648 struct amdgpu_device *adev = dev->dev_private;
6649 struct amdgpu_display_manager *dm = &adev->dm;
6650 struct dm_atomic_state *dm_state;
6651 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
6653 struct drm_crtc *crtc;
6654 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6655 unsigned long flags;
6656 bool wait_for_vblank = true;
6657 struct drm_connector *connector;
6658 struct drm_connector_state *old_con_state, *new_con_state;
6659 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6660 int crtc_disable_count = 0;
6662 drm_atomic_helper_update_legacy_modeset_state(dev, state);
6664 dm_state = dm_atomic_get_new_state(state);
6665 if (dm_state && dm_state->context) {
6666 dc_state = dm_state->context;
6668 /* No state changes, retain current state. */
6669 dc_state_temp = dc_create_state(dm->dc);
6670 ASSERT(dc_state_temp);
6671 dc_state = dc_state_temp;
6672 dc_resource_state_copy_construct_current(dm->dc, dc_state);
6675 /* update changed items */
6676 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6677 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6679 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6680 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6683 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6684 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6685 "connectors_changed:%d\n",
6687 new_crtc_state->enable,
6688 new_crtc_state->active,
6689 new_crtc_state->planes_changed,
6690 new_crtc_state->mode_changed,
6691 new_crtc_state->active_changed,
6692 new_crtc_state->connectors_changed);
6694 /* Copy all transient state flags into dc state */
6695 if (dm_new_crtc_state->stream) {
6696 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
6697 dm_new_crtc_state->stream);
6700 /* handles headless hotplug case, updating new_state and
6701 * aconnector as needed
6704 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
6706 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
6708 if (!dm_new_crtc_state->stream) {
6710				 * This could happen because of issues with
6711				 * userspace notification delivery.
6712				 * In this case userspace tries to set a mode on a
6713				 * display which is in fact disconnected.
6714				 * dc_sink is NULL on the aconnector in this case.
6715				 * We expect a mode reset to come soon.
6717				 * This can also happen when an unplug is done
6718				 * during the resume sequence.
6720				 * In either case, we want to pretend we still
6721				 * have a sink to keep the pipe running so that
6722				 * hw state stays consistent with the sw state.
6724 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6725 __func__, acrtc->base.base.id);
6729 if (dm_old_crtc_state->stream)
6730 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6732 pm_runtime_get_noresume(dev->dev);
6734 acrtc->enabled = true;
6735 acrtc->hw_mode = new_crtc_state->mode;
6736 crtc->hwmode = new_crtc_state->mode;
6737 } else if (modereset_required(new_crtc_state)) {
6738 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
6739 /* i.e. reset mode */
6740 if (dm_old_crtc_state->stream) {
6741 if (dm_old_crtc_state->stream->link->psr_allow_active)
6742 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
6744 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6747 } /* for_each_crtc_in_state() */
6750 dm_enable_per_frame_crtc_master_sync(dc_state);
6751 mutex_lock(&dm->dc_lock);
6752 WARN_ON(!dc_commit_state(dm->dc, dc_state));
6753 mutex_unlock(&dm->dc_lock);
6756 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
6757 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6759 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6761 if (dm_new_crtc_state->stream != NULL) {
6762 const struct dc_stream_status *status =
6763 dc_stream_get_status(dm_new_crtc_state->stream);
6766 status = dc_stream_get_status_from_state(dc_state,
6767 dm_new_crtc_state->stream);
6770 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
6772 acrtc->otg_inst = status->primary_otg_inst;
6775 #ifdef CONFIG_DRM_AMD_DC_HDCP
6776 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6777 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6778 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6779 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6781 new_crtc_state = NULL;
6784 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
6786 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6788 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
6789 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
6790 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
6791 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6795 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
6796 update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
6800 /* Handle connector state changes */
6801 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6802 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6803 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6804 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6805 struct dc_surface_update dummy_updates[MAX_SURFACES];
6806 struct dc_stream_update stream_update;
6807 struct dc_info_packet hdr_packet;
6808 struct dc_stream_status *status = NULL;
6809 bool abm_changed, hdr_changed, scaling_changed;
6811 memset(&dummy_updates, 0, sizeof(dummy_updates));
6812 memset(&stream_update, 0, sizeof(stream_update));
6815 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
6816 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
6819 /* Skip any modesets/resets */
6820 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
6823 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6824 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6826 scaling_changed = is_scaling_state_different(dm_new_con_state,
6829 abm_changed = dm_new_crtc_state->abm_level !=
6830 dm_old_crtc_state->abm_level;
6833 is_hdr_metadata_different(old_con_state, new_con_state);
6835 if (!scaling_changed && !abm_changed && !hdr_changed)
6838 stream_update.stream = dm_new_crtc_state->stream;
6839 if (scaling_changed) {
6840 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
6841 dm_new_con_state, dm_new_crtc_state->stream);
6843 stream_update.src = dm_new_crtc_state->stream->src;
6844 stream_update.dst = dm_new_crtc_state->stream->dst;
6848 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
6850 stream_update.abm_level = &dm_new_crtc_state->abm_level;
6854 fill_hdr_info_packet(new_con_state, &hdr_packet);
6855 stream_update.hdr_static_metadata = &hdr_packet;
6858 status = dc_stream_get_status(dm_new_crtc_state->stream);
6860 WARN_ON(!status->plane_count);
6863 * TODO: DC refuses to perform stream updates without a dc_surface_update.
6864 * Here we create an empty update on each plane.
6865 * To fix this, DC should permit updating only stream properties.
6867 for (j = 0; j < status->plane_count; j++)
6868 dummy_updates[j].surface = status->plane_states[0];
6871 mutex_lock(&dm->dc_lock);
6872 dc_commit_updates_for_stream(dm->dc,
6874 status->plane_count,
6875 dm_new_crtc_state->stream,
6878 mutex_unlock(&dm->dc_lock);
6881 /* Count number of newly disabled CRTCs for dropping PM refs later. */
6882 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6883 new_crtc_state, i) {
6884 if (old_crtc_state->active && !new_crtc_state->active)
6885 crtc_disable_count++;
6887 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6888 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6890 /* Update freesync active state. */
6891 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
6893 /* Handle vrr on->off / off->on transitions */
6894 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
6898 /* Enable interrupts for CRTCs going through a modeset. */
6899 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
6901 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
6902 if (new_crtc_state->async_flip)
6903 wait_for_vblank = false;
6905 /* update planes when needed per crtc*/
6906 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
6907 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6909 if (dm_new_crtc_state->stream)
6910 amdgpu_dm_commit_planes(state, dc_state, dev,
6911 dm, crtc, wait_for_vblank);
6914 /* Enable interrupts for CRTCs going from 0 to n active planes. */
6915 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
6917 /* Update audio instances for each connector. */
6918 amdgpu_dm_commit_audio(dev, state);
6921	 * send a vblank event for all events not handled in the flip and
6922	 * mark the event as consumed for drm_atomic_helper_commit_hw_done
6924 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6925 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
6927 if (new_crtc_state->event)
6928 drm_send_event_locked(dev, &new_crtc_state->event->base);
6930 new_crtc_state->event = NULL;
6932 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6934 /* Signal HW programming completion */
6935 drm_atomic_helper_commit_hw_done(state);
6937 if (wait_for_vblank)
6938 drm_atomic_helper_wait_for_flip_done(dev, state);
6940 drm_atomic_helper_cleanup_planes(dev, state);
6943 * Finally, drop a runtime PM reference for each newly disabled CRTC,
6944	 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
6947 for (i = 0; i < crtc_disable_count; i++)
6948 pm_runtime_put_autosuspend(dev->dev);
6949 pm_runtime_mark_last_busy(dev->dev);
6952 dc_release_state(dc_state_temp);
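/*
 * Build a minimal atomic state containing the connector, its CRTC and the
 * CRTC's primary plane, force mode_changed on the CRTC state, and commit it.
 * Used to restore the previous display configuration when no set-mode
 * request arrives from userspace.
 */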
6956 static int dm_force_atomic_commit(struct drm_connector *connector)
6959 struct drm_device *ddev = connector->dev;
6960 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
6961 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6962 struct drm_plane *plane = disconnected_acrtc->base.primary;
6963 struct drm_connector_state *conn_state;
6964 struct drm_crtc_state *crtc_state;
6965 struct drm_plane_state *plane_state;
6970 state->acquire_ctx = ddev->mode_config.acquire_ctx;
6972 /* Construct an atomic state to restore previous display setting */
6975 * Attach connectors to drm_atomic_state
6977 conn_state = drm_atomic_get_connector_state(state, connector);
6979 ret = PTR_ERR_OR_ZERO(conn_state);
6983 /* Attach crtc to drm_atomic_state*/
6984 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
6986 ret = PTR_ERR_OR_ZERO(crtc_state);
6990 /* force a restore */
6991 crtc_state->mode_changed = true;
6993 /* Attach plane to drm_atomic_state */
6994 plane_state = drm_atomic_get_plane_state(state, plane);
6996 ret = PTR_ERR_OR_ZERO(plane_state);
7001 /* Call commit internally with the state we just constructed */
7002 ret = drm_atomic_commit(state);
7007 DRM_ERROR("Restoring old state failed with %i\n", ret);
7008 drm_atomic_state_put(state);
7014 * This function handles all cases where a set mode request does not come upon hotplug.
7015 * This includes when a display is unplugged and then plugged back into the
7016 * same port, and when running without usermode desktop manager support.
7018 void dm_restore_drm_connector_state(struct drm_device *dev,
7019 struct drm_connector *connector)
7021 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7022 struct amdgpu_crtc *disconnected_acrtc;
7023 struct dm_crtc_state *acrtc_state;
7025 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7028 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7029 if (!disconnected_acrtc)
7032 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7033 if (!acrtc_state->stream)
7037	 * If the previous sink is not released and is different from the current one,
7038	 * we deduce we are in a state where we cannot rely on a usermode call
7039	 * to turn on the display, so we do it here.
7041 if (acrtc_state->stream->sink != aconnector->dc_sink)
7042 dm_force_atomic_commit(&aconnector->base);
7046 * Grabs all modesetting locks to serialize against any blocking commits and
7047 * waits for completion of all non-blocking commits.
7049 static int do_aquire_global_lock(struct drm_device *dev,
7050 struct drm_atomic_state *state)
7052 struct drm_crtc *crtc;
7053 struct drm_crtc_commit *commit;
7057	 * Adding all modeset locks to the acquire_ctx ensures
7058	 * that when the framework releases it, the extra locks
7059	 * we are taking here will get released as well.
7061 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7065 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7066 spin_lock(&crtc->commit_lock);
7067 commit = list_first_entry_or_null(&crtc->commit_list,
7068 struct drm_crtc_commit, commit_entry);
7070 drm_crtc_commit_get(commit);
7071 spin_unlock(&crtc->commit_lock);
7077		 * Make sure all pending HW programming has completed and all page flips are done.
7080 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7083 ret = wait_for_completion_interruptible_timeout(
7084 &commit->flip_done, 10*HZ);
7087 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7088 "timed out\n", crtc->base.id, crtc->name);
7090 drm_crtc_commit_put(commit);
7093 return ret < 0 ? ret : 0;
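/*
 * Derive the FreeSync/VRR configuration for a CRTC: VRR is supported when
 * the connector reports freesync_capable and the mode's nominal refresh rate
 * lies within the sink's [min_vfreq, max_vfreq] range. The min/max bounds
 * are handed to the freesync module in uHz.
 */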
7096 static void get_freesync_config_for_crtc(
7097 struct dm_crtc_state *new_crtc_state,
7098 struct dm_connector_state *new_con_state)
7100 struct mod_freesync_config config = {0};
7101 struct amdgpu_dm_connector *aconnector =
7102 to_amdgpu_dm_connector(new_con_state->base.connector);
7103 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7104 int vrefresh = drm_mode_vrefresh(mode);
7106 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7107 vrefresh >= aconnector->min_vfreq &&
7108 vrefresh <= aconnector->max_vfreq;
7110 if (new_crtc_state->vrr_supported) {
7111 new_crtc_state->stream->ignore_msa_timing_param = true;
7112 config.state = new_crtc_state->base.vrr_enabled ?
7113 VRR_STATE_ACTIVE_VARIABLE :
7115 config.min_refresh_in_uhz =
7116 aconnector->min_vfreq * 1000000;
7117 config.max_refresh_in_uhz =
7118 aconnector->max_vfreq * 1000000;
7119 config.vsif_supported = true;
7123 new_crtc_state->freesync_config = config;
7126 static void reset_freesync_config_for_crtc(
7127 struct dm_crtc_state *new_crtc_state)
7129 new_crtc_state->vrr_supported = false;
7131 memset(&new_crtc_state->vrr_params, 0,
7132 sizeof(new_crtc_state->vrr_params));
7133 memset(&new_crtc_state->vrr_infopacket, 0,
7134 sizeof(new_crtc_state->vrr_infopacket));
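/*
 * Validate and apply CRTC-level changes to the DC context: create a new
 * dc_stream for CRTCs being enabled or modeset, remove streams for CRTCs
 * being disabled or reset, and refresh scaling, ABM, color management and
 * FreeSync settings on streams that only need a fast update. Sets
 * *lock_and_validation_needed when a full validation pass is required.
 */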
7137 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7138 struct drm_atomic_state *state,
7139 struct drm_crtc *crtc,
7140 struct drm_crtc_state *old_crtc_state,
7141 struct drm_crtc_state *new_crtc_state,
7143 bool *lock_and_validation_needed)
7145 struct dm_atomic_state *dm_state = NULL;
7146 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7147 struct dc_stream_state *new_stream;
7151 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7152 * update changed items
7154 struct amdgpu_crtc *acrtc = NULL;
7155 struct amdgpu_dm_connector *aconnector = NULL;
7156 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7157 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7161 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7162 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7163 acrtc = to_amdgpu_crtc(crtc);
7164 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7166 /* TODO This hack should go away */
7167 if (aconnector && enable) {
7168 /* Make sure fake sink is created in plug-in scenario */
7169 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7171 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7174 if (IS_ERR(drm_new_conn_state)) {
7175 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7179 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7180 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7182 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7185 new_stream = create_stream_for_sink(aconnector,
7186 &new_crtc_state->mode,
7188 dm_old_crtc_state->stream);
7191		 * We can have no stream on ACTION_SET if a display
7192		 * was disconnected during S3; in this case it is not an
7193		 * error, the OS will be updated after detection and
7194		 * will do the right thing on the next atomic commit.
7198 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7199 __func__, acrtc->base.base.id);
7204 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7206 ret = fill_hdr_info_packet(drm_new_conn_state,
7207 &new_stream->hdr_static_metadata);
7212 * If we already removed the old stream from the context
7213 * (and set the new stream to NULL) then we can't reuse
7214 * the old stream even if the stream and scaling are unchanged.
7215		 * We would hit the BUG_ON and get a black screen.
7217 * TODO: Refactor this function to allow this check to work
7218 * in all conditions.
7220 if (dm_new_crtc_state->stream &&
7221 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7222 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7223 new_crtc_state->mode_changed = false;
7224 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7225 new_crtc_state->mode_changed);
7229 /* mode_changed flag may get updated above, need to check again */
7230 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7234 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7235 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7236 "connectors_changed:%d\n",
7238 new_crtc_state->enable,
7239 new_crtc_state->active,
7240 new_crtc_state->planes_changed,
7241 new_crtc_state->mode_changed,
7242 new_crtc_state->active_changed,
7243 new_crtc_state->connectors_changed);
7245 /* Remove stream for any changed/disabled CRTC */
7248 if (!dm_old_crtc_state->stream)
7251 ret = dm_atomic_get_state(state, &dm_state);
7255 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7258 /* i.e. reset mode */
7259 if (dc_remove_stream_from_ctx(
7262 dm_old_crtc_state->stream) != DC_OK) {
7267 dc_stream_release(dm_old_crtc_state->stream);
7268 dm_new_crtc_state->stream = NULL;
7270 reset_freesync_config_for_crtc(dm_new_crtc_state);
7272 *lock_and_validation_needed = true;
7274 } else {/* Add stream for any updated/enabled CRTC */
7276		 * Quick fix to prevent a NULL pointer dereference on new_stream when
7277		 * added MST connectors are not found in the existing crtc_state in chained mode.
7278		 * TODO: need to dig out the root cause of this
7280 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7283 if (modereset_required(new_crtc_state))
7286 if (modeset_required(new_crtc_state, new_stream,
7287 dm_old_crtc_state->stream)) {
7289 WARN_ON(dm_new_crtc_state->stream);
7291 ret = dm_atomic_get_state(state, &dm_state);
7295 dm_new_crtc_state->stream = new_stream;
7297 dc_stream_retain(new_stream);
7299 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7302 if (dc_add_stream_to_ctx(
7305 dm_new_crtc_state->stream) != DC_OK) {
7310 *lock_and_validation_needed = true;
7315 /* Release extra reference */
7317 dc_stream_release(new_stream);
7320 * We want to do dc stream updates that do not require a
7321 * full modeset below.
7323 if (!(enable && aconnector && new_crtc_state->enable &&
7324 new_crtc_state->active))
7327 * Given above conditions, the dc state cannot be NULL because:
7328 * 1. We're in the process of enabling CRTCs (just been added
7329 * to the dc context, or already is on the context)
7330 * 2. Has a valid connector attached, and
7331 * 3. Is currently active and enabled.
7332 * => The dc stream state currently exists.
7334 BUG_ON(dm_new_crtc_state->stream == NULL);
7336 /* Scaling or underscan settings */
7337 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7338 update_stream_scaling_settings(
7339 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7342 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7345 * Color management settings. We also update color properties
7346 * when a modeset is needed, to ensure it gets reprogrammed.
7348 if (dm_new_crtc_state->base.color_mgmt_changed ||
7349 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7350 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7355 /* Update Freesync settings. */
7356 get_freesync_config_for_crtc(dm_new_crtc_state,
7363 dc_stream_release(new_stream);
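/*
 * Decide whether a plane has to be removed from and re-added to the DC
 * context. With the current DC architecture this is needed for modesets,
 * CRTC degamma changes, plane additions/removals that can change z-order,
 * and framebuffer format changes.
 */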
7367 static bool should_reset_plane(struct drm_atomic_state *state,
7368 struct drm_plane *plane,
7369 struct drm_plane_state *old_plane_state,
7370 struct drm_plane_state *new_plane_state)
7372 struct drm_plane *other;
7373 struct drm_plane_state *old_other_state, *new_other_state;
7374 struct drm_crtc_state *new_crtc_state;
7378	 * TODO: Remove this hack once the checks below are sufficient
7379	 * to determine when we need to reset all the planes on the CRTC.
7382 if (state->allow_modeset)
7385 /* Exit early if we know that we're adding or removing the plane. */
7386 if (old_plane_state->crtc != new_plane_state->crtc)
7389 /* old crtc == new_crtc == NULL, plane not in context. */
7390 if (!new_plane_state->crtc)
7394 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7396 if (!new_crtc_state)
7399 /* CRTC Degamma changes currently require us to recreate planes. */
7400 if (new_crtc_state->color_mgmt_changed)
7403 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7407 * If there are any new primary or overlay planes being added or
7408 * removed then the z-order can potentially change. To ensure
7409 * correct z-order and pipe acquisition the current DC architecture
7410 * requires us to remove and recreate all existing planes.
7412 * TODO: Come up with a more elegant solution for this.
7414 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7415 if (other->type == DRM_PLANE_TYPE_CURSOR)
7418 if (old_other_state->crtc != new_plane_state->crtc &&
7419 new_other_state->crtc != new_plane_state->crtc)
7422 if (old_other_state->crtc != new_other_state->crtc)
7425 /* TODO: Remove this once we can handle fast format changes. */
7426 if (old_other_state->fb && new_other_state->fb &&
7427 old_other_state->fb->format != new_other_state->fb->format)
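/*
 * Plane-level counterpart of dm_update_crtc_state(): remove changed or
 * disabled planes from the DC context and create/attach dc_plane_state
 * objects for planes being enabled, flagging when full lock-and-validation
 * is required.
 */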
7434 static int dm_update_plane_state(struct dc *dc,
7435 struct drm_atomic_state *state,
7436 struct drm_plane *plane,
7437 struct drm_plane_state *old_plane_state,
7438 struct drm_plane_state *new_plane_state,
7440 bool *lock_and_validation_needed)
7443 struct dm_atomic_state *dm_state = NULL;
7444 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7445 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7446 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7447 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7452 new_plane_crtc = new_plane_state->crtc;
7453 old_plane_crtc = old_plane_state->crtc;
7454 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7455 dm_old_plane_state = to_dm_plane_state(old_plane_state);
7457 /*TODO Implement atomic check for cursor plane */
7458 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7461 needs_reset = should_reset_plane(state, plane, old_plane_state,
7464 /* Remove any changed/removed planes */
7469 if (!old_plane_crtc)
7472 old_crtc_state = drm_atomic_get_old_crtc_state(
7473 state, old_plane_crtc);
7474 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7476 if (!dm_old_crtc_state->stream)
7479 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7480 plane->base.id, old_plane_crtc->base.id);
7482 ret = dm_atomic_get_state(state, &dm_state);
7486 if (!dc_remove_plane_from_context(
7488 dm_old_crtc_state->stream,
7489 dm_old_plane_state->dc_state,
7490 dm_state->context)) {
7497 dc_plane_state_release(dm_old_plane_state->dc_state);
7498 dm_new_plane_state->dc_state = NULL;
7500 *lock_and_validation_needed = true;
7502 } else { /* Add new planes */
7503 struct dc_plane_state *dc_new_plane_state;
7505 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7508 if (!new_plane_crtc)
7511 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7512 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7514 if (!dm_new_crtc_state->stream)
7520 WARN_ON(dm_new_plane_state->dc_state);
7522 dc_new_plane_state = dc_create_plane_state(dc);
7523 if (!dc_new_plane_state)
7526 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7527 plane->base.id, new_plane_crtc->base.id);
7529 ret = fill_dc_plane_attributes(
7530 new_plane_crtc->dev->dev_private,
7535 dc_plane_state_release(dc_new_plane_state);
7539 ret = dm_atomic_get_state(state, &dm_state);
7541 dc_plane_state_release(dc_new_plane_state);
7546 * Any atomic check errors that occur after this will
7547 * not need a release. The plane state will be attached
7548 * to the stream, and therefore part of the atomic
7549		 * state. It'll be released when the atomic state is cleaned up.
7552 if (!dc_add_plane_to_context(
7554 dm_new_crtc_state->stream,
7556 dm_state->context)) {
7558 dc_plane_state_release(dc_new_plane_state);
7562 dm_new_plane_state->dc_state = dc_new_plane_state;
7564 /* Tell DC to do a full surface update every time there
7565 * is a plane change. Inefficient, but works for now.
7567 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7569 *lock_and_validation_needed = true;
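/*
 * Ask DC how heavy the commit is: build dc_surface_update/dc_stream_update
 * descriptors for every changed plane and stream, then let
 * dc_check_update_surfaces_for_stream() classify the required programming
 * as a FAST, MED or FULL update.
 */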
7577 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7578 struct drm_atomic_state *state,
7579 enum surface_update_type *out_type)
7581 struct dc *dc = dm->dc;
7582 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7583 int i, j, num_plane, ret = 0;
7584 struct drm_plane_state *old_plane_state, *new_plane_state;
7585 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7586 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7587 struct drm_plane *plane;
7589 struct drm_crtc *crtc;
7590 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7591 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7592 struct dc_stream_status *status = NULL;
7594 struct dc_surface_update *updates;
7595 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7597 updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
7600 DRM_ERROR("Failed to allocate plane updates\n");
7601 /* Set type to FULL to avoid crashing in DC*/
7602 update_type = UPDATE_TYPE_FULL;
7606 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7607 struct dc_scaling_info scaling_info;
7608 struct dc_stream_update stream_update;
7610 memset(&stream_update, 0, sizeof(stream_update));
7612 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7613 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
7616 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
7617 update_type = UPDATE_TYPE_FULL;
7621 if (!new_dm_crtc_state->stream)
7624 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
7625 const struct amdgpu_framebuffer *amdgpu_fb =
7626 to_amdgpu_framebuffer(new_plane_state->fb);
7627 struct dc_plane_info plane_info;
7628 struct dc_flip_addrs flip_addr;
7629 uint64_t tiling_flags;
7631 new_plane_crtc = new_plane_state->crtc;
7632 old_plane_crtc = old_plane_state->crtc;
7633 new_dm_plane_state = to_dm_plane_state(new_plane_state);
7634 old_dm_plane_state = to_dm_plane_state(old_plane_state);
7636 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7639 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
7640 update_type = UPDATE_TYPE_FULL;
7644 if (crtc != new_plane_crtc)
7647 updates[num_plane].surface = new_dm_plane_state->dc_state;
7649 if (new_crtc_state->mode_changed) {
7650 stream_update.dst = new_dm_crtc_state->stream->dst;
7651 stream_update.src = new_dm_crtc_state->stream->src;
7654 if (new_crtc_state->color_mgmt_changed) {
7655 updates[num_plane].gamma =
7656 new_dm_plane_state->dc_state->gamma_correction;
7657 updates[num_plane].in_transfer_func =
7658 new_dm_plane_state->dc_state->in_transfer_func;
7659 stream_update.gamut_remap =
7660 &new_dm_crtc_state->stream->gamut_remap_matrix;
7661 stream_update.output_csc_transform =
7662 &new_dm_crtc_state->stream->csc_color_matrix;
7663 stream_update.out_transfer_func =
7664 new_dm_crtc_state->stream->out_transfer_func;
7667 ret = fill_dc_scaling_info(new_plane_state,
7672 updates[num_plane].scaling_info = &scaling_info;
7675 ret = get_fb_info(amdgpu_fb, &tiling_flags);
7679 memset(&flip_addr, 0, sizeof(flip_addr));
7681 ret = fill_dc_plane_info_and_addr(
7682 dm->adev, new_plane_state, tiling_flags,
7684 &flip_addr.address);
7688 updates[num_plane].plane_info = &plane_info;
7689 updates[num_plane].flip_addr = &flip_addr;
7698 ret = dm_atomic_get_state(state, &dm_state);
7702 old_dm_state = dm_atomic_get_old_state(state);
7703 if (!old_dm_state) {
7708 status = dc_stream_get_status_from_state(old_dm_state->context,
7709 new_dm_crtc_state->stream);
7710 stream_update.stream = new_dm_crtc_state->stream;
7712 * TODO: DC modifies the surface during this call so we need
7713 * to lock here - find a way to do this without locking.
7715 mutex_lock(&dm->dc_lock);
7716 update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
7717 &stream_update, status);
7718 mutex_unlock(&dm->dc_lock);
7720 if (update_type > UPDATE_TYPE_MED) {
7721 update_type = UPDATE_TYPE_FULL;
7729 *out_type = update_type;
7734 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
7735 * @dev: The DRM device
7736 * @state: The atomic state to commit
7738 * Validate that the given atomic state is programmable by DC into hardware.
7739 * This involves constructing a &struct dc_state reflecting the new hardware
7740 * state we wish to commit, then querying DC to see if it is programmable. It's
7741 * important not to modify the existing DC state. Otherwise, atomic_check
7742 * may unexpectedly commit hardware changes.
7744 * When validating the DC state, it's important that the right locks are
7745 * acquired. For full updates case which removes/adds/updates streams on one
7746 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
7747 * that any such full update commit will wait for completion of any outstanding
7748 * flip using DRMs synchronization events. See
7749 * dm_determine_update_type_for_commit()
7751 * Note that DM adds the affected connectors for all CRTCs in state, even when that
7752 * might not seem necessary. This is because DC stream creation requires the
7753 * DC sink, which is tied to the DRM connector state. Cleaning this up should
7754 * be possible but non-trivial - a possible TODO item.
7756 * Return: 0 on success, or a negative error code if validation failed.
7758 static int amdgpu_dm_atomic_check(struct drm_device *dev,
7759 struct drm_atomic_state *state)
7761 struct amdgpu_device *adev = dev->dev_private;
7762 struct dm_atomic_state *dm_state = NULL;
7763 struct dc *dc = adev->dm.dc;
7764 struct drm_connector *connector;
7765 struct drm_connector_state *old_con_state, *new_con_state;
7766 struct drm_crtc *crtc;
7767 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7768 struct drm_plane *plane;
7769 struct drm_plane_state *old_plane_state, *new_plane_state;
7770 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7771 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
7776	 * This bool will be set to true for any modeset/reset
7777	 * or plane update that implies a non-fast surface update.
7779 bool lock_and_validation_needed = false;
7781 ret = drm_atomic_helper_check_modeset(dev, state);
7785 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7786 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
7787 !new_crtc_state->color_mgmt_changed &&
7788 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
7791 if (!new_crtc_state->enable)
7794 ret = drm_atomic_add_affected_connectors(state, crtc);
7798 ret = drm_atomic_add_affected_planes(state, crtc);
7804 * Add all primary and overlay planes on the CRTC to the state
7805 * whenever a plane is enabled to maintain correct z-ordering
7806 * and to enable fast surface updates.
7808 drm_for_each_crtc(crtc, dev) {
7809 bool modified = false;
7811 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7812 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7815 if (new_plane_state->crtc == crtc ||
7816 old_plane_state->crtc == crtc) {
7825 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
7826 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7830 drm_atomic_get_plane_state(state, plane);
7832 if (IS_ERR(new_plane_state)) {
7833 ret = PTR_ERR(new_plane_state);
7839	/* Remove existing planes if they are modified */
7840 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7841 ret = dm_update_plane_state(dc, state, plane,
7845 &lock_and_validation_needed);
7850 /* Disable all crtcs which require disable */
7851 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7852 ret = dm_update_crtc_state(&adev->dm, state, crtc,
7856 &lock_and_validation_needed);
7861 /* Enable all crtcs which require enable */
7862 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7863 ret = dm_update_crtc_state(&adev->dm, state, crtc,
7867 &lock_and_validation_needed);
7872 /* Add new/modified planes */
7873 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7874 ret = dm_update_plane_state(dc, state, plane,
7878 &lock_and_validation_needed);
7883 /* Run this here since we want to validate the streams we created */
7884 ret = drm_atomic_helper_check_planes(dev, state);
7888 /* Perform validation of MST topology in the state*/
7889 ret = drm_dp_mst_atomic_check(state);
7893 if (state->legacy_cursor_update) {
7895 * This is a fast cursor update coming from the plane update
7896 * helper, check if it can be done asynchronously for better
7899 state->async_update =
7900 !drm_atomic_helper_async_check(dev, state);
7903 * Skip the remaining global validation if this is an async
7904 * update. Cursor updates can be done without affecting
7905 * state or bandwidth calcs and this avoids the performance
7906 * penalty of locking the private state object and
7907 * allocating a new dc_state.
7909 if (state->async_update)
7913 /* Check scaling and underscan changes*/
7914	/* TODO Removed scaling changes validation due to inability to commit
7915	 * a new stream into the context without causing a full reset. Need to
7916	 * decide how to handle.
7918 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7919 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7920 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7921 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7923 /* Skip any modesets/resets */
7924 if (!acrtc || drm_atomic_crtc_needs_modeset(
7925 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
7928		/* Skip anything that is not a scaling or underscan change */
7929 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
7932 overall_update_type = UPDATE_TYPE_FULL;
7933 lock_and_validation_needed = true;
7936 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
7940 if (overall_update_type < update_type)
7941 overall_update_type = update_type;
7944	 * lock_and_validation_needed was an old way to determine if we need to set
7945	 * the global lock. Leaving it in to check if we broke any corner cases:
7946	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
7947	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
7949 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
7950 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
7952 if (overall_update_type > UPDATE_TYPE_FAST) {
7953 ret = dm_atomic_get_state(state, &dm_state);
7957 ret = do_aquire_global_lock(dev, state);
7961 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
7967 * The commit is a fast update. Fast updates shouldn't change
7968 * the DC context, affect global validation, and can have their
7969 * commit work done in parallel with other commits not touching
7970 * the same resource. If we have a new DC context as part of
7971 * the DM atomic state from validation we need to free it and
7972 * retain the existing one instead.
7974 struct dm_atomic_state *new_dm_state, *old_dm_state;
7976 new_dm_state = dm_atomic_get_new_state(state);
7977 old_dm_state = dm_atomic_get_old_state(state);
7979 if (new_dm_state && old_dm_state) {
7980 if (new_dm_state->context)
7981 dc_release_state(new_dm_state->context);
7983 new_dm_state->context = old_dm_state->context;
7985 if (old_dm_state->context)
7986 dc_retain_state(old_dm_state->context);
7990 /* Store the overall update type for use later in atomic check. */
7991 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
7992 struct dm_crtc_state *dm_new_crtc_state =
7993 to_dm_crtc_state(new_crtc_state);
7995 dm_new_crtc_state->update_type = (int)overall_update_type;
7998 /* Must be success */
8003 if (ret == -EDEADLK)
8004 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8005 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8006 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8008		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
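/*
 * Query the DPCD to see whether the sink can ignore the MSA timing
 * parameters (DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT),
 * which is a prerequisite for driving variable refresh timings over DP.
 */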
8013 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8014 struct amdgpu_dm_connector *amdgpu_dm_connector)
8017 bool capable = false;
8019 if (amdgpu_dm_connector->dc_link &&
8020 dm_helpers_dp_read_dpcd(
8022 amdgpu_dm_connector->dc_link,
8023 DP_DOWN_STREAM_PORT_COUNT,
8025 sizeof(dpcd_data))) {
8026 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
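/*
 * Parse the sink's EDID for a detailed monitor range descriptor and, for
 * DP/eDP sinks that can ignore MSA timing, record the supported min/max
 * refresh rates. The connector is marked freesync_capable when the usable
 * range is larger than 10 Hz, and the DRM vrr_capable property is updated
 * accordingly.
 */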
8031 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8035 bool edid_check_required;
8036 struct detailed_timing *timing;
8037 struct detailed_non_pixel *data;
8038 struct detailed_data_monitor_range *range;
8039 struct amdgpu_dm_connector *amdgpu_dm_connector =
8040 to_amdgpu_dm_connector(connector);
8041 struct dm_connector_state *dm_con_state = NULL;
8043 struct drm_device *dev = connector->dev;
8044 struct amdgpu_device *adev = dev->dev_private;
8045 bool freesync_capable = false;
8047 if (!connector->state) {
8048 DRM_ERROR("%s - Connector has no state", __func__);
8053 dm_con_state = to_dm_connector_state(connector->state);
8055 amdgpu_dm_connector->min_vfreq = 0;
8056 amdgpu_dm_connector->max_vfreq = 0;
8057 amdgpu_dm_connector->pixel_clock_mhz = 0;
8062 dm_con_state = to_dm_connector_state(connector->state);
8064 edid_check_required = false;
8065 if (!amdgpu_dm_connector->dc_sink) {
8066 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8069 if (!adev->dm.freesync_module)
8072	 * if the EDID is non-zero, restrict FreeSync to DP and eDP only
8075 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8076 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8077 edid_check_required = is_dp_capable_without_timing_msa(
8079 amdgpu_dm_connector);
8082 if (edid_check_required == true && (edid->version > 1 ||
8083 (edid->version == 1 && edid->revision > 1))) {
8084 for (i = 0; i < 4; i++) {
8086 timing = &edid->detailed_timings[i];
8087 data = &timing->data.other_data;
8088 range = &data->data.range;
8090 * Check if monitor has continuous frequency mode
8092 if (data->type != EDID_DETAIL_MONITOR_RANGE)
8095			 * Check for flag range limits only. If flag == 1 then
8096			 * no additional timing information is provided.
8097			 * Default GTF, GTF Secondary curve and CVT are not supported.
8100 if (range->flags != 1)
8103 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8104 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8105 amdgpu_dm_connector->pixel_clock_mhz =
8106 range->pixel_clock_mhz * 10;
8110 if (amdgpu_dm_connector->max_vfreq -
8111 amdgpu_dm_connector->min_vfreq > 10) {
8113 freesync_capable = true;
8119 dm_con_state->freesync_capable = freesync_capable;
8121 if (connector->vrr_capable_property)
8122 drm_connector_set_vrr_capable_property(connector,
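/*
 * Probe the sink's PSR capability: for connected eDP links, read
 * DP_PSR_SUPPORT from the DPCD and record whether the panel supports PSR
 * on the dc_link.
 */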
8126 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8128 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8130 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8132 if (link->type == dc_connection_none)
8134 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8135 dpcd_data, sizeof(dpcd_data))) {
8136 link->psr_feature_enabled = dpcd_data[0] ? true:false;
8137 DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8142 * amdgpu_dm_link_setup_psr() - configure psr link
8143 * @stream: stream state
8145 * Return: true on success
8147 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8149 struct dc_link *link = NULL;
8150 struct psr_config psr_config = {0};
8151 struct psr_context psr_context = {0};
8152 struct dc *dc = NULL;
8158 link = stream->link;
8161 psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8163 if (psr_config.psr_version > 0) {
8164 psr_config.psr_exit_link_training_required = 0x1;
8165 psr_config.psr_frame_capture_indication_req = 0;
8166 psr_config.psr_rfb_setup_time = 0x37;
8167 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8168 psr_config.allow_smu_optimizations = 0x0;
8170 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8173 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8179 * amdgpu_dm_psr_enable() - enable psr f/w
8180 * @stream: stream state
8182 * Return: true on success
8184 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8186 struct dc_link *link = stream->link;
8187 struct dc_static_screen_events triggers = {0};
8189 DRM_DEBUG_DRIVER("Enabling psr...\n");
8191 triggers.cursor_update = true;
8192 triggers.overlay_update = true;
8193 triggers.surface_update = true;
8195 dc_stream_set_static_screen_events(link->ctx->dc,
8199 return dc_link_set_psr_allow_active(link, true, false);
8203 * amdgpu_dm_psr_disable() - disable psr f/w
8204 * @stream: stream state
8206 * Return: true on success
8208 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8211 DRM_DEBUG_DRIVER("Disabling psr...\n");
8213 return dc_link_set_psr_allow_active(stream->link, false, true);