/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
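
/*
 * Map an OTG (output timing generator) instance back to the amdgpu_crtc
 * driving it by walking the DRM CRTC list. An otg_inst of -1 is treated
 * as a bug (WARN) and falls back to the first CRTC.
 */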
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
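
/* True when the CRTC's FreeSync config is in an active (fixed or variable) VRR state. */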
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
							amdgpu_crtc->crtc_id);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
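
/*
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: the common IRQ parameters carrying adev and irq_src
 *
 * In VRR mode, vblank processing happens here, after the end of the
 * front-porch, where timestamps are valid; queued pageflip events are
 * delivered from the same spot.
 */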
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc_state->stream,
						&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc_state->stream,
						&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and update VRR state.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state));

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
	}

	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif
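
/*
 * DM does not implement clock or power gating of its own; these hooks
 * exist only to satisfy the amd_ip_funcs interface and report success.
 */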
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
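
/*
 * Bring up the DMUB microcontroller service: copy the firmware's
 * instruction-constant and data windows into framebuffer memory, zero
 * the mailbox, tracebuffer and firmware-state windows, program the
 * hardware, then wait for the firmware to auto-load before creating
 * the DC-side DMUB server.
 */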
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
	       fw_inst_const_size);
	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
#endif

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
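
/*
 * Request and validate the optional DMCU firmware and register it with
 * the PSP loader. ASICs without a separate DMCU image return success
 * without loading anything; a missing firmware file is not an error.
 */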
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
		return 0;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
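
/*
 * Software-side DMUB setup: request and validate the firmware, register
 * the instruction constants with the PSP loader, create the dmub_srv
 * instance, size its memory regions, and back them with a kernel BO in
 * VRAM before computing the final framebuffer-info layout.
 */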
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
		AMDGPU_UCODE_ID_DMCUB;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}
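
/*
 * Suspend or resume the DP MST topology manager of every root MST
 * connector. If a manager fails to resume, MST is disabled on that
 * link and a hotplug event is raised so userspace can re-probe.
 */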
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/*
	 * This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct, then calls the pplib functions below to
	 * pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
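
/*
 * Fabricate a link detection for forced connectors: create a sink that
 * matches the connector's signal type and read the EDID through the
 * usual helper, without a physical sink ever having been detected.
 */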
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST, there is no need to update the connector
	 * status or notify the OS, since (for the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}
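
/*
 * Service a DP short-pulse interrupt: read the sink's ESI bytes over
 * DPCD, let the MST manager handle them, ack each handled event back
 * to the sink, and repeat, bounded by max_process_count so a
 * misbehaving sink cannot livelock the handler.
 */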
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
2096 static void handle_hpd_rx_irq(void *param)
2098 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2099 struct drm_connector *connector = &aconnector->base;
2100 struct drm_device *dev = connector->dev;
2101 struct dc_link *dc_link = aconnector->dc_link;
2102 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2103 enum dc_connection_type new_connection_type = dc_connection_none;
2104 #ifdef CONFIG_DRM_AMD_DC_HDCP
2105 union hpd_irq_data hpd_irq_data;
2106 struct amdgpu_device *adev = dev->dev_private;
2108 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2112 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2113 * conflict, after implement i2c helper, this mutex should be
2116 if (dc_link->type != dc_connection_mst_branch)
2117 mutex_lock(&aconnector->hpd_lock);
2120 #ifdef CONFIG_DRM_AMD_DC_HDCP
2121 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2123 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2125 !is_mst_root_connector) {
2126 /* Downstream Port status changed. */
2127 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2128 DRM_ERROR("KMS: Failed to detect connector\n");
2130 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2131 emulated_link_detect(dc_link);
2133 if (aconnector->fake_enable)
2134 aconnector->fake_enable = false;
2136 amdgpu_dm_update_connector_after_detect(aconnector);
2139 drm_modeset_lock_all(dev);
2140 dm_restore_drm_connector_state(dev, connector);
2141 drm_modeset_unlock_all(dev);
2143 drm_kms_helper_hotplug_event(dev);
2144 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2146 if (aconnector->fake_enable)
2147 aconnector->fake_enable = false;
2149 amdgpu_dm_update_connector_after_detect(aconnector);
2152 drm_modeset_lock_all(dev);
2153 dm_restore_drm_connector_state(dev, connector);
2154 drm_modeset_unlock_all(dev);
2156 drm_kms_helper_hotplug_event(dev);
2159 #ifdef CONFIG_DRM_AMD_DC_HDCP
2160 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2161 if (adev->dm.hdcp_workqueue)
2162 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2163 }
2164 #endif
2165 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2166 (dc_link->type == dc_connection_mst_branch))
2167 dm_handle_hpd_rx_irq(aconnector);
2169 if (dc_link->type != dc_connection_mst_branch) {
2170 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2171 mutex_unlock(&aconnector->hpd_lock);
2172 }
2173 }
2175 static void register_hpd_handlers(struct amdgpu_device *adev)
2176 {
2177 struct drm_device *dev = adev->ddev;
2178 struct drm_connector *connector;
2179 struct amdgpu_dm_connector *aconnector;
2180 const struct dc_link *dc_link;
2181 struct dc_interrupt_params int_params = {0};
2183 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2184 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2186 list_for_each_entry(connector,
2187 &dev->mode_config.connector_list, head) {
2189 aconnector = to_amdgpu_dm_connector(connector);
2190 dc_link = aconnector->dc_link;
2192 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2193 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2194 int_params.irq_source = dc_link->irq_source_hpd;
2196 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2197 handle_hpd_irq,
2198 (void *) aconnector);
2199 }
2201 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2203 /* Also register for DP short pulse (hpd_rx). */
2204 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2205 int_params.irq_source = dc_link->irq_source_hpd_rx;
2207 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2208 handle_hpd_rx_irq,
2209 (void *) aconnector);
2210 }
2211 }
2212 }
2214 /* Register IRQ sources and initialize IRQ callbacks */
2215 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2216 {
2217 struct dc *dc = adev->dm.dc;
2218 struct common_irq_params *c_irq_params;
2219 struct dc_interrupt_params int_params = {0};
2220 int r;
2221 int i;
2222 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2224 if (adev->asic_type >= CHIP_VEGA10)
2225 client_id = SOC15_IH_CLIENTID_DCE;
2227 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2228 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2230 /*
2231 * Actions of amdgpu_irq_add_id():
2232 * 1. Register a set() function with base driver.
2233 * Base driver will call set() function to enable/disable an
2234 * interrupt in DC hardware.
2235 * 2. Register amdgpu_dm_irq_handler().
2236 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2237 * coming from DC hardware.
2238 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2239 * for acknowledging and handling. */
2241 /* Use VBLANK interrupt */
2242 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2243 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2245 DRM_ERROR("Failed to add crtc irq id!\n");
2249 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2250 int_params.irq_source =
2251 dc_interrupt_to_irq_source(dc, i, 0);
2253 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2255 c_irq_params->adev = adev;
2256 c_irq_params->irq_src = int_params.irq_source;
2258 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2259 dm_crtc_high_irq, c_irq_params);
2260 }
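/*
 * Example (illustrative): on the first loop iteration, i maps to
 * DC_IRQ_SOURCE_VBLANK1, so c_irq_params points at
 * adev->dm.vblank_params[0]; dm_crtc_high_irq() then receives that
 * context pointer on every D1 vblank.
 */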
2262 /* Use VUPDATE interrupt */
2263 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2264 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2266 DRM_ERROR("Failed to add vupdate irq id!\n");
2270 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2271 int_params.irq_source =
2272 dc_interrupt_to_irq_source(dc, i, 0);
2274 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2276 c_irq_params->adev = adev;
2277 c_irq_params->irq_src = int_params.irq_source;
2279 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2280 dm_vupdate_high_irq, c_irq_params);
2281 }
2283 /* Use GRPH_PFLIP interrupt */
2284 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2285 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2286 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2288 DRM_ERROR("Failed to add page flip irq id!\n");
2292 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2293 int_params.irq_source =
2294 dc_interrupt_to_irq_source(dc, i, 0);
2296 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2298 c_irq_params->adev = adev;
2299 c_irq_params->irq_src = int_params.irq_source;
2301 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2302 dm_pflip_high_irq, c_irq_params);
2303 }
2305 /* HPD */
2307 r = amdgpu_irq_add_id(adev, client_id,
2308 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2310 DRM_ERROR("Failed to add hpd irq id!\n");
2314 register_hpd_handlers(adev);
2316 return 0;
2317 }
2319 #if defined(CONFIG_DRM_AMD_DC_DCN)
2320 /* Register IRQ sources and initialize IRQ callbacks */
2321 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2322 {
2323 struct dc *dc = adev->dm.dc;
2324 struct common_irq_params *c_irq_params;
2325 struct dc_interrupt_params int_params = {0};
2326 int r;
2327 int i;
2329 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2330 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2333 * Actions of amdgpu_irq_add_id():
2334 * 1. Register a set() function with base driver.
2335 * Base driver will call set() function to enable/disable an
2336 * interrupt in DC hardware.
2337 * 2. Register amdgpu_dm_irq_handler().
2338 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2339 * coming from DC hardware.
2340 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2341 * for acknowledging and handling.
2342 */
2344 /* Use VSTARTUP interrupt */
2345 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2346 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2347 i++) {
2348 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2351 DRM_ERROR("Failed to add crtc irq id!\n");
2355 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2356 int_params.irq_source =
2357 dc_interrupt_to_irq_source(dc, i, 0);
2359 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2361 c_irq_params->adev = adev;
2362 c_irq_params->irq_src = int_params.irq_source;
2364 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2365 dm_dcn_crtc_high_irq, c_irq_params);
2366 }
2368 /* Use GRPH_PFLIP interrupt */
2369 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2370 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2371 i++) {
2372 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2374 DRM_ERROR("Failed to add page flip irq id!\n");
2378 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2379 int_params.irq_source =
2380 dc_interrupt_to_irq_source(dc, i, 0);
2382 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2384 c_irq_params->adev = adev;
2385 c_irq_params->irq_src = int_params.irq_source;
2387 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2388 dm_pflip_high_irq, c_irq_params);
2389 }
2391 /* HPD */
2393 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2396 DRM_ERROR("Failed to add hpd irq id!\n");
2400 register_hpd_handlers(adev);
2402 return 0;
2403 }
2404 #endif
2406 /*
2407 * Acquires the lock for the atomic state object and returns
2408 * the new atomic state.
2409 *
2410 * This should only be called during atomic check.
2411 */
2412 static int dm_atomic_get_state(struct drm_atomic_state *state,
2413 struct dm_atomic_state **dm_state)
2414 {
2415 struct drm_device *dev = state->dev;
2416 struct amdgpu_device *adev = dev->dev_private;
2417 struct amdgpu_display_manager *dm = &adev->dm;
2418 struct drm_private_state *priv_state;
2423 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2424 if (IS_ERR(priv_state))
2425 return PTR_ERR(priv_state);
2427 *dm_state = to_dm_atomic_state(priv_state);
2429 return 0;
2430 }
2432 struct dm_atomic_state *
2433 dm_atomic_get_new_state(struct drm_atomic_state *state)
2434 {
2435 struct drm_device *dev = state->dev;
2436 struct amdgpu_device *adev = dev->dev_private;
2437 struct amdgpu_display_manager *dm = &adev->dm;
2438 struct drm_private_obj *obj;
2439 struct drm_private_state *new_obj_state;
2442 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2443 if (obj->funcs == dm->atomic_obj.funcs)
2444 return to_dm_atomic_state(new_obj_state);
2445 }
2447 return NULL;
2448 }
2450 struct dm_atomic_state *
2451 dm_atomic_get_old_state(struct drm_atomic_state *state)
2452 {
2453 struct drm_device *dev = state->dev;
2454 struct amdgpu_device *adev = dev->dev_private;
2455 struct amdgpu_display_manager *dm = &adev->dm;
2456 struct drm_private_obj *obj;
2457 struct drm_private_state *old_obj_state;
2460 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2461 if (obj->funcs == dm->atomic_obj.funcs)
2462 return to_dm_atomic_state(old_obj_state);
2463 }
2465 return NULL;
2466 }
2468 static struct drm_private_state *
2469 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2470 {
2471 struct dm_atomic_state *old_state, *new_state;
2473 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2474 if (!new_state)
2475 return NULL;
2477 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2479 old_state = to_dm_atomic_state(obj->state);
2481 if (old_state && old_state->context)
2482 new_state->context = dc_copy_state(old_state->context);
2484 if (!new_state->context) {
2485 kfree(new_state);
2486 return NULL;
2487 }
2489 return &new_state->base;
2492 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2493 struct drm_private_state *state)
2494 {
2495 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2497 if (dm_state && dm_state->context)
2498 dc_release_state(dm_state->context);
2500 kfree(dm_state);
2501 }
2503 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2504 .atomic_duplicate_state = dm_atomic_duplicate_state,
2505 .atomic_destroy_state = dm_atomic_destroy_state,
2506 };
2508 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2509 {
2510 struct dm_atomic_state *state;
2511 int r;
2513 adev->mode_info.mode_config_initialized = true;
2515 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2516 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2518 adev->ddev->mode_config.max_width = 16384;
2519 adev->ddev->mode_config.max_height = 16384;
2521 adev->ddev->mode_config.preferred_depth = 24;
2522 adev->ddev->mode_config.prefer_shadow = 1;
2523 /* indicates support for immediate flip */
2524 adev->ddev->mode_config.async_page_flip = true;
2526 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2528 state = kzalloc(sizeof(*state), GFP_KERNEL);
2530 if (!state)
2531 return -ENOMEM;
2532 state->context = dc_create_state(adev->dm.dc);
2533 if (!state->context) {
2534 kfree(state);
2535 return -ENOMEM;
2536 }
2538 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2540 drm_atomic_private_obj_init(adev->ddev,
2541 &adev->dm.atomic_obj,
2542 &state->base,
2543 &dm_atomic_state_funcs);
2545 r = amdgpu_display_modeset_create_props(adev);
2546 if (r)
2547 return r;
2549 r = amdgpu_dm_audio_init(adev);
2550 if (r)
2551 return r;
2553 return 0;
2554 }
2556 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2557 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2559 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2560 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2562 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2564 #if defined(CONFIG_ACPI)
2565 struct amdgpu_dm_backlight_caps caps;
2567 if (dm->backlight_caps.caps_valid)
2568 return;
2570 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2571 if (caps.caps_valid) {
2572 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2573 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2574 dm->backlight_caps.caps_valid = true;
2575 } else {
2576 dm->backlight_caps.min_input_signal =
2577 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2578 dm->backlight_caps.max_input_signal =
2579 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2580 }
2581 #else
2582 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2583 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2584 #endif
2585 }
2587 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2588 {
2589 struct amdgpu_display_manager *dm = bl_get_data(bd);
2590 struct amdgpu_dm_backlight_caps caps;
2591 uint32_t brightness = bd->props.brightness;
2593 amdgpu_dm_update_backlight_caps(dm);
2594 caps = dm->backlight_caps;
2595 /*
2596 * The brightness input is in the range 0-255.
2597 * It needs to be rescaled to be between the
2598 * requested min and max input signal.
2599 *
2600 * It also needs to be scaled up by 0x101 to
2601 * match the DC interface which has a range of
2602 * 0 to 0xffff.
2603 */
2604 brightness =
2605 brightness
2606 * 0x101
2607 * (caps.max_input_signal - caps.min_input_signal)
2608 / AMDGPU_MAX_BL_LEVEL
2609 + caps.min_input_signal * 0x101;
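/*
 * Worked example (illustrative numbers, assuming the ACPI defaults above):
 * with min_input_signal = 12, max_input_signal = 255 and a user brightness
 * of 128, this computes 128 * 0x101 * (255 - 12) / 255 + 12 * 0x101 =
 * 34432 (~0x8680), i.e. roughly mid-scale of the 0-0xffff range DC expects.
 */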
2611 if (dc_link_set_backlight_level(dm->backlight_link,
2612 brightness, 0))
2613 return 0;
2614 else
2615 return 1;
2616 }
2618 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2619 {
2620 struct amdgpu_display_manager *dm = bl_get_data(bd);
2621 int ret = dc_link_get_backlight_level(dm->backlight_link);
2623 if (ret == DC_ERROR_UNEXPECTED)
2624 return bd->props.brightness;
2625 return ret;
2626 }
2628 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2629 .options = BL_CORE_SUSPENDRESUME,
2630 .get_brightness = amdgpu_dm_backlight_get_brightness,
2631 .update_status = amdgpu_dm_backlight_update_status,
2632 };
2634 static void
2635 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2636 {
2637 char bl_name[16];
2638 struct backlight_properties props = { 0 };
2640 amdgpu_dm_update_backlight_caps(dm);
2642 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2643 props.brightness = AMDGPU_MAX_BL_LEVEL;
2644 props.type = BACKLIGHT_RAW;
2646 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2647 dm->adev->ddev->primary->index);
2649 dm->backlight_dev = backlight_device_register(bl_name,
2650 dm->adev->ddev->dev,
2651 dm,
2652 &amdgpu_dm_backlight_ops,
2653 &props);
2655 if (IS_ERR(dm->backlight_dev))
2656 DRM_ERROR("DM: Backlight registration failed!\n");
2658 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2663 static int initialize_plane(struct amdgpu_display_manager *dm,
2664 struct amdgpu_mode_info *mode_info, int plane_id,
2665 enum drm_plane_type plane_type,
2666 const struct dc_plane_cap *plane_cap)
2667 {
2668 struct drm_plane *plane;
2669 unsigned long possible_crtcs;
2672 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2673 if (!plane) {
2674 DRM_ERROR("KMS: Failed to allocate plane\n");
2675 return -ENOMEM;
2676 }
2677 plane->type = plane_type;
2680 * HACK: IGT tests expect that the primary plane for a CRTC
2681 * can only have one possible CRTC. Only expose support for
2682 * any CRTC if they're not going to be used as a primary plane
2683 * for a CRTC - like overlay or underlay planes.
2685 possible_crtcs = 1 << plane_id;
2686 if (plane_id >= dm->dc->caps.max_streams)
2687 possible_crtcs = 0xff;
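/*
 * Example (illustrative): with dc->caps.max_streams == 4, primary plane 2
 * gets possible_crtcs = 1 << 2 = 0x4 and is tied to CRTC 2 only, while an
 * overlay created with plane_id 4 takes the 0xff mask and may be placed
 * on any CRTC.
 */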
2689 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2692 DRM_ERROR("KMS: Failed to initialize plane\n");
2698 mode_info->planes[plane_id] = plane;
2704 static void register_backlight_device(struct amdgpu_display_manager *dm,
2705 struct dc_link *link)
2706 {
2707 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2708 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2710 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2711 link->type != dc_connection_none) {
2712 /*
2713 * Even if registration fails, we should continue with
2714 * DM initialization, because not having a backlight control
2715 * is better than a black screen.
2716 */
2717 amdgpu_dm_register_backlight_device(dm);
2719 if (dm->backlight_dev)
2720 dm->backlight_link = link;
2721 }
2722 #endif
2723 }
2726 /*
2727 * In this architecture, the association
2728 * connector -> encoder -> crtc
2729 * is not really required. The crtc and connector will hold the
2730 * display_index as an abstraction to use with DAL component.
2731 *
2732 * Returns 0 on success
2733 */
2734 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2735 {
2736 struct amdgpu_display_manager *dm = &adev->dm;
2737 int32_t i;
2738 struct amdgpu_dm_connector *aconnector = NULL;
2739 struct amdgpu_encoder *aencoder = NULL;
2740 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2741 uint32_t link_cnt;
2742 int32_t primary_planes;
2743 enum dc_connection_type new_connection_type = dc_connection_none;
2744 const struct dc_plane_cap *plane;
2746 link_cnt = dm->dc->caps.max_links;
2747 if (amdgpu_dm_mode_config_init(dm->adev)) {
2748 DRM_ERROR("DM: Failed to initialize mode config\n");
2752 /* There is one primary plane per CRTC */
2753 primary_planes = dm->dc->caps.max_streams;
2754 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2757 * Initialize primary planes, implicit planes for legacy IOCTLS.
2758 * Order is reversed to match iteration order in atomic check.
2760 for (i = (primary_planes - 1); i >= 0; i--) {
2761 plane = &dm->dc->caps.planes[i];
2763 if (initialize_plane(dm, mode_info, i,
2764 DRM_PLANE_TYPE_PRIMARY, plane)) {
2765 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2771 * Initialize overlay planes, index starting after primary planes.
2772 * These planes have a higher DRM index than the primary planes since
2773 * they should be considered as having a higher z-order.
2774 * Order is reversed to match iteration order in atomic check.
2776 * Only support DCN for now, and only expose one so we don't encourage
2777 * userspace to use up all the pipes.
2779 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2780 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2782 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2783 continue;
2785 if (!plane->blends_with_above || !plane->blends_with_below)
2786 continue;
2788 if (!plane->pixel_format_support.argb8888)
2789 continue;
2791 if (initialize_plane(dm, NULL, primary_planes + i,
2792 DRM_PLANE_TYPE_OVERLAY, plane)) {
2793 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2797 /* Only create one overlay plane. */
2798 break;
2799 }
2801 for (i = 0; i < dm->dc->caps.max_streams; i++)
2802 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2803 DRM_ERROR("KMS: Failed to initialize crtc\n");
2807 dm->display_indexes_num = dm->dc->caps.max_streams;
2809 /* loops over all connectors on the board */
2810 for (i = 0; i < link_cnt; i++) {
2811 struct dc_link *link = NULL;
2813 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2815 "KMS: Cannot support more than %d display indexes\n",
2816 AMDGPU_DM_MAX_DISPLAY_INDEX);
2820 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2821 if (!aconnector)
2822 goto fail;
2824 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2825 if (!aencoder)
2826 goto fail;
2828 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2829 DRM_ERROR("KMS: Failed to initialize encoder\n");
2833 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2834 DRM_ERROR("KMS: Failed to initialize connector\n");
2838 link = dc_get_link_at_index(dm->dc, i);
2840 if (!dc_link_detect_sink(link, &new_connection_type))
2841 DRM_ERROR("KMS: Failed to detect connector\n");
2843 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2844 emulated_link_detect(link);
2845 amdgpu_dm_update_connector_after_detect(aconnector);
2847 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2848 amdgpu_dm_update_connector_after_detect(aconnector);
2849 register_backlight_device(dm, link);
2850 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2851 amdgpu_dm_set_psr_caps(link);
2852 }
2853 }
2857 /* Software is initialized. Now we can register interrupt handlers. */
2858 switch (adev->asic_type) {
2859 case CHIP_BONAIRE:
2860 case CHIP_HAWAII:
2861 case CHIP_KAVERI:
2862 case CHIP_KABINI:
2863 case CHIP_MULLINS:
2864 case CHIP_TONGA:
2865 case CHIP_FIJI:
2866 case CHIP_CARRIZO:
2867 case CHIP_STONEY:
2868 case CHIP_POLARIS11:
2869 case CHIP_POLARIS10:
2870 case CHIP_POLARIS12:
2871 case CHIP_VEGAM:
2872 case CHIP_VEGA10:
2873 case CHIP_VEGA12:
2874 case CHIP_VEGA20:
2875 if (dce110_register_irq_handlers(dm->adev)) {
2876 DRM_ERROR("DM: Failed to initialize IRQ\n");
2880 #if defined(CONFIG_DRM_AMD_DC_DCN)
2886 if (dcn10_register_irq_handlers(dm->adev)) {
2887 DRM_ERROR("DM: Failed to initialize IRQ\n");
2893 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2894 goto fail;
2895 }
2897 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2898 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2900 return 0;
2902 fail:
2903 kfree(aencoder);
2904 kfree(aconnector);
2906 return -EINVAL;
2907 }
2908 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2909 {
2910 drm_mode_config_cleanup(dm->ddev);
2911 drm_atomic_private_obj_fini(&dm->atomic_obj);
2912 return;
2913 }
2915 /******************************************************************************
2916 * amdgpu_display_funcs functions
2917 *****************************************************************************/
2919 /**
2920 * dm_bandwidth_update - program display watermarks
2921 *
2922 * @adev: amdgpu_device pointer
2923 *
2924 * Calculate and program the display watermarks and line buffer allocation.
2925 */
2926 static void dm_bandwidth_update(struct amdgpu_device *adev)
2927 {
2928 /* TODO: implement later */
2929 }
2931 static const struct amdgpu_display_funcs dm_display_funcs = {
2932 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2933 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
2934 .backlight_set_level = NULL, /* never called for DC */
2935 .backlight_get_level = NULL, /* never called for DC */
2936 .hpd_sense = NULL,/* called unconditionally */
2937 .hpd_set_polarity = NULL, /* called unconditionally */
2938 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2939 .page_flip_get_scanoutpos =
2940 dm_crtc_get_scanoutpos,/* called unconditionally */
2941 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2942 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
2943 };
2945 #if defined(CONFIG_DEBUG_KERNEL_DC)
2947 static ssize_t s3_debug_store(struct device *device,
2948 struct device_attribute *attr,
2949 const char *buf,
2950 size_t count)
2951 {
2952 int ret;
2953 int s3_state;
2954 struct drm_device *drm_dev = dev_get_drvdata(device);
2955 struct amdgpu_device *adev = drm_dev->dev_private;
2957 ret = kstrtoint(buf, 0, &s3_state);
2959 if (ret == 0) {
2960 if (s3_state) {
2961 dm_resume(adev);
2962 drm_kms_helper_hotplug_event(adev->ddev);
2963 } else
2964 dm_suspend(adev);
2965 }
2967 return ret == 0 ? count : 0;
2968 }
2970 DEVICE_ATTR_WO(s3_debug);
2972 #endif
2974 static int dm_early_init(void *handle)
2975 {
2976 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2978 switch (adev->asic_type) {
2979 case CHIP_BONAIRE:
2980 case CHIP_HAWAII:
2981 adev->mode_info.num_crtc = 6;
2982 adev->mode_info.num_hpd = 6;
2983 adev->mode_info.num_dig = 6;
2984 break;
2985 case CHIP_KAVERI:
2986 adev->mode_info.num_crtc = 4;
2987 adev->mode_info.num_hpd = 6;
2988 adev->mode_info.num_dig = 7;
2989 break;
2990 case CHIP_KABINI:
2991 case CHIP_MULLINS:
2992 adev->mode_info.num_crtc = 2;
2993 adev->mode_info.num_hpd = 6;
2994 adev->mode_info.num_dig = 6;
2995 break;
2996 case CHIP_FIJI:
2997 case CHIP_TONGA:
2998 adev->mode_info.num_crtc = 6;
2999 adev->mode_info.num_hpd = 6;
3000 adev->mode_info.num_dig = 7;
3001 break;
3002 case CHIP_CARRIZO:
3003 adev->mode_info.num_crtc = 3;
3004 adev->mode_info.num_hpd = 6;
3005 adev->mode_info.num_dig = 9;
3006 break;
3007 case CHIP_STONEY:
3008 adev->mode_info.num_crtc = 2;
3009 adev->mode_info.num_hpd = 6;
3010 adev->mode_info.num_dig = 9;
3011 break;
3012 case CHIP_POLARIS11:
3013 case CHIP_POLARIS12:
3014 adev->mode_info.num_crtc = 5;
3015 adev->mode_info.num_hpd = 5;
3016 adev->mode_info.num_dig = 5;
3017 break;
3018 case CHIP_POLARIS10:
3019 case CHIP_VEGAM:
3020 adev->mode_info.num_crtc = 6;
3021 adev->mode_info.num_hpd = 6;
3022 adev->mode_info.num_dig = 6;
3023 break;
3024 case CHIP_VEGA10:
3025 case CHIP_VEGA12:
3026 case CHIP_VEGA20:
3027 adev->mode_info.num_crtc = 6;
3028 adev->mode_info.num_hpd = 6;
3029 adev->mode_info.num_dig = 6;
3030 break;
3031 #if defined(CONFIG_DRM_AMD_DC_DCN)
3032 case CHIP_RAVEN:
3033 adev->mode_info.num_crtc = 4;
3034 adev->mode_info.num_hpd = 4;
3035 adev->mode_info.num_dig = 4;
3036 break;
3037 #endif
3038 case CHIP_NAVI10:
3039 case CHIP_NAVI12:
3040 adev->mode_info.num_crtc = 6;
3041 adev->mode_info.num_hpd = 6;
3042 adev->mode_info.num_dig = 6;
3043 break;
3044 case CHIP_NAVI14:
3045 adev->mode_info.num_crtc = 5;
3046 adev->mode_info.num_hpd = 5;
3047 adev->mode_info.num_dig = 5;
3048 break;
3049 case CHIP_RENOIR:
3050 adev->mode_info.num_crtc = 4;
3051 adev->mode_info.num_hpd = 4;
3052 adev->mode_info.num_dig = 4;
3055 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3059 amdgpu_dm_set_irq_funcs(adev);
3061 if (adev->mode_info.funcs == NULL)
3062 adev->mode_info.funcs = &dm_display_funcs;
3065 * Note: Do NOT change adev->audio_endpt_rreg and
3066 * adev->audio_endpt_wreg because they are initialised in
3067 * amdgpu_device_init()
3069 #if defined(CONFIG_DEBUG_KERNEL_DC)
3070 device_create_file(
3071 adev->ddev->dev,
3072 &dev_attr_s3_debug);
3073 #endif
3075 return 0;
3076 }
3078 static bool modeset_required(struct drm_crtc_state *crtc_state,
3079 struct dc_stream_state *new_stream,
3080 struct dc_stream_state *old_stream)
3081 {
3082 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3083 return false;
3085 if (!crtc_state->enable)
3086 return false;
3088 return crtc_state->active;
3089 }
3091 static bool modereset_required(struct drm_crtc_state *crtc_state)
3092 {
3093 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3094 return false;
3096 return !crtc_state->enable || !crtc_state->active;
3097 }
3099 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3100 {
3101 drm_encoder_cleanup(encoder);
3102 kfree(encoder);
3103 }
3105 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3106 .destroy = amdgpu_dm_encoder_destroy,
3107 };
3110 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3111 struct dc_scaling_info *scaling_info)
3112 {
3113 int scale_w, scale_h;
3115 memset(scaling_info, 0, sizeof(*scaling_info));
3117 /* Source is fixed 16.16 but we ignore mantissa for now... */
3118 scaling_info->src_rect.x = state->src_x >> 16;
3119 scaling_info->src_rect.y = state->src_y >> 16;
3121 scaling_info->src_rect.width = state->src_w >> 16;
3122 if (scaling_info->src_rect.width == 0)
3123 return -EINVAL;
3125 scaling_info->src_rect.height = state->src_h >> 16;
3126 if (scaling_info->src_rect.height == 0)
3127 return -EINVAL;
3129 scaling_info->dst_rect.x = state->crtc_x;
3130 scaling_info->dst_rect.y = state->crtc_y;
3132 if (state->crtc_w == 0)
3133 return -EINVAL;
3135 scaling_info->dst_rect.width = state->crtc_w;
3137 if (state->crtc_h == 0)
3138 return -EINVAL;
3140 scaling_info->dst_rect.height = state->crtc_h;
3142 /* DRM doesn't specify clipping on destination output. */
3143 scaling_info->clip_rect = scaling_info->dst_rect;
3145 /* TODO: Validate scaling per-format with DC plane caps */
3146 scale_w = scaling_info->dst_rect.width * 1000 /
3147 scaling_info->src_rect.width;
3149 if (scale_w < 250 || scale_w > 16000)
3150 return -EINVAL;
3152 scale_h = scaling_info->dst_rect.height * 1000 /
3153 scaling_info->src_rect.height;
3155 if (scale_h < 250 || scale_h > 16000)
3156 return -EINVAL;
3159 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3160 * assume reasonable defaults based on the format.
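/*
 * Worked example (illustrative values): a 1920x1080 source scanned out at
 * 960x540 arrives as src_w = 1920 << 16, so src_rect.width is 1920 after
 * the shift above, and scale_w = 960 * 1000 / 1920 = 500, well inside the
 * 250-16000 bounds (at most 4x downscale, 16x upscale).
 */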
3166 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3167 uint64_t *tiling_flags)
3168 {
3169 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3170 int r = amdgpu_bo_reserve(rbo, false);
3172 if (r) {
3173 /* Don't show error message when returning -ERESTARTSYS */
3174 if (r != -ERESTARTSYS)
3175 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3176 return r;
3177 }
3179 if (tiling_flags)
3180 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3182 amdgpu_bo_unreserve(rbo);
3184 return r;
3185 }
3187 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3188 {
3189 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3191 return offset ? (address + offset * 256) : 0;
3192 }
3194 static int
3195 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3196 const struct amdgpu_framebuffer *afb,
3197 const enum surface_pixel_format format,
3198 const enum dc_rotation_angle rotation,
3199 const struct plane_size *plane_size,
3200 const union dc_tiling_info *tiling_info,
3201 const uint64_t info,
3202 struct dc_plane_dcc_param *dcc,
3203 struct dc_plane_address *address)
3204 {
3205 struct dc *dc = adev->dm.dc;
3206 struct dc_dcc_surface_param input;
3207 struct dc_surface_dcc_cap output;
3208 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3209 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3210 uint64_t dcc_address;
3212 memset(&input, 0, sizeof(input));
3213 memset(&output, 0, sizeof(output));
3218 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3219 return 0;
3221 if (!dc->cap_funcs.get_dcc_compression_cap)
3222 return -EINVAL;
3224 input.format = format;
3225 input.surface_size.width = plane_size->surface_size.width;
3226 input.surface_size.height = plane_size->surface_size.height;
3227 input.swizzle_mode = tiling_info->gfx9.swizzle;
3229 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3230 input.scan = SCAN_DIRECTION_HORIZONTAL;
3231 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3232 input.scan = SCAN_DIRECTION_VERTICAL;
3234 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3235 return -EINVAL;
3237 if (!output.capable)
3238 return -EINVAL;
3240 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3241 return -EINVAL;
3243 dcc->enable = 1;
3244 dcc->meta_pitch =
3245 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3246 dcc->independent_64b_blks = i64b;
3248 dcc_address = get_dcc_address(afb->address, info);
3249 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3250 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3252 return 0;
3253 }
3256 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3257 const struct amdgpu_framebuffer *afb,
3258 const enum surface_pixel_format format,
3259 const enum dc_rotation_angle rotation,
3260 const uint64_t tiling_flags,
3261 union dc_tiling_info *tiling_info,
3262 struct plane_size *plane_size,
3263 struct dc_plane_dcc_param *dcc,
3264 struct dc_plane_address *address)
3265 {
3266 const struct drm_framebuffer *fb = &afb->base;
3267 int ret;
3269 memset(tiling_info, 0, sizeof(*tiling_info));
3270 memset(plane_size, 0, sizeof(*plane_size));
3271 memset(dcc, 0, sizeof(*dcc));
3272 memset(address, 0, sizeof(*address));
3274 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3275 plane_size->surface_size.x = 0;
3276 plane_size->surface_size.y = 0;
3277 plane_size->surface_size.width = fb->width;
3278 plane_size->surface_size.height = fb->height;
3279 plane_size->surface_pitch =
3280 fb->pitches[0] / fb->format->cpp[0];
3282 address->type = PLN_ADDR_TYPE_GRAPHICS;
3283 address->grph.addr.low_part = lower_32_bits(afb->address);
3284 address->grph.addr.high_part = upper_32_bits(afb->address);
3285 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3286 uint64_t chroma_addr = afb->address + fb->offsets[1];
3288 plane_size->surface_size.x = 0;
3289 plane_size->surface_size.y = 0;
3290 plane_size->surface_size.width = fb->width;
3291 plane_size->surface_size.height = fb->height;
3292 plane_size->surface_pitch =
3293 fb->pitches[0] / fb->format->cpp[0];
3295 plane_size->chroma_size.x = 0;
3296 plane_size->chroma_size.y = 0;
3297 /* TODO: set these based on surface format */
3298 plane_size->chroma_size.width = fb->width / 2;
3299 plane_size->chroma_size.height = fb->height / 2;
3301 plane_size->chroma_pitch =
3302 fb->pitches[1] / fb->format->cpp[1];
3304 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3305 address->video_progressive.luma_addr.low_part =
3306 lower_32_bits(afb->address);
3307 address->video_progressive.luma_addr.high_part =
3308 upper_32_bits(afb->address);
3309 address->video_progressive.chroma_addr.low_part =
3310 lower_32_bits(chroma_addr);
3311 address->video_progressive.chroma_addr.high_part =
3312 upper_32_bits(chroma_addr);
3313 }
3315 /* Fill GFX8 params */
3316 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3317 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3319 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3320 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3321 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3322 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3323 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3325 /* XXX fix me for VI */
3326 tiling_info->gfx8.num_banks = num_banks;
3327 tiling_info->gfx8.array_mode =
3328 DC_ARRAY_2D_TILED_THIN1;
3329 tiling_info->gfx8.tile_split = tile_split;
3330 tiling_info->gfx8.bank_width = bankw;
3331 tiling_info->gfx8.bank_height = bankh;
3332 tiling_info->gfx8.tile_aspect = mtaspect;
3333 tiling_info->gfx8.tile_mode =
3334 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3335 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3336 == DC_ARRAY_1D_TILED_THIN1) {
3337 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3338 }
3340 tiling_info->gfx8.pipe_config =
3341 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3343 if (adev->asic_type == CHIP_VEGA10 ||
3344 adev->asic_type == CHIP_VEGA12 ||
3345 adev->asic_type == CHIP_VEGA20 ||
3346 adev->asic_type == CHIP_NAVI10 ||
3347 adev->asic_type == CHIP_NAVI14 ||
3348 adev->asic_type == CHIP_NAVI12 ||
3349 adev->asic_type == CHIP_RENOIR ||
3350 adev->asic_type == CHIP_RAVEN) {
3351 /* Fill GFX9 params */
3352 tiling_info->gfx9.num_pipes =
3353 adev->gfx.config.gb_addr_config_fields.num_pipes;
3354 tiling_info->gfx9.num_banks =
3355 adev->gfx.config.gb_addr_config_fields.num_banks;
3356 tiling_info->gfx9.pipe_interleave =
3357 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3358 tiling_info->gfx9.num_shader_engines =
3359 adev->gfx.config.gb_addr_config_fields.num_se;
3360 tiling_info->gfx9.max_compressed_frags =
3361 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3362 tiling_info->gfx9.num_rb_per_se =
3363 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3364 tiling_info->gfx9.swizzle =
3365 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3366 tiling_info->gfx9.shaderEnable = 1;
3368 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3369 plane_size, tiling_info,
3370 tiling_flags, dcc, address);
3371 if (ret)
3372 return ret;
3373 }
3375 return 0;
3376 }
3379 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3380 bool *per_pixel_alpha, bool *global_alpha,
3381 int *global_alpha_value)
3382 {
3383 *per_pixel_alpha = false;
3384 *global_alpha = false;
3385 *global_alpha_value = 0xff;
3387 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3388 return;
3390 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3391 static const uint32_t alpha_formats[] = {
3392 DRM_FORMAT_ARGB8888,
3393 DRM_FORMAT_RGBA8888,
3394 DRM_FORMAT_ABGR8888,
3396 uint32_t format = plane_state->fb->format->format;
3397 unsigned int i;
3399 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3400 if (format == alpha_formats[i]) {
3401 *per_pixel_alpha = true;
3402 break;
3403 }
3404 }
3405 }
3407 if (plane_state->alpha < 0xffff) {
3408 *global_alpha = true;
3409 *global_alpha_value = plane_state->alpha >> 8;
3410 }
3411 }
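/*
 * Example (illustrative): DRM plane alpha is 16 bits wide, so a userspace
 * value of 0x8000 (about 50%) is below the opaque 0xffff, enabling global
 * alpha with global_alpha_value = 0x8000 >> 8 = 0x80.
 */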
3414 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3415 const enum surface_pixel_format format,
3416 enum dc_color_space *color_space)
3417 {
3418 bool full_range;
3420 *color_space = COLOR_SPACE_SRGB;
3422 /* DRM color properties only affect non-RGB formats. */
3423 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3424 return 0;
3426 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3428 switch (plane_state->color_encoding) {
3429 case DRM_COLOR_YCBCR_BT601:
3430 if (full_range)
3431 *color_space = COLOR_SPACE_YCBCR601;
3432 else
3433 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3434 break;
3436 case DRM_COLOR_YCBCR_BT709:
3437 if (full_range)
3438 *color_space = COLOR_SPACE_YCBCR709;
3439 else
3440 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3441 break;
3443 case DRM_COLOR_YCBCR_BT2020:
3444 if (full_range)
3445 *color_space = COLOR_SPACE_2020_YCBCR;
3446 else
3447 return -EINVAL;
3448 break;
3450 default:
3451 return -EINVAL;
3452 }
3454 return 0;
3455 }
3458 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3459 const struct drm_plane_state *plane_state,
3460 const uint64_t tiling_flags,
3461 struct dc_plane_info *plane_info,
3462 struct dc_plane_address *address)
3463 {
3464 const struct drm_framebuffer *fb = plane_state->fb;
3465 const struct amdgpu_framebuffer *afb =
3466 to_amdgpu_framebuffer(plane_state->fb);
3467 struct drm_format_name_buf format_name;
3468 int ret;
3470 memset(plane_info, 0, sizeof(*plane_info));
3472 switch (fb->format->format) {
3473 case DRM_FORMAT_C8:
3474 plane_info->format =
3475 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3476 break;
3477 case DRM_FORMAT_RGB565:
3478 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3479 break;
3480 case DRM_FORMAT_XRGB8888:
3481 case DRM_FORMAT_ARGB8888:
3482 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3483 break;
3484 case DRM_FORMAT_XRGB2101010:
3485 case DRM_FORMAT_ARGB2101010:
3486 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3487 break;
3488 case DRM_FORMAT_XBGR2101010:
3489 case DRM_FORMAT_ABGR2101010:
3490 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3491 break;
3492 case DRM_FORMAT_XBGR8888:
3493 case DRM_FORMAT_ABGR8888:
3494 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3495 break;
3496 case DRM_FORMAT_NV21:
3497 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3498 break;
3499 case DRM_FORMAT_NV12:
3500 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3501 break;
3502 default:
3503 DRM_ERROR(
3504 "Unsupported screen format %s\n",
3505 drm_get_format_name(fb->format->format, &format_name));
3506 return -EINVAL;
3507 }
3509 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3510 case DRM_MODE_ROTATE_0:
3511 plane_info->rotation = ROTATION_ANGLE_0;
3512 break;
3513 case DRM_MODE_ROTATE_90:
3514 plane_info->rotation = ROTATION_ANGLE_90;
3515 break;
3516 case DRM_MODE_ROTATE_180:
3517 plane_info->rotation = ROTATION_ANGLE_180;
3518 break;
3519 case DRM_MODE_ROTATE_270:
3520 plane_info->rotation = ROTATION_ANGLE_270;
3521 break;
3522 default:
3523 plane_info->rotation = ROTATION_ANGLE_0;
3524 break;
3525 }
3527 plane_info->visible = true;
3528 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3530 plane_info->layer_index = 0;
3532 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3533 &plane_info->color_space);
3534 if (ret)
3535 return ret;
3537 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3538 plane_info->rotation, tiling_flags,
3539 &plane_info->tiling_info,
3540 &plane_info->plane_size,
3541 &plane_info->dcc, address);
3542 if (ret)
3543 return ret;
3545 fill_blending_from_plane_state(
3546 plane_state, &plane_info->per_pixel_alpha,
3547 &plane_info->global_alpha, &plane_info->global_alpha_value);
3549 return 0;
3550 }
3552 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3553 struct dc_plane_state *dc_plane_state,
3554 struct drm_plane_state *plane_state,
3555 struct drm_crtc_state *crtc_state)
3556 {
3557 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3558 const struct amdgpu_framebuffer *amdgpu_fb =
3559 to_amdgpu_framebuffer(plane_state->fb);
3560 struct dc_scaling_info scaling_info;
3561 struct dc_plane_info plane_info;
3562 uint64_t tiling_flags;
3563 int ret;
3565 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3566 if (ret)
3567 return ret;
3569 dc_plane_state->src_rect = scaling_info.src_rect;
3570 dc_plane_state->dst_rect = scaling_info.dst_rect;
3571 dc_plane_state->clip_rect = scaling_info.clip_rect;
3572 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3574 ret = get_fb_info(amdgpu_fb, &tiling_flags);
3575 if (ret)
3576 return ret;
3578 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3579 &plane_info,
3580 &dc_plane_state->address);
3581 if (ret)
3582 return ret;
3584 dc_plane_state->format = plane_info.format;
3585 dc_plane_state->color_space = plane_info.color_space;
3587 dc_plane_state->plane_size = plane_info.plane_size;
3588 dc_plane_state->rotation = plane_info.rotation;
3589 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3590 dc_plane_state->stereo_format = plane_info.stereo_format;
3591 dc_plane_state->tiling_info = plane_info.tiling_info;
3592 dc_plane_state->visible = plane_info.visible;
3593 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3594 dc_plane_state->global_alpha = plane_info.global_alpha;
3595 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3596 dc_plane_state->dcc = plane_info.dcc;
3597 dc_plane_state->layer_index = plane_info.layer_index; /* Always 0 for now */
3599 /*
3600 * Always set input transfer function, since plane state is refreshed
3601 * per commit.
3602 */
3603 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3604 if (ret)
3605 return ret;
3607 return 0;
3608 }
3610 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3611 const struct dm_connector_state *dm_state,
3612 struct dc_stream_state *stream)
3613 {
3614 enum amdgpu_rmx_type rmx_type;
3616 struct rect src = { 0 }; /* viewport in composition space*/
3617 struct rect dst = { 0 }; /* stream addressable area */
3619 /* no mode. nothing to be done */
3620 if (!mode)
3621 return;
3623 /* Full screen scaling by default */
3624 src.width = mode->hdisplay;
3625 src.height = mode->vdisplay;
3626 dst.width = stream->timing.h_addressable;
3627 dst.height = stream->timing.v_addressable;
3629 if (dm_state) {
3630 rmx_type = dm_state->scaling;
3631 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3632 if (src.width * dst.height <
3633 src.height * dst.width) {
3634 /* height needs less upscaling/more downscaling */
3635 dst.width = src.width *
3636 dst.height / src.height;
3637 } else {
3638 /* width needs less upscaling/more downscaling */
3639 dst.height = src.height *
3640 dst.width / src.width;
3641 }
3642 } else if (rmx_type == RMX_CENTER) {
3643 dst = src;
3644 }
3646 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3647 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3649 if (dm_state->underscan_enable) {
3650 dst.x += dm_state->underscan_hborder / 2;
3651 dst.y += dm_state->underscan_vborder / 2;
3652 dst.width -= dm_state->underscan_hborder;
3653 dst.height -= dm_state->underscan_vborder;
3654 }
3655 }
3657 stream->src = src;
3658 stream->dst = dst;
3660 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3661 dst.x, dst.y, dst.width, dst.height);
3662 }
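/*
 * Worked example (illustrative): fitting a 1280x720 source onto a
 * 1920x1200 native timing with RMX_ASPECT takes the "width needs less
 * upscaling" branch (1280 * 1200 >= 720 * 1920), giving dst.height =
 * 720 * 1920 / 1280 = 1080; the centering then sets dst.y =
 * (1200 - 1080) / 2 = 60, i.e. a letterboxed 1920x1080 rectangle.
 */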
3665 static enum dc_color_depth
3666 convert_color_depth_from_display_info(const struct drm_connector *connector,
3667 const struct drm_connector_state *state,
3668 bool is_y420)
3669 {
3670 uint8_t bpc;
3672 if (is_y420) {
3673 bpc = 8;
3675 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3676 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3677 bpc = 16;
3678 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3679 bpc = 12;
3680 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3681 bpc = 10;
3682 } else {
3683 bpc = (uint8_t)connector->display_info.bpc;
3684 /* Assume 8 bpc by default if no bpc is specified. */
3685 bpc = bpc ? bpc : 8;
3686 }
3688 if (!state)
3689 state = connector->state;
3691 if (state) {
3692 /*
3693 * Cap display bpc based on the user requested value.
3694 *
3695 * The value for state->max_bpc may not be correctly updated
3696 * depending on when the connector gets added to the state
3697 * or if this was called outside of atomic check, so it
3698 * can't be used directly.
3699 */
3700 bpc = min(bpc, state->max_requested_bpc);
3702 /* Round down to the nearest even number. */
3703 bpc = bpc - (bpc & 1);
3704 }
3706 switch (bpc) {
3707 case 0:
3708 /*
3709 * Temporary workaround: DRM doesn't parse color depth for
3710 * EDID revisions before 1.4.
3711 * TODO: Fix edid parsing
3712 */
3713 return COLOR_DEPTH_888;
3714 case 6:
3715 return COLOR_DEPTH_666;
3716 case 8:
3717 return COLOR_DEPTH_888;
3718 case 10:
3719 return COLOR_DEPTH_101010;
3720 case 12:
3721 return COLOR_DEPTH_121212;
3722 case 14:
3723 return COLOR_DEPTH_141414;
3724 case 16:
3725 return COLOR_DEPTH_161616;
3726 default:
3727 return COLOR_DEPTH_UNDEFINED;
3728 }
3729 }
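/*
 * Example (illustrative): an EDID reporting 10 bpc combined with a
 * user-requested max of 8 yields min(10, 8) = 8 -> COLOR_DEPTH_888, while
 * an odd report of 7 bpc rounds down to 6 -> COLOR_DEPTH_666.
 */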
3731 static enum dc_aspect_ratio
3732 get_aspect_ratio(const struct drm_display_mode *mode_in)
3734 /* 1-1 mapping, since both enums follow the HDMI spec. */
3735 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3738 static enum dc_color_space
3739 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3740 {
3741 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3743 switch (dc_crtc_timing->pixel_encoding) {
3744 case PIXEL_ENCODING_YCBCR422:
3745 case PIXEL_ENCODING_YCBCR444:
3746 case PIXEL_ENCODING_YCBCR420:
3747 {
3748 /*
3749 * 27030khz is the separation point between HDTV and SDTV
3750 * according to the HDMI spec; we use YCbCr709 and YCbCr601
3751 * respectively.
3752 */
3753 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3754 if (dc_crtc_timing->flags.Y_ONLY)
3755 color_space =
3756 COLOR_SPACE_YCBCR709_LIMITED;
3757 else
3758 color_space = COLOR_SPACE_YCBCR709;
3759 } else {
3760 if (dc_crtc_timing->flags.Y_ONLY)
3761 color_space =
3762 COLOR_SPACE_YCBCR601_LIMITED;
3763 else
3764 color_space = COLOR_SPACE_YCBCR601;
3765 }
3766 }
3767 break;
3769 case PIXEL_ENCODING_RGB:
3770 color_space = COLOR_SPACE_SRGB;
3771 break;
3773 default:
3774 WARN_ON(1);
3775 break;
3776 }
3778 return color_space;
3779 }
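/*
 * Example (illustrative): 1080p60 YCbCr444 has pix_clk_100hz = 1485000,
 * above the 270300 threshold, so it is treated as HDTV and gets
 * COLOR_SPACE_YCBCR709; a 480p timing (pix_clk_100hz = 270270) falls
 * below it and gets COLOR_SPACE_YCBCR601.
 */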
3781 static bool adjust_colour_depth_from_display_info(
3782 struct dc_crtc_timing *timing_out,
3783 const struct drm_display_info *info)
3784 {
3785 enum dc_color_depth depth = timing_out->display_color_depth;
3786 int normalized_clk;
3787 do {
3788 normalized_clk = timing_out->pix_clk_100hz / 10;
3789 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3790 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3791 normalized_clk /= 2;
3792 /* Adjusting pix clock following on HDMI spec based on colour depth */
3793 switch (depth) {
3794 case COLOR_DEPTH_888:
3795 break;
3796 case COLOR_DEPTH_101010:
3797 normalized_clk = (normalized_clk * 30) / 24;
3798 break;
3799 case COLOR_DEPTH_121212:
3800 normalized_clk = (normalized_clk * 36) / 24;
3801 break;
3802 case COLOR_DEPTH_161616:
3803 normalized_clk = (normalized_clk * 48) / 24;
3804 break;
3805 default:
3806 /* The above depths are the only ones valid for HDMI. */
3807 return false;
3808 }
3809 if (normalized_clk <= info->max_tmds_clock) {
3810 timing_out->display_color_depth = depth;
3811 return true;
3812 }
3813 } while (--depth > COLOR_DEPTH_666);
3815 return false;
3816 }
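/*
 * Worked example (illustrative): 4k60 RGB has pix_clk_100hz = 5940000,
 * i.e. normalized_clk = 594000 kHz. Against a 600000 kHz max_tmds_clock,
 * 12 bpc (594000 * 36 / 24 = 891000) and 10 bpc (742500) both fail, so
 * the loop above settles on COLOR_DEPTH_888.
 */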
3817 static void fill_stream_properties_from_drm_display_mode(
3818 struct dc_stream_state *stream,
3819 const struct drm_display_mode *mode_in,
3820 const struct drm_connector *connector,
3821 const struct drm_connector_state *connector_state,
3822 const struct dc_stream_state *old_stream)
3823 {
3824 struct dc_crtc_timing *timing_out = &stream->timing;
3825 const struct drm_display_info *info = &connector->display_info;
3826 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3827 struct hdmi_vendor_infoframe hv_frame;
3828 struct hdmi_avi_infoframe avi_frame;
3830 memset(&hv_frame, 0, sizeof(hv_frame));
3831 memset(&avi_frame, 0, sizeof(avi_frame));
3833 timing_out->h_border_left = 0;
3834 timing_out->h_border_right = 0;
3835 timing_out->v_border_top = 0;
3836 timing_out->v_border_bottom = 0;
3837 /* TODO: un-hardcode */
3838 if (drm_mode_is_420_only(info, mode_in)
3839 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3840 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3841 else if (drm_mode_is_420_also(info, mode_in)
3842 && aconnector->force_yuv420_output)
3843 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3844 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3845 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3846 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3848 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3850 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3851 timing_out->display_color_depth = convert_color_depth_from_display_info(
3852 connector, connector_state,
3853 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3854 timing_out->scan_type = SCANNING_TYPE_NODATA;
3855 timing_out->hdmi_vic = 0;
3857 if (old_stream) {
3858 timing_out->vic = old_stream->timing.vic;
3859 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3860 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3861 } else {
3862 timing_out->vic = drm_match_cea_mode(mode_in);
3863 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3864 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3865 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3866 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3867 }
3869 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3870 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
3871 timing_out->vic = avi_frame.video_code;
3872 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
3873 timing_out->hdmi_vic = hv_frame.vic;
3874 }
3876 timing_out->h_addressable = mode_in->crtc_hdisplay;
3877 timing_out->h_total = mode_in->crtc_htotal;
3878 timing_out->h_sync_width =
3879 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3880 timing_out->h_front_porch =
3881 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3882 timing_out->v_total = mode_in->crtc_vtotal;
3883 timing_out->v_addressable = mode_in->crtc_vdisplay;
3884 timing_out->v_front_porch =
3885 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3886 timing_out->v_sync_width =
3887 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
3888 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
3889 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
3891 stream->output_color_space = get_output_color_space(timing_out);
3893 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3894 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
3895 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3896 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
3897 drm_mode_is_420_also(info, mode_in) &&
3898 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
3899 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3900 adjust_colour_depth_from_display_info(timing_out, info);
3901 }
3902 }
3903 }
3905 static void fill_audio_info(struct audio_info *audio_info,
3906 const struct drm_connector *drm_connector,
3907 const struct dc_sink *dc_sink)
3908 {
3909 int i = 0;
3910 int cea_revision = 0;
3911 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3913 audio_info->manufacture_id = edid_caps->manufacturer_id;
3914 audio_info->product_id = edid_caps->product_id;
3916 cea_revision = drm_connector->display_info.cea_rev;
3918 strscpy(audio_info->display_name,
3919 edid_caps->display_name,
3920 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3922 if (cea_revision >= 3) {
3923 audio_info->mode_count = edid_caps->audio_mode_count;
3925 for (i = 0; i < audio_info->mode_count; ++i) {
3926 audio_info->modes[i].format_code =
3927 (enum audio_format_code)
3928 (edid_caps->audio_modes[i].format_code);
3929 audio_info->modes[i].channel_count =
3930 edid_caps->audio_modes[i].channel_count;
3931 audio_info->modes[i].sample_rates.all =
3932 edid_caps->audio_modes[i].sample_rate;
3933 audio_info->modes[i].sample_size =
3934 edid_caps->audio_modes[i].sample_size;
3935 }
3936 }
3938 audio_info->flags.all = edid_caps->speaker_flags;
3940 /* TODO: We only check for the progressive mode, check for interlace mode too */
3941 if (drm_connector->latency_present[0]) {
3942 audio_info->video_latency = drm_connector->video_latency[0];
3943 audio_info->audio_latency = drm_connector->audio_latency[0];
3944 }
3946 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3947 }
3949 static void
3951 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3952 struct drm_display_mode *dst_mode)
3953 {
3954 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3955 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3956 dst_mode->crtc_clock = src_mode->crtc_clock;
3957 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3958 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3959 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
3960 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3961 dst_mode->crtc_htotal = src_mode->crtc_htotal;
3962 dst_mode->crtc_hskew = src_mode->crtc_hskew;
3963 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3964 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3965 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3966 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3967 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3968 }
3970 static void
3971 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3972 const struct drm_display_mode *native_mode,
3973 bool scale_enabled)
3974 {
3975 if (scale_enabled) {
3976 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3977 } else if (native_mode->clock == drm_mode->clock &&
3978 native_mode->htotal == drm_mode->htotal &&
3979 native_mode->vtotal == drm_mode->vtotal) {
3980 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3981 } else {
3982 /* no scaling nor amdgpu inserted, no need to patch */
3983 }
3984 }
3986 static struct dc_sink *
3987 create_fake_sink(struct amdgpu_dm_connector *aconnector)
3988 {
3989 struct dc_sink_init_data sink_init_data = { 0 };
3990 struct dc_sink *sink = NULL;
3991 sink_init_data.link = aconnector->dc_link;
3992 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3994 sink = dc_sink_create(&sink_init_data);
3996 DRM_ERROR("Failed to create sink!\n");
3999 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4004 static void set_multisync_trigger_params(
4005 struct dc_stream_state *stream)
4006 {
4007 if (stream->triggered_crtc_reset.enabled) {
4008 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4009 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4010 }
4011 }
4013 static void set_master_stream(struct dc_stream_state *stream_set[],
4014 int stream_count)
4015 {
4016 int j, highest_rfr = 0, master_stream = 0;
4018 for (j = 0; j < stream_count; j++) {
4019 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4020 int refresh_rate = 0;
4022 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4023 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4024 if (refresh_rate > highest_rfr) {
4025 highest_rfr = refresh_rate;
4026 master_stream = j;
4027 }
4028 }
4029 }
4030 for (j = 0; j < stream_count; j++) {
4031 if (stream_set[j])
4032 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4033 }
4034 }
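/*
 * Worked example (illustrative): a 1080p60 CEA timing has pix_clk_100hz =
 * 1485000, h_total = 2200 and v_total = 1125, so refresh_rate =
 * 1485000 * 100 / (2200 * 1125) = 60; the enabled stream with the highest
 * such rate is chosen as master above.
 */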
4036 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4037 {
4038 int i;
4040 if (context->stream_count < 2)
4041 return;
4042 for (i = 0; i < context->stream_count ; i++) {
4043 if (!context->streams[i])
4044 continue;
4045 /*
4046 * TODO: add a function to read AMD VSDB bits and set
4047 * crtc_sync_master.multi_sync_enabled flag
4048 * For now it's set to false
4049 */
4050 set_multisync_trigger_params(context->streams[i]);
4051 }
4052 set_master_stream(context->streams, context->stream_count);
4053 }
4055 static struct dc_stream_state *
4056 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4057 const struct drm_display_mode *drm_mode,
4058 const struct dm_connector_state *dm_state,
4059 const struct dc_stream_state *old_stream)
4060 {
4061 struct drm_display_mode *preferred_mode = NULL;
4062 struct drm_connector *drm_connector;
4063 const struct drm_connector_state *con_state =
4064 dm_state ? &dm_state->base : NULL;
4065 struct dc_stream_state *stream = NULL;
4066 struct drm_display_mode mode = *drm_mode;
4067 bool native_mode_found = false;
4068 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4069 int mode_refresh;
4070 int preferred_refresh = 0;
4071 #if defined(CONFIG_DRM_AMD_DC_DCN)
4072 struct dsc_dec_dpcd_caps dsc_caps;
4073 #endif
4074 uint32_t link_bandwidth_kbps;
4076 struct dc_sink *sink = NULL;
4077 if (aconnector == NULL) {
4078 DRM_ERROR("aconnector is NULL!\n");
4082 drm_connector = &aconnector->base;
4084 if (!aconnector->dc_sink) {
4085 sink = create_fake_sink(aconnector);
4086 if (!sink)
4087 return stream;
4088 } else {
4089 sink = aconnector->dc_sink;
4090 dc_sink_retain(sink);
4091 }
4093 stream = dc_create_stream_for_sink(sink);
4095 if (stream == NULL) {
4096 DRM_ERROR("Failed to create stream for sink!\n");
4100 stream->dm_stream_context = aconnector;
4102 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4103 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4105 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4106 /* Search for preferred mode */
4107 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4108 native_mode_found = true;
4109 break;
4110 }
4111 }
4112 if (!native_mode_found)
4113 preferred_mode = list_first_entry_or_null(
4114 &aconnector->base.modes,
4115 struct drm_display_mode,
4116 head);
4118 mode_refresh = drm_mode_vrefresh(&mode);
4120 if (preferred_mode == NULL) {
4121 /*
4122 * This may not be an error, the use case is when we have no
4123 * usermode calls to reset and set mode upon hotplug. In this
4124 * case, we call set mode ourselves to restore the previous mode
4125 * and the modelist may not be filled in in time.
4126 */
4127 DRM_DEBUG_DRIVER("No preferred mode found\n");
4128 } else {
4129 decide_crtc_timing_for_drm_display_mode(
4130 &mode, preferred_mode,
4131 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4132 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4133 }
4136 drm_mode_set_crtcinfo(&mode, 0);
4138 /*
4139 * If scaling is enabled and refresh rate didn't change,
4140 * we copy the vic and polarities of the old timings.
4141 */
4142 if (!scale || mode_refresh != preferred_refresh)
4143 fill_stream_properties_from_drm_display_mode(stream,
4144 &mode, &aconnector->base, con_state, NULL);
4145 else
4146 fill_stream_properties_from_drm_display_mode(stream,
4147 &mode, &aconnector->base, con_state, old_stream);
4149 stream->timing.flags.DSC = 0;
4151 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4152 #if defined(CONFIG_DRM_AMD_DC_DCN)
4153 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4154 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4155 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4156 &dsc_caps);
4157 #endif
4158 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4159 dc_link_get_link_cap(aconnector->dc_link));
4161 #if defined(CONFIG_DRM_AMD_DC_DCN)
4162 if (dsc_caps.is_dsc_supported)
4163 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4164 &dsc_caps,
4165 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4166 link_bandwidth_kbps,
4167 &stream->timing,
4168 &stream->timing.dsc_cfg))
4169 stream->timing.flags.DSC = 1;
4170 #endif
4171 }
4173 update_stream_scaling_settings(&mode, dm_state, stream);
4175 fill_audio_info(
4176 &stream->audio_info,
4177 drm_connector,
4178 sink);
4180 update_stream_signal(stream, sink);
4182 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4183 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4184 if (stream->link->psr_feature_enabled) {
4185 struct dc *core_dc = stream->link->ctx->dc;
4187 if (dc_is_dmcu_initialized(core_dc)) {
4188 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4190 stream->psr_version = dmcu->dmcu_version.psr_version;
4191 mod_build_vsc_infopacket(stream,
4192 &stream->vsc_infopacket,
4193 &stream->use_vsc_sdp_for_colorimetry);
4194 }
4195 }
4196 finish:
4197 dc_sink_release(sink);
4199 return stream;
4200 }
4202 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4203 {
4204 drm_crtc_cleanup(crtc);
4205 kfree(crtc);
4206 }
4208 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4209 struct drm_crtc_state *state)
4210 {
4211 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4213 /* TODO: destroy the dc_stream objects once the stream object is flattened */
4214 if (cur->stream)
4215 dc_stream_release(cur->stream);
4218 __drm_atomic_helper_crtc_destroy_state(state);
4220 kfree(state);
4221 }
4224 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4225 {
4226 struct dm_crtc_state *state;
4228 if (crtc->state)
4229 dm_crtc_destroy_state(crtc, crtc->state);
4231 state = kzalloc(sizeof(*state), GFP_KERNEL);
4232 if (WARN_ON(!state))
4233 return;
4235 crtc->state = &state->base;
4236 crtc->state->crtc = crtc;
4237 }
4240 static struct drm_crtc_state *
4241 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4242 {
4243 struct dm_crtc_state *state, *cur;
4245 cur = to_dm_crtc_state(crtc->state);
4247 if (WARN_ON(!crtc->state))
4248 return NULL;
4250 state = kzalloc(sizeof(*state), GFP_KERNEL);
4251 if (!state)
4252 return NULL;
4254 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4256 if (cur->stream) {
4257 state->stream = cur->stream;
4258 dc_stream_retain(state->stream);
4259 }
4261 state->active_planes = cur->active_planes;
4262 state->interrupts_enabled = cur->interrupts_enabled;
4263 state->vrr_params = cur->vrr_params;
4264 state->vrr_infopacket = cur->vrr_infopacket;
4265 state->abm_level = cur->abm_level;
4266 state->vrr_supported = cur->vrr_supported;
4267 state->freesync_config = cur->freesync_config;
4268 state->crc_src = cur->crc_src;
4269 state->cm_has_degamma = cur->cm_has_degamma;
4270 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4272 /* TODO: Duplicate dc_stream after the stream object is flattened */
4274 return &state->base;
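/*
 * Enable/disable the VUPDATE interrupt for a CRTC. This is only managed
 * by hand on pre-DCN ASICs; DCN hardware (family > AMDGPU_FAMILY_AI)
 * returns early below and leaves the vupdate irq alone.
 */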
4277 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4279 enum dc_irq_source irq_source;
4280 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4281 struct amdgpu_device *adev = crtc->dev->dev_private;
4284 /* Do not set vupdate for DCN hardware */
4285 if (adev->family > AMDGPU_FAMILY_AI)
4288 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4290 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4292 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4293 acrtc->crtc_id, enable ? "en" : "dis", rc);
4297 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4299 enum dc_irq_source irq_source;
4300 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4301 struct amdgpu_device *adev = crtc->dev->dev_private;
4302 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4306 /* vblank irq on -> Only need vupdate irq in vrr mode */
4307 if (amdgpu_dm_vrr_active(acrtc_state))
4308 rc = dm_set_vupdate_irq(crtc, true);
4310 /* vblank irq off -> vupdate irq off */
4311 rc = dm_set_vupdate_irq(crtc, false);
4317 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4318 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4321 static int dm_enable_vblank(struct drm_crtc *crtc)
4323 return dm_set_vblank(crtc, true);
4326 static void dm_disable_vblank(struct drm_crtc *crtc)
4328 dm_set_vblank(crtc, false);
4331 /* Only the options currently available for the driver are implemented */
4332 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4333 .reset = dm_crtc_reset_state,
4334 .destroy = amdgpu_dm_crtc_destroy,
4335 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4336 .set_config = drm_atomic_helper_set_config,
4337 .page_flip = drm_atomic_helper_page_flip,
4338 .atomic_duplicate_state = dm_crtc_duplicate_state,
4339 .atomic_destroy_state = dm_crtc_destroy_state,
4340 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4341 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4342 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4343 .enable_vblank = dm_enable_vblank,
4344 .disable_vblank = dm_disable_vblank,
4347 static enum drm_connector_status
4348 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4351 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4355 * 1. This interface is NOT called in the context of the HPD irq.
4356 * 2. This interface *is called* in the context of a user-mode ioctl,
4357 * which makes it a bad place for *any* MST-related activity.
4360 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4361 !aconnector->fake_enable)
4362 connected = (aconnector->dc_sink != NULL);
4364 connected = (aconnector->base.force == DRM_FORCE_ON);
4366 return (connected ? connector_status_connected :
4367 connector_status_disconnected);
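/*
 * Translate DRM connector properties (scaling mode, underscan borders,
 * ABM level) into dm_connector_state fields. The inverse mapping lives
 * in amdgpu_dm_connector_atomic_get_property() below.
 */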
4370 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4371 struct drm_connector_state *connector_state,
4372 struct drm_property *property,
4375 struct drm_device *dev = connector->dev;
4376 struct amdgpu_device *adev = dev->dev_private;
4377 struct dm_connector_state *dm_old_state =
4378 to_dm_connector_state(connector->state);
4379 struct dm_connector_state *dm_new_state =
4380 to_dm_connector_state(connector_state);
4384 if (property == dev->mode_config.scaling_mode_property) {
4385 enum amdgpu_rmx_type rmx_type;
4388 case DRM_MODE_SCALE_CENTER:
4389 rmx_type = RMX_CENTER;
4391 case DRM_MODE_SCALE_ASPECT:
4392 rmx_type = RMX_ASPECT;
4394 case DRM_MODE_SCALE_FULLSCREEN:
4395 rmx_type = RMX_FULL;
4397 case DRM_MODE_SCALE_NONE:
4403 if (dm_old_state->scaling == rmx_type)
4406 dm_new_state->scaling = rmx_type;
4408 } else if (property == adev->mode_info.underscan_hborder_property) {
4409 dm_new_state->underscan_hborder = val;
4411 } else if (property == adev->mode_info.underscan_vborder_property) {
4412 dm_new_state->underscan_vborder = val;
4414 } else if (property == adev->mode_info.underscan_property) {
4415 dm_new_state->underscan_enable = val;
4417 } else if (property == adev->mode_info.abm_level_property) {
4418 dm_new_state->abm_level = val;
4425 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4426 const struct drm_connector_state *state,
4427 struct drm_property *property,
4430 struct drm_device *dev = connector->dev;
4431 struct amdgpu_device *adev = dev->dev_private;
4432 struct dm_connector_state *dm_state =
4433 to_dm_connector_state(state);
4436 if (property == dev->mode_config.scaling_mode_property) {
4437 switch (dm_state->scaling) {
4439 *val = DRM_MODE_SCALE_CENTER;
4442 *val = DRM_MODE_SCALE_ASPECT;
4445 *val = DRM_MODE_SCALE_FULLSCREEN;
4449 *val = DRM_MODE_SCALE_NONE;
4453 } else if (property == adev->mode_info.underscan_hborder_property) {
4454 *val = dm_state->underscan_hborder;
4456 } else if (property == adev->mode_info.underscan_vborder_property) {
4457 *val = dm_state->underscan_vborder;
4459 } else if (property == adev->mode_info.underscan_property) {
4460 *val = dm_state->underscan_enable;
4462 } else if (property == adev->mode_info.abm_level_property) {
4463 *val = dm_state->abm_level;
4470 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4472 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4474 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4477 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4479 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4480 const struct dc_link *link = aconnector->dc_link;
4481 struct amdgpu_device *adev = connector->dev->dev_private;
4482 struct amdgpu_display_manager *dm = &adev->dm;
4484 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4485 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4487 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4488 link->type != dc_connection_none &&
4489 dm->backlight_dev) {
4490 backlight_device_unregister(dm->backlight_dev);
4491 dm->backlight_dev = NULL;
4495 if (aconnector->dc_em_sink)
4496 dc_sink_release(aconnector->dc_em_sink);
4497 aconnector->dc_em_sink = NULL;
4498 if (aconnector->dc_sink)
4499 dc_sink_release(aconnector->dc_sink);
4500 aconnector->dc_sink = NULL;
4502 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4503 drm_connector_unregister(connector);
4504 drm_connector_cleanup(connector);
4505 if (aconnector->i2c) {
4506 i2c_del_adapter(&aconnector->i2c->base);
4507 kfree(aconnector->i2c);
4513 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4515 struct dm_connector_state *state =
4516 to_dm_connector_state(connector->state);
4518 if (connector->state)
4519 __drm_atomic_helper_connector_destroy_state(connector->state);
4523 state = kzalloc(sizeof(*state), GFP_KERNEL);
4526 state->scaling = RMX_OFF;
4527 state->underscan_enable = false;
4528 state->underscan_hborder = 0;
4529 state->underscan_vborder = 0;
4530 state->base.max_requested_bpc = 8;
4531 state->vcpi_slots = 0;
4533 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4534 state->abm_level = amdgpu_dm_abm_level;
4536 __drm_atomic_helper_connector_reset(connector, &state->base);
4540 struct drm_connector_state *
4541 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4543 struct dm_connector_state *state =
4544 to_dm_connector_state(connector->state);
4546 struct dm_connector_state *new_state =
4547 kmemdup(state, sizeof(*state), GFP_KERNEL);
4552 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4554 new_state->freesync_capable = state->freesync_capable;
4555 new_state->abm_level = state->abm_level;
4556 new_state->scaling = state->scaling;
4557 new_state->underscan_enable = state->underscan_enable;
4558 new_state->underscan_hborder = state->underscan_hborder;
4559 new_state->underscan_vborder = state->underscan_vborder;
4560 new_state->vcpi_slots = state->vcpi_slots;
4561 new_state->pbn = state->pbn;
4562 return &new_state->base;
4565 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4566 .reset = amdgpu_dm_connector_funcs_reset,
4567 .detect = amdgpu_dm_connector_detect,
4568 .fill_modes = drm_helper_probe_single_connector_modes,
4569 .destroy = amdgpu_dm_connector_destroy,
4570 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4571 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4572 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4573 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4574 .early_unregister = amdgpu_dm_connector_unregister
4577 static int get_modes(struct drm_connector *connector)
4579 return amdgpu_dm_connector_get_modes(connector);
4582 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4584 struct dc_sink_init_data init_params = {
4585 .link = aconnector->dc_link,
4586 .sink_signal = SIGNAL_TYPE_VIRTUAL
4590 if (!aconnector->base.edid_blob_ptr) {
4591 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4592 aconnector->base.name);
4594 aconnector->base.force = DRM_FORCE_OFF;
4595 aconnector->base.override_edid = false;
4599 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4601 aconnector->edid = edid;
4603 aconnector->dc_em_sink = dc_link_add_remote_sink(
4604 aconnector->dc_link,
4606 (edid->extensions + 1) * EDID_LENGTH,
4609 if (aconnector->base.force == DRM_FORCE_ON) {
4610 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4611 aconnector->dc_link->local_sink :
4612 aconnector->dc_em_sink;
4613 dc_sink_retain(aconnector->dc_sink);
4617 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4619 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4622 * In case of a headless boot with force on for a DP managed connector,
4623 * these settings have to be != 0 to get an initial modeset
4625 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4626 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4627 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4631 aconnector->base.override_edid = true;
4632 create_eml_sink(aconnector);
4635 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4636 struct drm_display_mode *mode)
4638 int result = MODE_ERROR;
4639 struct dc_sink *dc_sink;
4640 struct amdgpu_device *adev = connector->dev->dev_private;
4641 /* TODO: Unhardcode stream count */
4642 struct dc_stream_state *stream;
4643 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4644 enum dc_status dc_result = DC_OK;
4646 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4647 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4651 * Only run this the first time mode_valid is called to initialize
4654 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4655 !aconnector->dc_em_sink)
4656 handle_edid_mgmt(aconnector);
4658 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4660 if (dc_sink == NULL) {
4661 DRM_ERROR("dc_sink is NULL!\n");
4665 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4666 if (stream == NULL) {
4667 DRM_ERROR("Failed to create stream for sink!\n");
4671 dc_result = dc_validate_stream(adev->dm.dc, stream);
4673 if (dc_result == DC_OK)
4676 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4682 dc_stream_release(stream);
4685 /* TODO: error handling */
4689 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4690 struct dc_info_packet *out)
4692 struct hdmi_drm_infoframe frame;
4693 unsigned char buf[30]; /* 26 + 4 */
4697 memset(out, 0, sizeof(*out));
4699 if (!state->hdr_output_metadata)
4702 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4706 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4710 /* Static metadata is a fixed 26 bytes + 4 byte header. */
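/*
 * The payload is shared between HDMI and DP; only the packet header
 * differs. HDMI carries it as a Dynamic Range and Mastering (DRM)
 * InfoFrame (type 0x87), while DP wraps the same bytes in an SDP.
 */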
4714 /* Prepare the infopacket for DC. */
4715 switch (state->connector->connector_type) {
4716 case DRM_MODE_CONNECTOR_HDMIA:
4717 out->hb0 = 0x87; /* type */
4718 out->hb1 = 0x01; /* version */
4719 out->hb2 = 0x1A; /* length */
4720 out->sb[0] = buf[3]; /* checksum */
4724 case DRM_MODE_CONNECTOR_DisplayPort:
4725 case DRM_MODE_CONNECTOR_eDP:
4726 out->hb0 = 0x00; /* sdp id, zero */
4727 out->hb1 = 0x87; /* type */
4728 out->hb2 = 0x1D; /* payload len - 1 */
4729 out->hb3 = (0x13 << 2); /* sdp version */
4730 out->sb[0] = 0x01; /* version */
4731 out->sb[1] = 0x1A; /* length */
4739 memcpy(&out->sb[i], &buf[4], 26);
4742 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4743 sizeof(out->sb), false);
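/*
 * Two metadata blobs are considered different when the pointers differ,
 * unless both exist with equal length and identical contents. This is
 * what gates the HDR-induced modeset in atomic_check below.
 */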
4749 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4750 const struct drm_connector_state *new_state)
4752 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4753 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4755 if (old_blob != new_blob) {
4756 if (old_blob && new_blob &&
4757 old_blob->length == new_blob->length)
4758 return memcmp(old_blob->data, new_blob->data,
4768 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4769 struct drm_atomic_state *state)
4771 struct drm_connector_state *new_con_state =
4772 drm_atomic_get_new_connector_state(state, conn);
4773 struct drm_connector_state *old_con_state =
4774 drm_atomic_get_old_connector_state(state, conn);
4775 struct drm_crtc *crtc = new_con_state->crtc;
4776 struct drm_crtc_state *new_crtc_state;
4782 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4783 struct dc_info_packet hdr_infopacket;
4785 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4789 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4790 if (IS_ERR(new_crtc_state))
4791 return PTR_ERR(new_crtc_state);
4794 * DC considers the stream backends changed if the
4795 * static metadata changes. Forcing the modeset also
4796 * gives a simple way for userspace to switch from
4797 * 8bpc to 10bpc when setting the metadata to enter
4800 * Changing the static metadata after it's been
4801 * set is permissible, however. So only force a
4802 * modeset if we're entering or exiting HDR.
4804 new_crtc_state->mode_changed =
4805 !old_con_state->hdr_output_metadata ||
4806 !new_con_state->hdr_output_metadata;
4812 static const struct drm_connector_helper_funcs
4813 amdgpu_dm_connector_helper_funcs = {
4815 * If hotplugging a second, bigger display in FB console mode, higher
4816 * resolution modes will be filtered by drm_mode_validate_size(), and those
4817 * modes are missing after the user starts lightdm. So we need to renew the
4818 * modes list in the get_modes callback, not just return the modes count
4820 .get_modes = get_modes,
4821 .mode_valid = amdgpu_dm_connector_mode_valid,
4822 .atomic_check = amdgpu_dm_connector_atomic_check,
4825 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4829 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4831 struct drm_device *dev = new_crtc_state->crtc->dev;
4832 struct drm_plane *plane;
4834 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4835 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4842 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4844 struct drm_atomic_state *state = new_crtc_state->state;
4845 struct drm_plane *plane;
4848 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4849 struct drm_plane_state *new_plane_state;
4851 /* Cursor planes are "fake". */
4852 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4855 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4857 if (!new_plane_state) {
4859 * The plane is enabled on the CRTC and hasn't changed
4860 * state. This means that it previously passed
4861 * validation and is therefore enabled.
4867 /* We need a framebuffer to be considered enabled. */
4868 num_active += (new_plane_state->fb != NULL);
4875 * Sets whether interrupts should be enabled on a specific CRTC.
4876 * We require that the stream be enabled and that there exist active
4877 * DC planes on the stream.
4880 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4881 struct drm_crtc_state *new_crtc_state)
4883 struct dm_crtc_state *dm_new_crtc_state =
4884 to_dm_crtc_state(new_crtc_state);
4886 dm_new_crtc_state->active_planes = 0;
4887 dm_new_crtc_state->interrupts_enabled = false;
4889 if (!dm_new_crtc_state->stream)
4892 dm_new_crtc_state->active_planes =
4893 count_crtc_active_planes(new_crtc_state);
4895 dm_new_crtc_state->interrupts_enabled =
4896 dm_new_crtc_state->active_planes > 0;
4899 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4900 struct drm_crtc_state *state)
4902 struct amdgpu_device *adev = crtc->dev->dev_private;
4903 struct dc *dc = adev->dm.dc;
4904 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4908 * Update interrupt state for the CRTC. This needs to happen whenever
4909 * the CRTC has changed or whenever any of its planes have changed.
4910 * Atomic check satisfies both of these requirements since the CRTC
4911 * is added to the state by DRM during drm_atomic_helper_check_planes.
4913 dm_update_crtc_interrupt_state(crtc, state);
4915 if (unlikely(!dm_crtc_state->stream &&
4916 modeset_required(state, NULL, dm_crtc_state->stream))) {
4921 /* In some use cases, like reset, no stream is attached */
4922 if (!dm_crtc_state->stream)
4926 * We want at least one hardware plane enabled to use
4927 * the stream with a cursor enabled.
4929 if (state->enable && state->active &&
4930 does_crtc_have_active_cursor(state) &&
4931 dm_crtc_state->active_planes == 0)
4934 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
4940 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4941 const struct drm_display_mode *mode,
4942 struct drm_display_mode *adjusted_mode)
4947 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4948 .disable = dm_crtc_helper_disable,
4949 .atomic_check = dm_crtc_helper_atomic_check,
4950 .mode_fixup = dm_crtc_helper_mode_fixup
4953 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
4958 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
4960 switch (display_color_depth) {
4961 case COLOR_DEPTH_666:
4963 case COLOR_DEPTH_888:
4965 case COLOR_DEPTH_101010:
4967 case COLOR_DEPTH_121212:
4969 case COLOR_DEPTH_141414:
4971 case COLOR_DEPTH_161616:
4979 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
4980 struct drm_crtc_state *crtc_state,
4981 struct drm_connector_state *conn_state)
4983 struct drm_atomic_state *state = crtc_state->state;
4984 struct drm_connector *connector = conn_state->connector;
4985 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4986 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
4987 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
4988 struct drm_dp_mst_topology_mgr *mst_mgr;
4989 struct drm_dp_mst_port *mst_port;
4990 enum dc_color_depth color_depth;
4992 bool is_y420 = false;
4994 if (!aconnector->port || !aconnector->dc_sink)
4997 mst_port = aconnector->port;
4998 mst_mgr = &aconnector->mst_port->mst_mgr;
5000 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5003 if (!state->duplicated) {
5004 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5005 aconnector->force_yuv420_output;
5006 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5008 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5009 clock = adjusted_mode->clock;
5010 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
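/*
 * With the PBN for this mode known, atomically reserve VCPI time slots
 * on the MST topology; a negative return means the topology cannot fit
 * this stream and the atomic check must fail.
 */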
5012 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5015 dm_new_connector_state->pbn,
5017 if (dm_new_connector_state->vcpi_slots < 0) {
5018 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5019 return dm_new_connector_state->vcpi_slots;
5024 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5025 .disable = dm_encoder_helper_disable,
5026 .atomic_check = dm_encoder_helper_atomic_check
5029 #if defined(CONFIG_DRM_AMD_DC_DCN)
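/*
 * For MST streams that end up DSC-compressed, PBN must be recomputed
 * from the compressed bits-per-pixel in dsc_cfg rather than the
 * uncompressed stream bpp, and the VCPI allocation updated to match.
 */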
5030 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5031 struct dc_state *dc_state)
5033 struct dc_stream_state *stream = NULL;
5034 struct drm_connector *connector;
5035 struct drm_connector_state *new_con_state, *old_con_state;
5036 struct amdgpu_dm_connector *aconnector;
5037 struct dm_connector_state *dm_conn_state;
5038 int i, j, clock, bpp;
5039 int vcpi, pbn_div, pbn = 0;
5041 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5043 aconnector = to_amdgpu_dm_connector(connector);
5045 if (!aconnector->port)
5048 if (!new_con_state || !new_con_state->crtc)
5051 dm_conn_state = to_dm_connector_state(new_con_state);
5053 for (j = 0; j < dc_state->stream_count; j++) {
5054 stream = dc_state->streams[j];
5058 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5067 if (stream->timing.flags.DSC != 1) {
5068 drm_dp_mst_atomic_enable_dsc(state,
5076 pbn_div = dm_mst_get_pbn_divider(stream->link);
5077 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5078 clock = stream->timing.pix_clk_100hz / 10;
5079 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5080 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5087 dm_conn_state->pbn = pbn;
5088 dm_conn_state->vcpi_slots = vcpi;
5094 static void dm_drm_plane_reset(struct drm_plane *plane)
5096 struct dm_plane_state *amdgpu_state = NULL;
5099 plane->funcs->atomic_destroy_state(plane, plane->state);
5101 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5102 WARN_ON(amdgpu_state == NULL);
5105 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5108 static struct drm_plane_state *
5109 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5111 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5113 old_dm_plane_state = to_dm_plane_state(plane->state);
5114 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5115 if (!dm_plane_state)
5118 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5120 if (old_dm_plane_state->dc_state) {
5121 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5122 dc_plane_state_retain(dm_plane_state->dc_state);
5125 return &dm_plane_state->base;
5128 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5129 struct drm_plane_state *state)
5131 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5133 if (dm_plane_state->dc_state)
5134 dc_plane_state_release(dm_plane_state->dc_state);
5136 drm_atomic_helper_plane_destroy_state(plane, state);
5139 static const struct drm_plane_funcs dm_plane_funcs = {
5140 .update_plane = drm_atomic_helper_update_plane,
5141 .disable_plane = drm_atomic_helper_disable_plane,
5142 .destroy = drm_primary_helper_destroy,
5143 .reset = dm_drm_plane_reset,
5144 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5145 .atomic_destroy_state = dm_drm_plane_destroy_state,
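/*
 * prepare_fb pins the backing BO into a scanout-capable domain and maps
 * it into GART so DC can program the surface address; cleanup_fb undoes
 * the pin once the plane state is retired.
 */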
5148 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5149 struct drm_plane_state *new_state)
5151 struct amdgpu_framebuffer *afb;
5152 struct drm_gem_object *obj;
5153 struct amdgpu_device *adev;
5154 struct amdgpu_bo *rbo;
5155 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5156 struct list_head list;
5157 struct ttm_validate_buffer tv;
5158 struct ww_acquire_ctx ticket;
5159 uint64_t tiling_flags;
5163 dm_plane_state_old = to_dm_plane_state(plane->state);
5164 dm_plane_state_new = to_dm_plane_state(new_state);
5166 if (!new_state->fb) {
5167 DRM_DEBUG_DRIVER("No FB bound\n");
5171 afb = to_amdgpu_framebuffer(new_state->fb);
5172 obj = new_state->fb->obj[0];
5173 rbo = gem_to_amdgpu_bo(obj);
5174 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5175 INIT_LIST_HEAD(&list);
5179 list_add(&tv.head, &list);
5181 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5183 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5187 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5188 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5190 domain = AMDGPU_GEM_DOMAIN_VRAM;
5192 r = amdgpu_bo_pin(rbo, domain);
5193 if (unlikely(r != 0)) {
5194 if (r != -ERESTARTSYS)
5195 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5196 ttm_eu_backoff_reservation(&ticket, &list);
5200 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5201 if (unlikely(r != 0)) {
5202 amdgpu_bo_unpin(rbo);
5203 ttm_eu_backoff_reservation(&ticket, &list);
5204 DRM_ERROR("%p bind failed\n", rbo);
5208 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5210 ttm_eu_backoff_reservation(&ticket, &list);
5212 afb->address = amdgpu_bo_gpu_offset(rbo);
5216 if (dm_plane_state_new->dc_state &&
5217 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5218 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5220 fill_plane_buffer_attributes(
5221 adev, afb, plane_state->format, plane_state->rotation,
5222 tiling_flags, &plane_state->tiling_info,
5223 &plane_state->plane_size, &plane_state->dcc,
5224 &plane_state->address);
5230 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5231 struct drm_plane_state *old_state)
5233 struct amdgpu_bo *rbo;
5239 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5240 r = amdgpu_bo_reserve(rbo, false);
5242 DRM_ERROR("failed to reserve rbo before unpin\n");
5246 amdgpu_bo_unpin(rbo);
5247 amdgpu_bo_unreserve(rbo);
5248 amdgpu_bo_unref(&rbo);
5251 static int dm_plane_atomic_check(struct drm_plane *plane,
5252 struct drm_plane_state *state)
5254 struct amdgpu_device *adev = plane->dev->dev_private;
5255 struct dc *dc = adev->dm.dc;
5256 struct dm_plane_state *dm_plane_state;
5257 struct dc_scaling_info scaling_info;
5260 dm_plane_state = to_dm_plane_state(state);
5262 if (!dm_plane_state->dc_state)
5265 ret = fill_dc_scaling_info(state, &scaling_info);
5269 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5275 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5276 struct drm_plane_state *new_plane_state)
5278 /* Only support async updates on cursor planes. */
5279 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5285 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5286 struct drm_plane_state *new_state)
5288 struct drm_plane_state *old_state =
5289 drm_atomic_get_old_plane_state(new_state->state, plane);
5291 swap(plane->state->fb, new_state->fb);
5293 plane->state->src_x = new_state->src_x;
5294 plane->state->src_y = new_state->src_y;
5295 plane->state->src_w = new_state->src_w;
5296 plane->state->src_h = new_state->src_h;
5297 plane->state->crtc_x = new_state->crtc_x;
5298 plane->state->crtc_y = new_state->crtc_y;
5299 plane->state->crtc_w = new_state->crtc_w;
5300 plane->state->crtc_h = new_state->crtc_h;
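/*
 * The field copies above stand in for the state swap that async commits
 * skip; with plane->state now current, the cursor can be reprogrammed
 * directly.
 */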
5302 handle_cursor_update(plane, old_state);
5305 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5306 .prepare_fb = dm_plane_helper_prepare_fb,
5307 .cleanup_fb = dm_plane_helper_cleanup_fb,
5308 .atomic_check = dm_plane_atomic_check,
5309 .atomic_async_check = dm_plane_atomic_async_check,
5310 .atomic_async_update = dm_plane_atomic_async_update
5314 * TODO: these are currently initialized to rgb formats only.
5315 * For future use cases we should either initialize them dynamically based on
5316 * plane capabilities, or initialize this array to all formats, so the internal
5317 * drm check will succeed, and let DC implement the proper check
5319 static const uint32_t rgb_formats[] = {
5320 DRM_FORMAT_XRGB8888,
5321 DRM_FORMAT_ARGB8888,
5322 DRM_FORMAT_RGBA8888,
5323 DRM_FORMAT_XRGB2101010,
5324 DRM_FORMAT_XBGR2101010,
5325 DRM_FORMAT_ARGB2101010,
5326 DRM_FORMAT_ABGR2101010,
5327 DRM_FORMAT_XBGR8888,
5328 DRM_FORMAT_ABGR8888,
5332 static const uint32_t overlay_formats[] = {
5333 DRM_FORMAT_XRGB8888,
5334 DRM_FORMAT_ARGB8888,
5335 DRM_FORMAT_RGBA8888,
5336 DRM_FORMAT_XBGR8888,
5337 DRM_FORMAT_ABGR8888,
5341 static const u32 cursor_formats[] = {
5345 static int get_plane_formats(const struct drm_plane *plane,
5346 const struct dc_plane_cap *plane_cap,
5347 uint32_t *formats, int max_formats)
5349 int i, num_formats = 0;
5352 * TODO: Query support for each group of formats directly from
5353 * DC plane caps. This will require adding more formats to the
5357 switch (plane->type) {
5358 case DRM_PLANE_TYPE_PRIMARY:
5359 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5360 if (num_formats >= max_formats)
5363 formats[num_formats++] = rgb_formats[i];
5366 if (plane_cap && plane_cap->pixel_format_support.nv12)
5367 formats[num_formats++] = DRM_FORMAT_NV12;
5370 case DRM_PLANE_TYPE_OVERLAY:
5371 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5372 if (num_formats >= max_formats)
5375 formats[num_formats++] = overlay_formats[i];
5379 case DRM_PLANE_TYPE_CURSOR:
5380 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5381 if (num_formats >= max_formats)
5384 formats[num_formats++] = cursor_formats[i];
5392 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5393 struct drm_plane *plane,
5394 unsigned long possible_crtcs,
5395 const struct dc_plane_cap *plane_cap)
5397 uint32_t formats[32];
5401 num_formats = get_plane_formats(plane, plane_cap, formats,
5402 ARRAY_SIZE(formats));
5404 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5405 &dm_plane_funcs, formats, num_formats,
5406 NULL, plane->type, NULL);
5410 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5411 plane_cap && plane_cap->per_pixel_alpha) {
5412 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5413 BIT(DRM_MODE_BLEND_PREMULTI);
5415 drm_plane_create_alpha_property(plane);
5416 drm_plane_create_blend_mode_property(plane, blend_caps);
5419 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5420 plane_cap && plane_cap->pixel_format_support.nv12) {
5421 /* This only affects YUV formats. */
5422 drm_plane_create_color_properties(
5424 BIT(DRM_COLOR_YCBCR_BT601) |
5425 BIT(DRM_COLOR_YCBCR_BT709),
5426 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5427 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5428 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5431 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5433 /* Create (reset) the plane state */
5434 if (plane->funcs->reset)
5435 plane->funcs->reset(plane);
5440 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5441 struct drm_plane *plane,
5442 uint32_t crtc_index)
5444 struct amdgpu_crtc *acrtc = NULL;
5445 struct drm_plane *cursor_plane;
5449 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5453 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5454 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5456 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5460 res = drm_crtc_init_with_planes(
5465 &amdgpu_dm_crtc_funcs, NULL);
5470 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5472 /* Create (reset) the CRTC state */
5473 if (acrtc->base.funcs->reset)
5474 acrtc->base.funcs->reset(&acrtc->base);
5476 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5477 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5479 acrtc->crtc_id = crtc_index;
5480 acrtc->base.enabled = false;
5481 acrtc->otg_inst = -1;
5483 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5484 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5485 true, MAX_COLOR_LUT_ENTRIES);
5486 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5492 kfree(cursor_plane);
5497 static int to_drm_connector_type(enum signal_type st)
5500 case SIGNAL_TYPE_HDMI_TYPE_A:
5501 return DRM_MODE_CONNECTOR_HDMIA;
5502 case SIGNAL_TYPE_EDP:
5503 return DRM_MODE_CONNECTOR_eDP;
5504 case SIGNAL_TYPE_LVDS:
5505 return DRM_MODE_CONNECTOR_LVDS;
5506 case SIGNAL_TYPE_RGB:
5507 return DRM_MODE_CONNECTOR_VGA;
5508 case SIGNAL_TYPE_DISPLAY_PORT:
5509 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5510 return DRM_MODE_CONNECTOR_DisplayPort;
5511 case SIGNAL_TYPE_DVI_DUAL_LINK:
5512 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5513 return DRM_MODE_CONNECTOR_DVID;
5514 case SIGNAL_TYPE_VIRTUAL:
5515 return DRM_MODE_CONNECTOR_VIRTUAL;
5518 return DRM_MODE_CONNECTOR_Unknown;
5522 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5524 struct drm_encoder *encoder;
5526 /* There is only one encoder per connector */
5527 drm_connector_for_each_possible_encoder(connector, encoder)
5533 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5535 struct drm_encoder *encoder;
5536 struct amdgpu_encoder *amdgpu_encoder;
5538 encoder = amdgpu_dm_connector_to_encoder(connector);
5540 if (encoder == NULL)
5543 amdgpu_encoder = to_amdgpu_encoder(encoder);
5545 amdgpu_encoder->native_mode.clock = 0;
5547 if (!list_empty(&connector->probed_modes)) {
5548 struct drm_display_mode *preferred_mode = NULL;
5550 list_for_each_entry(preferred_mode,
5551 &connector->probed_modes,
5553 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5554 amdgpu_encoder->native_mode = *preferred_mode;
5562 static struct drm_display_mode *
5563 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5565 int hdisplay, int vdisplay)
5567 struct drm_device *dev = encoder->dev;
5568 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5569 struct drm_display_mode *mode = NULL;
5570 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5572 mode = drm_mode_duplicate(dev, native_mode);
5577 mode->hdisplay = hdisplay;
5578 mode->vdisplay = vdisplay;
5579 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5580 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5586 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5587 struct drm_connector *connector)
5589 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5590 struct drm_display_mode *mode = NULL;
5591 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5592 struct amdgpu_dm_connector *amdgpu_dm_connector =
5593 to_amdgpu_dm_connector(connector);
5597 char name[DRM_DISPLAY_MODE_LEN];
5600 } common_modes[] = {
5601 { "640x480", 640, 480},
5602 { "800x600", 800, 600},
5603 { "1024x768", 1024, 768},
5604 { "1280x720", 1280, 720},
5605 { "1280x800", 1280, 800},
5606 {"1280x1024", 1280, 1024},
5607 { "1440x900", 1440, 900},
5608 {"1680x1050", 1680, 1050},
5609 {"1600x1200", 1600, 1200},
5610 {"1920x1080", 1920, 1080},
5611 {"1920x1200", 1920, 1200}
5614 n = ARRAY_SIZE(common_modes);
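/*
 * Add a common mode only if it fits within the native mode, is not
 * identical to it, and is not already in the probed list.
 */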
5616 for (i = 0; i < n; i++) {
5617 struct drm_display_mode *curmode = NULL;
5618 bool mode_existed = false;
5620 if (common_modes[i].w > native_mode->hdisplay ||
5621 common_modes[i].h > native_mode->vdisplay ||
5622 (common_modes[i].w == native_mode->hdisplay &&
5623 common_modes[i].h == native_mode->vdisplay))
5626 list_for_each_entry(curmode, &connector->probed_modes, head) {
5627 if (common_modes[i].w == curmode->hdisplay &&
5628 common_modes[i].h == curmode->vdisplay) {
5629 mode_existed = true;
5637 mode = amdgpu_dm_create_common_mode(encoder,
5638 common_modes[i].name, common_modes[i].w,
5640 drm_mode_probed_add(connector, mode);
5641 amdgpu_dm_connector->num_modes++;
5645 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5648 struct amdgpu_dm_connector *amdgpu_dm_connector =
5649 to_amdgpu_dm_connector(connector);
5652 /* empty probed_modes */
5653 INIT_LIST_HEAD(&connector->probed_modes);
5654 amdgpu_dm_connector->num_modes =
5655 drm_add_edid_modes(connector, edid);
5657 /* Sort the probed modes before calling
5658 * amdgpu_dm_get_native_mode(), since an EDID can have
5659 * more than one preferred mode. Modes that appear
5660 * later in the probed mode list could be of a higher,
5661 * preferred resolution: for example, a 3840x2160
5662 * preferred timing in the base EDID and a 4096x2160
5663 * preferred resolution in a later DID extension block.
5665 drm_mode_sort(&connector->probed_modes);
5666 amdgpu_dm_get_native_mode(connector);
5668 amdgpu_dm_connector->num_modes = 0;
5672 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5674 struct amdgpu_dm_connector *amdgpu_dm_connector =
5675 to_amdgpu_dm_connector(connector);
5676 struct drm_encoder *encoder;
5677 struct edid *edid = amdgpu_dm_connector->edid;
5679 encoder = amdgpu_dm_connector_to_encoder(connector);
5681 if (!edid || !drm_edid_is_valid(edid)) {
5682 amdgpu_dm_connector->num_modes =
5683 drm_add_modes_noedid(connector, 640, 480);
5685 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5686 amdgpu_dm_connector_add_common_modes(encoder, connector);
5688 amdgpu_dm_fbc_init(connector);
5690 return amdgpu_dm_connector->num_modes;
5693 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5694 struct amdgpu_dm_connector *aconnector,
5696 struct dc_link *link,
5699 struct amdgpu_device *adev = dm->ddev->dev_private;
5702 * Some of the properties below require access to state, like bpc.
5703 * Allocate some default initial connector state with our reset helper.
5705 if (aconnector->base.funcs->reset)
5706 aconnector->base.funcs->reset(&aconnector->base);
5708 aconnector->connector_id = link_index;
5709 aconnector->dc_link = link;
5710 aconnector->base.interlace_allowed = false;
5711 aconnector->base.doublescan_allowed = false;
5712 aconnector->base.stereo_allowed = false;
5713 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5714 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5715 aconnector->audio_inst = -1;
5716 mutex_init(&aconnector->hpd_lock);
5719 * Configure HPD hot plug support: connector->polled defaults to 0,
5720 * which means HPD hot plug is not supported.
5722 switch (connector_type) {
5723 case DRM_MODE_CONNECTOR_HDMIA:
5724 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5725 aconnector->base.ycbcr_420_allowed =
5726 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5728 case DRM_MODE_CONNECTOR_DisplayPort:
5729 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5730 aconnector->base.ycbcr_420_allowed =
5731 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5733 case DRM_MODE_CONNECTOR_DVID:
5734 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5740 drm_object_attach_property(&aconnector->base.base,
5741 dm->ddev->mode_config.scaling_mode_property,
5742 DRM_MODE_SCALE_NONE);
5744 drm_object_attach_property(&aconnector->base.base,
5745 adev->mode_info.underscan_property,
5747 drm_object_attach_property(&aconnector->base.base,
5748 adev->mode_info.underscan_hborder_property,
5750 drm_object_attach_property(&aconnector->base.base,
5751 adev->mode_info.underscan_vborder_property,
5754 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5756 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5757 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5758 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5760 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5761 dc_is_dmcu_initialized(adev->dm.dc)) {
5762 drm_object_attach_property(&aconnector->base.base,
5763 adev->mode_info.abm_level_property, 0);
5766 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5767 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5768 connector_type == DRM_MODE_CONNECTOR_eDP) {
5769 drm_object_attach_property(
5770 &aconnector->base.base,
5771 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5773 drm_connector_attach_vrr_capable_property(
5775 #ifdef CONFIG_DRM_AMD_DC_HDCP
5776 if (adev->dm.hdcp_workqueue)
5777 drm_connector_attach_content_protection_property(&aconnector->base, true);
5782 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5783 struct i2c_msg *msgs, int num)
5785 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5786 struct ddc_service *ddc_service = i2c->ddc_service;
5787 struct i2c_command cmd;
5791 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5796 cmd.number_of_payloads = num;
5797 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
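/* Translate each i2c_msg into a DC i2c_payload; I2C_M_RD marks reads. */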
5800 for (i = 0; i < num; i++) {
5801 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5802 cmd.payloads[i].address = msgs[i].addr;
5803 cmd.payloads[i].length = msgs[i].len;
5804 cmd.payloads[i].data = msgs[i].buf;
5808 ddc_service->ctx->dc,
5809 ddc_service->ddc_pin->hw_info.ddc_channel,
5813 kfree(cmd.payloads);
5817 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5819 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5822 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5823 .master_xfer = amdgpu_dm_i2c_xfer,
5824 .functionality = amdgpu_dm_i2c_func,
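/*
 * Wrap a DC DDC service in a Linux i2c_adapter so that EDID reads and
 * other DDC traffic from the DRM core are routed through DC.
 */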
5827 static struct amdgpu_i2c_adapter *
5828 create_i2c(struct ddc_service *ddc_service,
5832 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5833 struct amdgpu_i2c_adapter *i2c;
5835 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
5838 i2c->base.owner = THIS_MODULE;
5839 i2c->base.class = I2C_CLASS_DDC;
5840 i2c->base.dev.parent = &adev->pdev->dev;
5841 i2c->base.algo = &amdgpu_dm_i2c_algo;
5842 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
5843 i2c_set_adapdata(&i2c->base, i2c);
5844 i2c->ddc_service = ddc_service;
5845 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
5852 * Note: this function assumes that dc_link_detect() was called for the
5853 * dc_link which will be represented by this aconnector.
5855 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5856 struct amdgpu_dm_connector *aconnector,
5857 uint32_t link_index,
5858 struct amdgpu_encoder *aencoder)
5862 struct dc *dc = dm->dc;
5863 struct dc_link *link = dc_get_link_at_index(dc, link_index);
5864 struct amdgpu_i2c_adapter *i2c;
5866 link->priv = aconnector;
5868 DRM_DEBUG_DRIVER("%s()\n", __func__);
5870 i2c = create_i2c(link->ddc, link->link_index, &res);
5872 DRM_ERROR("Failed to create i2c adapter data\n");
5876 aconnector->i2c = i2c;
5877 res = i2c_add_adapter(&i2c->base);
5880 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
5884 connector_type = to_drm_connector_type(link->connector_signal);
5886 res = drm_connector_init_with_ddc(
5889 &amdgpu_dm_connector_funcs,
5894 DRM_ERROR("connector_init failed\n");
5895 aconnector->connector_id = -1;
5899 drm_connector_helper_add(
5901 &amdgpu_dm_connector_helper_funcs);
5903 amdgpu_dm_connector_init_helper(
5910 drm_connector_attach_encoder(
5911 &aconnector->base, &aencoder->base);
5913 drm_connector_register(&aconnector->base);
5914 #if defined(CONFIG_DEBUG_FS)
5915 connector_debugfs_init(aconnector);
5916 aconnector->debugfs_dpcd_address = 0;
5917 aconnector->debugfs_dpcd_size = 0;
5920 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
5921 || connector_type == DRM_MODE_CONNECTOR_eDP)
5922 amdgpu_dm_initialize_dp_connector(dm, aconnector);
5927 aconnector->i2c = NULL;
5932 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
5934 switch (adev->mode_info.num_crtc) {
5951 static int amdgpu_dm_encoder_init(struct drm_device *dev,
5952 struct amdgpu_encoder *aencoder,
5953 uint32_t link_index)
5955 struct amdgpu_device *adev = dev->dev_private;
5957 int res = drm_encoder_init(dev,
5959 &amdgpu_dm_encoder_funcs,
5960 DRM_MODE_ENCODER_TMDS,
5963 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
5966 aencoder->encoder_id = link_index;
5968 aencoder->encoder_id = -1;
5970 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
5975 static void manage_dm_interrupts(struct amdgpu_device *adev,
5976 struct amdgpu_crtc *acrtc,
5980 * This is not a correct translation, but it will work as long as the
5981 * VBLANK constant is the same as PFLIP.
5984 amdgpu_display_crtc_idx_to_irq_type(
5989 drm_crtc_vblank_on(&acrtc->base);
5992 &adev->pageflip_irq,
5998 &adev->pageflip_irq,
6000 drm_crtc_vblank_off(&acrtc->base);
6005 is_scaling_state_different(const struct dm_connector_state *dm_state,
6006 const struct dm_connector_state *old_dm_state)
6008 if (dm_state->scaling != old_dm_state->scaling)
6010 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6011 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6013 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6014 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6016 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6017 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6022 #ifdef CONFIG_DRM_AMD_DC_HDCP
6023 static bool is_content_protection_different(struct drm_connector_state *state,
6024 const struct drm_connector_state *old_state,
6025 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6027 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6029 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6030 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6031 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6035 /* CP is being re-enabled, ignore this */
6036 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6037 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6038 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6042 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6043 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6044 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6045 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6047 /* Check if something is connected or enabled; otherwise we would start
6048 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6050 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6051 aconnector->dc_sink != NULL)
6054 if (old_state->content_protection == state->content_protection)
6057 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6064 static void remove_stream(struct amdgpu_device *adev,
6065 struct amdgpu_crtc *acrtc,
6066 struct dc_stream_state *stream)
6068 /* this is the update mode case */
6070 acrtc->otg_inst = -1;
6071 acrtc->enabled = false;
6074 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6075 struct dc_cursor_position *position)
6077 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6079 int xorigin = 0, yorigin = 0;
6081 position->enable = false;
6085 if (!crtc || !plane->state->fb)
6088 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6089 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6090 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6092 plane->state->crtc_w,
6093 plane->state->crtc_h);
6097 x = plane->state->crtc_x;
6098 y = plane->state->crtc_y;
6100 if (x <= -amdgpu_crtc->max_cursor_width ||
6101 y <= -amdgpu_crtc->max_cursor_height)
6104 if (crtc->primary->state) {
6105 /* avivo cursors are offset into the total surface */
6106 x += crtc->primary->state->src_x >> 16;
6107 y += crtc->primary->state->src_y >> 16;
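/*
 * If the cursor hangs off the top or left edge, DC expects the position
 * clamped to zero with the overhang folded into the hotspot instead.
 */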
6111 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6115 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6118 position->enable = true;
6121 position->x_hotspot = xorigin;
6122 position->y_hotspot = yorigin;
6127 static void handle_cursor_update(struct drm_plane *plane,
6128 struct drm_plane_state *old_plane_state)
6130 struct amdgpu_device *adev = plane->dev->dev_private;
6131 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6132 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6133 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6134 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6135 uint64_t address = afb ? afb->address : 0;
6136 struct dc_cursor_position position;
6137 struct dc_cursor_attributes attributes;
6140 if (!plane->state->fb && !old_plane_state->fb)
6143 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6145 amdgpu_crtc->crtc_id,
6146 plane->state->crtc_w,
6147 plane->state->crtc_h);
6149 ret = get_cursor_position(plane, crtc, &position);
6153 if (!position.enable) {
6154 /* turn off cursor */
6155 if (crtc_state && crtc_state->stream) {
6156 mutex_lock(&adev->dm.dc_lock);
6157 dc_stream_set_cursor_position(crtc_state->stream,
6159 mutex_unlock(&adev->dm.dc_lock);
6164 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6165 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6167 memset(&attributes, 0, sizeof(attributes));
6168 attributes.address.high_part = upper_32_bits(address);
6169 attributes.address.low_part = lower_32_bits(address);
6170 attributes.width = plane->state->crtc_w;
6171 attributes.height = plane->state->crtc_h;
6172 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6173 attributes.rotation_angle = 0;
6174 attributes.attribute_flags.value = 0;
6176 attributes.pitch = attributes.width;
6178 if (crtc_state->stream) {
6179 mutex_lock(&adev->dm.dc_lock);
6180 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6182 DRM_ERROR("DC failed to set cursor attributes\n");
6184 if (!dc_stream_set_cursor_position(crtc_state->stream,
6186 DRM_ERROR("DC failed to set cursor position\n");
6187 mutex_unlock(&adev->dm.dc_lock);
6191 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6194 assert_spin_locked(&acrtc->base.dev->event_lock);
6195 WARN_ON(acrtc->event);
6197 acrtc->event = acrtc->base.state->event;
6199 /* Set the flip status */
6200 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6202 /* Mark this event as consumed */
6203 acrtc->base.state->event = NULL;
6205 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6209 static void update_freesync_state_on_stream(
6210 struct amdgpu_display_manager *dm,
6211 struct dm_crtc_state *new_crtc_state,
6212 struct dc_stream_state *new_stream,
6213 struct dc_plane_state *surface,
6214 u32 flip_timestamp_in_us)
6216 struct mod_vrr_params vrr_params;
6217 struct dc_info_packet vrr_infopacket = {0};
6218 struct amdgpu_device *adev = dm->adev;
6219 unsigned long flags;
6225 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6226 * For now it's sufficient to just guard against these conditions.
6229 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6232 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6233 vrr_params = new_crtc_state->vrr_params;
6236 mod_freesync_handle_preflip(
6237 dm->freesync_module,
6240 flip_timestamp_in_us,
6243 if (adev->family < AMDGPU_FAMILY_AI &&
6244 amdgpu_dm_vrr_active(new_crtc_state)) {
6245 mod_freesync_handle_v_update(dm->freesync_module,
6246 new_stream, &vrr_params);
6248 /* Need to call this before the frame ends. */
6249 dc_stream_adjust_vmin_vmax(dm->dc,
6250 new_crtc_state->stream,
6251 &vrr_params.adjust);
6255 mod_freesync_build_vrr_infopacket(
6256 dm->freesync_module,
6260 TRANSFER_FUNC_UNKNOWN,
6263 new_crtc_state->freesync_timing_changed |=
6264 (memcmp(&new_crtc_state->vrr_params.adjust,
6266 sizeof(vrr_params.adjust)) != 0);
6268 new_crtc_state->freesync_vrr_info_changed |=
6269 (memcmp(&new_crtc_state->vrr_infopacket,
6271 sizeof(vrr_infopacket)) != 0);
6273 new_crtc_state->vrr_params = vrr_params;
6274 new_crtc_state->vrr_infopacket = vrr_infopacket;
6276 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6277 new_stream->vrr_infopacket = vrr_infopacket;
6279 if (new_crtc_state->freesync_vrr_info_changed)
6280 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6281 new_crtc_state->base.crtc->base.id,
6282 (int)new_crtc_state->base.vrr_enabled,
6283 (int)vrr_params.state);
6285 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6288 static void pre_update_freesync_state_on_stream(
6289 struct amdgpu_display_manager *dm,
6290 struct dm_crtc_state *new_crtc_state)
6292 struct dc_stream_state *new_stream = new_crtc_state->stream;
6293 struct mod_vrr_params vrr_params;
6294 struct mod_freesync_config config = new_crtc_state->freesync_config;
6295 struct amdgpu_device *adev = dm->adev;
6296 unsigned long flags;
6302 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6303 * For now it's sufficient to just guard against these conditions.
6305 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6308 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6309 vrr_params = new_crtc_state->vrr_params;
6311 if (new_crtc_state->vrr_supported &&
6312 config.min_refresh_in_uhz &&
6313 config.max_refresh_in_uhz) {
6314 config.state = new_crtc_state->base.vrr_enabled ?
6315 VRR_STATE_ACTIVE_VARIABLE :
6318 config.state = VRR_STATE_UNSUPPORTED;
6321 mod_freesync_build_vrr_params(dm->freesync_module,
6323 &config, &vrr_params);
6325 new_crtc_state->freesync_timing_changed |=
6326 (memcmp(&new_crtc_state->vrr_params.adjust,
6328 sizeof(vrr_params.adjust)) != 0);
6330 new_crtc_state->vrr_params = vrr_params;
6331 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6334 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6335 struct dm_crtc_state *new_state)
6337 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6338 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6340 if (!old_vrr_active && new_vrr_active) {
6341 /* Transition VRR inactive -> active:
6342 * While VRR is active, we must not disable vblank irq, as a
6343 * re-enable after a disable could compute bogus vblank/pflip
6344 * timestamps if it happened inside the display front porch.
6346 * We also need vupdate irq for the actual core vblank handling
6349 dm_set_vupdate_irq(new_state->base.crtc, true);
6350 drm_crtc_vblank_get(new_state->base.crtc);
6351 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6352 __func__, new_state->base.crtc->base.id);
6353 } else if (old_vrr_active && !new_vrr_active) {
6354 /* Transition VRR active -> inactive:
6355 * Allow vblank irq disable again for fixed refresh rate.
6357 dm_set_vupdate_irq(new_state->base.crtc, false);
6358 drm_crtc_vblank_put(new_state->base.crtc);
6359 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6360 __func__, new_state->base.crtc->base.id);
6364 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6366 struct drm_plane *plane;
6367 struct drm_plane_state *old_plane_state, *new_plane_state;
6371 * TODO: Make this per-stream so we don't issue redundant updates for
6372 * commits with multiple streams.
6374 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6376 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6377 handle_cursor_update(plane, old_plane_state);
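/*
 * Gather all plane updates for this CRTC into a single update bundle
 * (surface, scaling, plane info and flip address per plane) so DC can
 * apply them together in one stream update.
 */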
6380 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6381 struct dc_state *dc_state,
6382 struct drm_device *dev,
6383 struct amdgpu_display_manager *dm,
6384 struct drm_crtc *pcrtc,
6385 bool wait_for_vblank)
6388 uint64_t timestamp_ns;
6389 struct drm_plane *plane;
6390 struct drm_plane_state *old_plane_state, *new_plane_state;
6391 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6392 struct drm_crtc_state *new_pcrtc_state =
6393 drm_atomic_get_new_crtc_state(state, pcrtc);
6394 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6395 struct dm_crtc_state *dm_old_crtc_state =
6396 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6397 int planes_count = 0, vpos, hpos;
6399 unsigned long flags;
6400 struct amdgpu_bo *abo;
6401 uint64_t tiling_flags;
6402 uint32_t target_vblank, last_flip_vblank;
6403 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6404 bool pflip_present = false;
6405 bool swizzle = true;
6407 struct dc_surface_update surface_updates[MAX_SURFACES];
6408 struct dc_plane_info plane_infos[MAX_SURFACES];
6409 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6410 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6411 struct dc_stream_update stream_update;
	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
			swizzle = false;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		/*
		 * TODO: This might fail and hence better not be used; wait
		 * explicitly on fences instead, and in general this should
		 * only be called for a blocking commit, as per the framework
		 * helpers.
		 */
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			DRM_ERROR("failed to reserve buffer before flip\n");

		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);

		amdgpu_bo_unreserve(abo);

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}
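	/*
	 * Pick the vblank baseline used to throttle the flip: the DRM vblank
	 * counter for fixed refresh, or the vblank of the last completed flip
	 * for VRR, then busy-wait until we are past the target vblank.
	 */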
	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
			usleep_range(1000, 1100);
		}

		if (acrtc_attach->base.state->event) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
	    acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->psr_version &&
		    !acrtc_state->stream->link->psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_allow_active &&
			 swizzle)
			amdgpu_dm_psr_enable(acrtc_state->stream);

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
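/*
 * amdgpu_dm_commit_audio() - refresh audio ELD notifications after a commit.
 *
 * First tears down the audio instance for any connector that changed CRTC
 * or went through a modeset, then registers an instance for each connector
 * whose new stream reports audio, notifying the audio component both times.
 */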
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
/*
 * Enable interrupts on CRTCs that are newly active, have undergone
 * a modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through a modeset
 * Pass 2: For CRTCs going from 0 to n active planes
 *
 * Interrupts can only be enabled after the planes are programmed,
 * so this requires a two-pass approach since we don't want to
 * just defer the interrupts until after commit planes every time.
 */
static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
					     struct drm_atomic_state *state,
					     bool for_modeset)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;
#ifdef CONFIG_DEBUG_FS
	enum amdgpu_dm_pipe_crc_source source;
#endif

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);
		struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(old_crtc_state);
		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
		bool run_pass;

		run_pass = (for_modeset && modeset) ||
			   (!for_modeset && !modeset &&
			    !dm_old_crtc_state->interrupts_enabled);

		if (!run_pass)
			continue;

		if (!dm_new_crtc_state->interrupts_enabled)
			continue;

		manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
		/* The stream has changed, so CRC capture needs to be
		 * re-enabled.
		 */
		source = dm_new_crtc_state->crc_src;
		if (amdgpu_dm_is_valid_crc_source(source)) {
			amdgpu_dm_crtc_configure_crc_source(
				crtc, dm_new_crtc_state,
				dm_new_crtc_state->crc_src);
		}
#endif
	}
}
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
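/*
 * amdgpu_dm_atomic_commit() - thin wrapper around the DRM commit helper that
 * first quiesces vblank/pflip interrupts on CRTCs that are about to be
 * disabled or go through a modeset.
 */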
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, doing it there would lead to the new state (that hasn't
	 * been committed yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (dm_old_crtc_state->interrupts_enabled &&
		    (!dm_new_crtc_state->interrupts_enabled ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}

	/*
	 * TODO: Add a check here for SoCs that support a hardware cursor
	 * plane, to unset legacy_cursor_update.
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/* TODO: Handle EINTR, reenable IRQ */
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen because of issues with
				 * userspace notification delivery: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector. We expect a reset mode to come
				 * soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream) {
				if (dm_old_crtc_state->stream->link->psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}
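	/* Query DC for the stream status to learn which OTG each CRTC ended up on. */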
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n",
				       dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
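	/*
	 * HDCP: reset encryption for any display that lost its stream while
	 * still marked ENABLED, then reconcile the desired content protection
	 * state for the remaining connectors.
	 */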
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													    : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/* Enable interrupts for CRTCs going through a modeset. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, true);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Enable interrupts for CRTCs going from 0 to n active planes. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, false);

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * Send a vblank event for each event not handled in flip and
	 * mark consumed events for drm_atomic_helper_commit_hw_done().
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a usermode call
	 * to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * waits for completion of all non blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are locking here will get released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
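/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector state:
 * VRR is supported when the connector is FreeSync capable and the nominal
 * refresh rate of the mode falls inside the monitor's [min, max] vfreq range.
 */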
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
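/*
 * dm_update_crtc_state() - create or remove the DC stream for one CRTC.
 *
 * Called twice from atomic check: once with enable == false to remove the
 * stream from any changed/disabled CRTC, and once with enable == true to
 * create and add the stream for enabled CRTCs. Sets
 * *lock_and_validation_needed whenever the change requires full validation
 * under the global lock.
 */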
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_stream_for_sink(aconnector,
						    &new_crtc_state->mode,
						    dm_new_conn_state,
						    dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when MST
		 * connectors added in chained mode are not found in the
		 * existing crtc_state.
		 * TODO: need to dig out the root cause of that.
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;

	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement atomic check for cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			ret = -EINVAL;
			return ret;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
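/*
 * dm_determine_update_type_for_commit() - classify a commit as FAST, MED or
 * FULL. Builds a surface-update bundle for every stream in the state and
 * asks DC via dc_check_update_surfaces_for_stream(); any change of stream or
 * of a plane's dc_state forces UPDATE_TYPE_FULL immediately.
 */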
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	struct surface_info_bundle {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {

		memset(bundle, 0, sizeof(struct surface_info_bundle));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
			uint64_t tiling_flags;

			new_plane_crtc = new_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			bundle->surface_updates[num_plane].surface =
					new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
				bundle->stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				bundle->surface_updates[num_plane].gamma =
						new_dm_plane_state->dc_state->gamma_correction;
				bundle->surface_updates[num_plane].in_transfer_func =
						new_dm_plane_state->dc_state->in_transfer_func;
				bundle->stream_update.gamut_remap =
						&new_dm_crtc_state->stream->gamut_remap_matrix;
				bundle->stream_update.output_csc_transform =
						&new_dm_crtc_state->stream->csc_color_matrix;
				bundle->stream_update.out_transfer_func =
						new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   scaling_info);
			if (ret)
				goto cleanup;

			bundle->surface_updates[num_plane].scaling_info = scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags);
				if (ret)
					goto cleanup;

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					plane_info,
					&flip_addr->address);
				if (ret)
					goto cleanup;

				bundle->surface_updates[num_plane].plane_info = plane_info;
				bundle->surface_updates[num_plane].flip_addr = flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;

		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
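/*
 * For a CRTC undergoing a modeset on an MST link with DSC, add every other
 * CRTC sharing the same MST topology to the atomic state, since recomputing
 * DSC configs can affect bandwidth across the whole topology.
 */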
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;
	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scale or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to
	 * set the global lock. Leaving it in to check if we broke any corner
	 * cases:
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
			goto fail;

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;

	/*
	 * If the EDID is non-zero, restrict freesync checking to DP and eDP.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}

	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate the number of static frames before generating an
	 * interrupt to enter PSR.
	 * Init with a fail-safe of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}