2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
101 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
113 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115 * requests into DC requests, and DC responses into DRM responses.
117 * The root control structure is &struct amdgpu_display_manager.
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
125 * initializes drm_device display related structures, based on the information
126 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
127 * drm_encoder, drm_mode_config
129 * Returns 0 on success
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 struct drm_plane *plane,
137 unsigned long possible_crtcs,
138 const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 struct drm_plane *plane,
141 uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 struct amdgpu_dm_connector *amdgpu_dm_connector,
145 struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 struct amdgpu_encoder *aencoder,
148 uint32_t link_index);
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 struct drm_atomic_state *state,
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 struct drm_atomic_state *state);
161 static void handle_cursor_update(struct drm_plane *plane,
162 struct drm_plane_state *old_plane_state);
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
171 * dm_vblank_get_counter
174 * Get counter for number of vertical blanks
177 * struct amdgpu_device *adev - [in] desired amdgpu device
178 * int disp_idx - [in] which CRTC to get the counter from
181 * Counter for vertical blanks
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
185 if (crtc >= adev->mode_info.num_crtc)
/* Look up the software CRTC state; the vblank counter lives on its stream. */
188 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
/* No stream attached means the CRTC is not driving a display; log and bail. */
193 if (acrtc_state->stream == NULL) {
194 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
/* Delegate the actual counter read to DC. */
199 return dc_stream_get_vblank_counter(acrtc_state->stream);
/* Query the current scanout position for @crtc and pack it into the
 * legacy register-style formats expected by the amdgpu base driver:
 * *position = v_position | (h_position << 16)
 * *vbl      = v_blank_start | (v_blank_end << 16)
 */
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 u32 *vbl, u32 *position)
206 uint32_t v_blank_start, v_blank_end, h_position, v_position;
208 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
211 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
215 if (acrtc_state->stream == NULL) {
216 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
222 * TODO rework base driver to use values directly.
223 * for now parse it back into reg-format
225 dc_stream_get_scanoutpos(acrtc_state->stream,
231 *position = v_position | (h_position << 16);
232 *vbl = v_blank_start | (v_blank_end << 16);
/* Stub implementations of the amd_ip_funcs idle/soft-reset callbacks.
 * The DM block has no meaningful idle or soft-reset handling here.
 */
238 static bool dm_is_idle(void *handle)
244 static int dm_wait_for_idle(void *handle)
250 static bool dm_check_soft_reset(void *handle)
255 static int dm_soft_reset(void *handle)
/* Map an OTG (output timing generator) instance to its amdgpu_crtc by
 * walking the DRM crtc_list. An otg_inst of -1 falls back to crtc 0.
 */
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
265 struct drm_device *dev = adev->ddev;
266 struct drm_crtc *crtc;
267 struct amdgpu_crtc *amdgpu_crtc;
/* -1 is used by callers that don't know the instance; default to CRTC 0. */
269 if (otg_inst == -1) {
271 return adev->mode_info.crtcs[0];
274 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 amdgpu_crtc = to_amdgpu_crtc(crtc);
277 if (amdgpu_crtc->otg_inst == otg_inst)
/* True when variable refresh rate is engaged on this CRTC state, i.e. the
 * freesync state machine is in either of its two active states.
 */
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
286 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
291 * dm_pflip_high_irq() - Handle pageflip interrupt
292 * @interrupt_params: ignored
294 * Handles the pageflip interrupt by notifying all interested parties
295 * that the pageflip has been completed.
297 static void dm_pflip_high_irq(void *interrupt_params)
299 struct amdgpu_crtc *amdgpu_crtc;
300 struct common_irq_params *irq_params = interrupt_params;
301 struct amdgpu_device *adev = irq_params->adev;
303 struct drm_pending_vblank_event *e;
304 struct dm_crtc_state *acrtc_state;
305 uint32_t vpos, hpos, v_blank_start, v_blank_end;
/* irq_src encodes the OTG instance relative to the pflip IRQ base. */
308 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
310 /* IRQ could occur when in initial stage */
311 /* TODO work and BO cleanup */
312 if (amdgpu_crtc == NULL) {
313 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
/* event_lock protects pflip_status and the pending vblank event. */
317 spin_lock_irqsave(&adev->ddev->event_lock, flags);
319 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
320 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
321 amdgpu_crtc->pflip_status,
322 AMDGPU_FLIP_SUBMITTED,
323 amdgpu_crtc->crtc_id,
325 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
329 /* page flip completed. */
330 e = amdgpu_crtc->event;
331 amdgpu_crtc->event = NULL;
336 acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 vrr_active = amdgpu_dm_vrr_active(acrtc_state);
339 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
341 !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 &v_blank_end, &hpos, &vpos) ||
343 (vpos < v_blank_start)) {
344 /* Update to correct count and vblank timestamp if racing with
345 * vblank irq. This also updates to the correct vblank timestamp
346 * even in VRR mode, as scanout is past the front-porch atm.
348 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
350 /* Wake up userspace by sending the pageflip event with proper
351 * count and timestamp of vblank of flip completion.
354 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
356 /* Event sent, so done with vblank for this flip */
357 drm_crtc_vblank_put(&amdgpu_crtc->base);
360 /* VRR active and inside front-porch: vblank count and
361 * timestamp for pageflip event will only be up to date after
362 * drm_crtc_handle_vblank() has been executed from late vblank
363 * irq handler after start of back-porch (vline 0). We queue the
364 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 * updated timestamp and count, once it runs after us.
367 * We need to open-code this instead of using the helper
368 * drm_crtc_arm_vblank_event(), as that helper would
369 * call drm_crtc_accurate_vblank_count(), which we must
370 * not call in VRR mode while we are in front-porch!
373 /* sequence will be replaced by real count during send-out. */
374 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 e->pipe = amdgpu_crtc->crtc_id;
377 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
381 /* Keep track of vblank of this flip for flip throttling. We use the
382 * cooked hw counter, as that one incremented at start of this vblank
383 * of pageflip completion, so last_flip_vblank is the forbidden count
384 * for queueing new pageflips if vsync + VRR is enabled.
386 amdgpu_crtc->last_flip_vblank =
387 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
389 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
392 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 vrr_active, (int) !e);
/* VUPDATE interrupt handler. In VRR mode the core vblank handling is
 * deferred to here (after end of front-porch) so that vblank timestamps
 * are valid; it also performs below-the-range (BTR) freesync processing
 * for pre-AI (pre-DCE12) ASIC families.
 */
397 static void dm_vupdate_high_irq(void *interrupt_params)
399 struct common_irq_params *irq_params = interrupt_params;
400 struct amdgpu_device *adev = irq_params->adev;
401 struct amdgpu_crtc *acrtc;
402 struct dm_crtc_state *acrtc_state;
/* irq_src encodes the OTG instance relative to the VUPDATE IRQ base. */
405 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
408 acrtc_state = to_dm_crtc_state(acrtc->base.state);
410 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
412 amdgpu_dm_vrr_active(acrtc_state));
414 /* Core vblank handling is done here after end of front-porch in
415 * vrr mode, as vblank timestamping will give valid results
416 * while now done after front-porch. This will also deliver
417 * page-flip completion events that have been queued to us
418 * if a pageflip happened inside front-porch.
420 if (amdgpu_dm_vrr_active(acrtc_state)) {
421 drm_crtc_handle_vblank(&acrtc->base);
423 /* BTR processing for pre-DCE12 ASICs */
424 if (acrtc_state->stream &&
425 adev->family < AMDGPU_FAMILY_AI) {
426 spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 mod_freesync_handle_v_update(
428 adev->dm.freesync_module,
430 &acrtc_state->vrr_params);
432 dc_stream_adjust_vmin_vmax(
435 &acrtc_state->vrr_params.adjust);
436 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
443 * dm_crtc_high_irq() - Handles CRTC interrupt
444 * @interrupt_params: ignored
446 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
449 static void dm_crtc_high_irq(void *interrupt_params)
451 struct common_irq_params *irq_params = interrupt_params;
452 struct amdgpu_device *adev = irq_params->adev;
453 struct amdgpu_crtc *acrtc;
454 struct dm_crtc_state *acrtc_state;
/* irq_src encodes the OTG instance relative to the VBLANK IRQ base. */
457 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
460 acrtc_state = to_dm_crtc_state(acrtc->base.state);
462 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
464 amdgpu_dm_vrr_active(acrtc_state));
466 /* Core vblank handling at start of front-porch is only possible
467 * in non-vrr mode, as only there vblank timestamping will give
468 * valid results while done in front-porch. Otherwise defer it
469 * to dm_vupdate_high_irq after end of front-porch.
471 if (!amdgpu_dm_vrr_active(acrtc_state))
472 drm_crtc_handle_vblank(&acrtc->base);
474 /* Following stuff must happen at start of vblank, for crc
475 * computation and below-the-range btr support in vrr mode.
477 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
/* Freesync v_update + vmin/vmax adjustment, AI (DCE12+) families only;
 * older families are handled in dm_vupdate_high_irq instead.
 */
479 if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
480 acrtc_state->vrr_params.supported &&
481 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
482 spin_lock_irqsave(&adev->ddev->event_lock, flags);
483 mod_freesync_handle_v_update(
484 adev->dm.freesync_module,
486 &acrtc_state->vrr_params);
488 dc_stream_adjust_vmin_vmax(
491 &acrtc_state->vrr_params.adjust);
492 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
497 #if defined(CONFIG_DRM_AMD_DC_DCN)
499 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
500 * @interrupt_params: interrupt parameters
502 * Notify DRM's vblank event handler at VSTARTUP
504 * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
505 * * We are close enough to VUPDATE - the point of no return for hw
506 * * We are in the fixed portion of variable front porch when vrr is enabled
507 * * We are before VUPDATE, where double-buffered vrr registers are swapped
509 * It is therefore the correct place to signal vblank, send user flip events,
512 static void dm_dcn_crtc_high_irq(void *interrupt_params)
514 struct common_irq_params *irq_params = interrupt_params;
515 struct amdgpu_device *adev = irq_params->adev;
516 struct amdgpu_crtc *acrtc;
517 struct dm_crtc_state *acrtc_state;
/* irq_src encodes the OTG instance relative to the VBLANK IRQ base. */
520 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
525 acrtc_state = to_dm_crtc_state(acrtc->base.state);
527 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
528 amdgpu_dm_vrr_active(acrtc_state),
529 acrtc_state->active_planes);
/* On DCN, CRC handling and core vblank both happen here at VSTARTUP. */
531 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532 drm_crtc_handle_vblank(&acrtc->base);
534 spin_lock_irqsave(&adev->ddev->event_lock, flags);
536 if (acrtc_state->vrr_params.supported &&
537 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
538 mod_freesync_handle_v_update(
539 adev->dm.freesync_module,
541 &acrtc_state->vrr_params);
543 dc_stream_adjust_vmin_vmax(
546 &acrtc_state->vrr_params.adjust);
550 * If there aren't any active_planes then DCH HUBP may be clock-gated.
551 * In that case, pageflip completion interrupts won't fire and pageflip
552 * completion events won't get delivered. Prevent this by sending
553 * pending pageflip events from here if a flip is still pending.
555 * If any planes are enabled, use dm_pflip_high_irq() instead, to
556 * avoid race conditions between flip programming and completion,
557 * which could cause too early flip completion events.
559 if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
560 acrtc_state->active_planes == 0) {
562 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
564 drm_crtc_vblank_put(&acrtc->base);
566 acrtc->pflip_status = AMDGPU_FLIP_NONE;
569 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
/* Stub amd_ip_funcs callbacks: DM performs no explicit clockgating or
 * powergating state changes here.
 */
573 static int dm_set_clockgating_state(void *handle,
574 enum amd_clockgating_state state)
579 static int dm_set_powergating_state(void *handle,
580 enum amd_powergating_state state)
585 /* Prototypes of private functions */
586 static int dm_early_init(void* handle);
588 /* Allocate memory for FBC compressed data */
/* Sizes the buffer to the largest htotal*vtotal among the connector's
 * modes (x4 bytes). Only runs for eDP connectors when the DC core has an
 * FBC compressor and no buffer object was allocated yet.
 */
589 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
591 struct drm_device *dev = connector->dev;
592 struct amdgpu_device *adev = dev->dev_private;
593 struct dm_comressor_info *compressor = &adev->dm.compressor;
594 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
595 struct drm_display_mode *mode;
596 unsigned long max_size = 0;
598 if (adev->dm.dc->fbc_compressor == NULL)
601 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
/* Already allocated once; nothing further to do. */
604 if (compressor->bo_ptr)
608 list_for_each_entry(mode, &connector->modes, head) {
609 if (max_size < mode->htotal * mode->vtotal)
610 max_size = mode->htotal * mode->vtotal;
/* GTT-backed kernel BO; 4 bytes per pixel of the largest mode. */
614 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
615 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
616 &compressor->gpu_addr, &compressor->cpu_addr);
619 DRM_ERROR("DM: Failed to initialize FBC\n");
621 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
622 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
/* drm_audio_component callback: copy the ELD (EDID-Like Data) of the
 * connector whose audio instance matches @port into @buf, bounded by
 * @max_bytes. Serialized against other audio updates by dm.audio_lock.
 */
629 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
630 int pipe, bool *enabled,
631 unsigned char *buf, int max_bytes)
633 struct drm_device *dev = dev_get_drvdata(kdev);
634 struct amdgpu_device *adev = dev->dev_private;
635 struct drm_connector *connector;
636 struct drm_connector_list_iter conn_iter;
637 struct amdgpu_dm_connector *aconnector;
642 mutex_lock(&adev->dm.audio_lock);
644 drm_connector_list_iter_begin(dev, &conn_iter);
645 drm_for_each_connector_iter(connector, &conn_iter) {
646 aconnector = to_amdgpu_dm_connector(connector);
/* Skip connectors bound to a different audio instance. */
647 if (aconnector->audio_inst != port)
651 ret = drm_eld_size(connector->eld);
652 memcpy(buf, connector->eld, min(max_bytes, ret));
656 drm_connector_list_iter_end(&conn_iter);
658 mutex_unlock(&adev->dm.audio_lock);
660 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
/* Audio component glue: ops table plus bind/unbind callbacks used by the
 * HDA driver via the component framework.
 */
665 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
666 .get_eld = amdgpu_dm_audio_component_get_eld,
/* Called when the HDA controller binds; publish our ops and remember the
 * component so ELD notifications can be delivered later.
 */
669 static int amdgpu_dm_audio_component_bind(struct device *kdev,
670 struct device *hda_kdev, void *data)
672 struct drm_device *dev = dev_get_drvdata(kdev);
673 struct amdgpu_device *adev = dev->dev_private;
674 struct drm_audio_component *acomp = data;
676 acomp->ops = &amdgpu_dm_audio_component_ops;
678 adev->dm.audio_component = acomp;
/* Called on unbind; drop our reference to the audio component. */
683 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
684 struct device *hda_kdev, void *data)
686 struct drm_device *dev = dev_get_drvdata(kdev);
687 struct amdgpu_device *adev = dev->dev_private;
688 struct drm_audio_component *acomp = data;
692 adev->dm.audio_component = NULL;
695 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
696 .bind = amdgpu_dm_audio_component_bind,
697 .unbind = amdgpu_dm_audio_component_unbind,
/* Initialize DM audio: populate one mode_info audio pin per DC audio
 * endpoint (with "unknown" defaults of -1/0/false) and register the
 * component so the HDA driver can bind to us.
 */
700 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
707 adev->mode_info.audio.enabled = true;
709 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
711 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
712 adev->mode_info.audio.pin[i].channels = -1;
713 adev->mode_info.audio.pin[i].rate = -1;
714 adev->mode_info.audio.pin[i].bits_per_sample = -1;
715 adev->mode_info.audio.pin[i].status_bits = 0;
716 adev->mode_info.audio.pin[i].category_code = 0;
717 adev->mode_info.audio.pin[i].connected = false;
718 adev->mode_info.audio.pin[i].id =
719 adev->dm.dc->res_pool->audios[i]->inst;
720 adev->mode_info.audio.pin[i].offset = 0;
723 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
727 adev->dm.audio_registered = true;
/* Tear down DM audio: deregister the component (if registered) and mark
 * audio disabled. No-op if audio was never enabled.
 */
732 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
737 if (!adev->mode_info.audio.enabled)
740 if (adev->dm.audio_registered) {
741 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
742 adev->dm.audio_registered = false;
745 /* TODO: Disable audio? */
747 adev->mode_info.audio.enabled = false;
/* Forward an ELD-changed notification for @pin to the bound HDA audio
 * component, if one is bound and provides pin_eld_notify.
 */
750 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
752 struct drm_audio_component *acomp = adev->dm.audio_component;
754 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
755 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
757 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
/* Bring up the DMUB (display micro-controller unit B) hardware service:
 * verify HW support, stage firmware/VBIOS data into the framebuffer
 * windows, program hw_params, init the DMUB HW, wait for auto-load,
 * init DMCU/ABM if present, and create the DC-side DMUB server.
 * Returns 0 on success or when DMUB/HW support is absent (early-out),
 * negative errno on failure.
 */
762 static int dm_dmub_hw_init(struct amdgpu_device *adev)
764 const struct dmcub_firmware_header_v1_0 *hdr;
765 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
766 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
767 const struct firmware *dmub_fw = adev->dm.dmub_fw;
768 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
769 struct abm *abm = adev->dm.dc->res_pool->abm;
770 struct dmub_srv_hw_params hw_params;
771 enum dmub_status status;
772 const unsigned char *fw_inst_const, *fw_bss_data;
773 uint32_t i, fw_inst_const_size, fw_bss_data_size;
777 /* DMUB isn't supported on the ASIC. */
781 DRM_ERROR("No framebuffer info for DMUB service.\n");
786 /* Firmware required for DMUB support. */
787 DRM_ERROR("No firmware provided for DMUB.\n");
791 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
792 if (status != DMUB_STATUS_OK) {
793 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
797 if (!has_hw_support) {
798 DRM_INFO("DMUB unsupported on ASIC\n");
/* Locate the inst_const and bss/data sections inside the firmware blob
 * using the header's offsets.
 */
802 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
804 fw_inst_const = dmub_fw->data +
805 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
808 fw_bss_data = dmub_fw->data +
809 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
810 le32_to_cpu(hdr->inst_const_bytes);
812 /* Copy firmware and bios info into FB memory. */
/* inst_const payload excludes the PSP header and footer wrappers. */
813 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
814 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
816 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
818 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
819 * amdgpu_ucode_init_single_fw will load dmub firmware
820 * fw_inst_const part to cw0; otherwise, the firmware back door load
821 * will be done by dm_dmub_hw_init
823 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
824 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
828 if (fw_bss_data_size)
829 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
830 fw_bss_data, fw_bss_data_size);
832 /* Copy firmware bios info into FB memory. */
833 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
836 /* Reset regions that need to be reset. */
837 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
838 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
840 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
841 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
843 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
844 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
846 /* Initialize hardware. */
847 memset(&hw_params, 0, sizeof(hw_params));
848 hw_params.fb_base = adev->gmc.fb_start;
849 hw_params.fb_offset = adev->gmc.aper_base;
851 /* backdoor load firmware and trigger dmub running */
852 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
853 hw_params.load_inst_const = true;
856 hw_params.psp_version = dmcu->psp_version;
858 for (i = 0; i < fb_info->num_fb; ++i)
859 hw_params.fb[i] = &fb_info->fb[i];
861 status = dmub_srv_hw_init(dmub_srv, &hw_params);
862 if (status != DMUB_STATUS_OK) {
863 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
867 /* Wait for firmware load to finish. */
868 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
869 if (status != DMUB_STATUS_OK)
870 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
872 /* Init DMCU and ABM if available. */
874 dmcu->funcs->dmcu_init(dmcu);
875 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
878 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
879 if (!adev->dm.dc->ctx->dmub_srv) {
880 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
884 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
885 adev->dm.dmcub_fw_version);
/* Main DM bring-up: initialize locks and IRQ support, build dc_init_data
 * from the adev's ASIC/VRAM/VBIOS info, create the DC core, init DMUB HW,
 * freesync and color modules, optional HDCP, then the DRM-facing display
 * structures and vblank support. On failure it unwinds via
 * amdgpu_dm_fini(). Returns 0 on success, negative errno on failure.
 */
890 static int amdgpu_dm_init(struct amdgpu_device *adev)
892 struct dc_init_data init_data;
893 #ifdef CONFIG_DRM_AMD_DC_HDCP
894 struct dc_callback_init init_params;
898 adev->dm.ddev = adev->ddev;
899 adev->dm.adev = adev;
901 /* Zero all the fields */
902 memset(&init_data, 0, sizeof(init_data));
903 #ifdef CONFIG_DRM_AMD_DC_HDCP
904 memset(&init_params, 0, sizeof(init_params));
907 mutex_init(&adev->dm.dc_lock);
908 mutex_init(&adev->dm.audio_lock);
910 if(amdgpu_dm_irq_init(adev)) {
911 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
/* Describe the ASIC to DC: family, PCI revision, internal rev, VRAM. */
915 init_data.asic_id.chip_family = adev->family;
917 init_data.asic_id.pci_revision_id = adev->pdev->revision;
918 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
920 init_data.asic_id.vram_width = adev->gmc.vram_width;
921 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
922 init_data.asic_id.atombios_base_address =
923 adev->mode_info.atom_context->bios;
925 init_data.driver = adev;
927 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
929 if (!adev->dm.cgs_device) {
930 DRM_ERROR("amdgpu: failed to create cgs device.\n");
934 init_data.cgs_device = adev->dm.cgs_device;
936 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
/* Per-ASIC feature flags, then global feature-mask overrides. */
938 switch (adev->asic_type) {
943 init_data.flags.gpu_vm_support = true;
949 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
950 init_data.flags.fbc_support = true;
952 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
953 init_data.flags.multi_mon_pp_mclk_switch = true;
955 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
956 init_data.flags.disable_fractional_pwm = true;
958 init_data.flags.power_down_display_on_boot = true;
960 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
962 /* Display Core create. */
963 adev->dm.dc = dc_create(&init_data);
966 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
968 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
972 r = dm_dmub_hw_init(adev);
974 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
978 dc_hardware_init(adev->dm.dc);
980 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
981 if (!adev->dm.freesync_module) {
983 "amdgpu: failed to initialize freesync_module.\n");
985 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
986 adev->dm.freesync_module);
988 amdgpu_dm_init_color_mod();
990 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* HDCP workqueue is only created on Raven and newer ASICs. */
991 if (adev->asic_type >= CHIP_RAVEN) {
992 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
994 if (!adev->dm.hdcp_workqueue)
995 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
997 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
999 dc_init_callbacks(adev->dm.dc, &init_params);
1002 if (amdgpu_dm_initialize_drm_device(adev)) {
1004 "amdgpu: failed to initialize sw for display support.\n");
1008 /* Update the actual used number of crtc */
1009 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1011 /* TODO: Add_display_info? */
1013 /* TODO use dynamic cursor width */
1014 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1015 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1017 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1019 "amdgpu: failed to initialize sw for display support.\n");
1023 DRM_DEBUG_DRIVER("KMS initialized.\n");
/* Error path: unwind everything initialized so far. */
1027 amdgpu_dm_fini(adev);
/* Tear down everything amdgpu_dm_init() created, in reverse order:
 * audio, DRM display structures, HDCP workqueue, DMUB server and BO,
 * the DC core, the cgs device, the freesync module, and the mutexes.
 * Pointers are NULLed after release so the function is safe against
 * partially-initialized state (it is also the init error path).
 */
1032 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1034 amdgpu_dm_audio_fini(adev);
1036 amdgpu_dm_destroy_drm_device(&adev->dm);
1038 #ifdef CONFIG_DRM_AMD_DC_HDCP
1039 if (adev->dm.hdcp_workqueue) {
1040 hdcp_destroy(adev->dm.hdcp_workqueue);
1041 adev->dm.hdcp_workqueue = NULL;
1045 dc_deinit_callbacks(adev->dm.dc);
1047 if (adev->dm.dc->ctx->dmub_srv) {
1048 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1049 adev->dm.dc->ctx->dmub_srv = NULL;
1052 if (adev->dm.dmub_bo)
1053 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1054 &adev->dm.dmub_bo_gpu_addr,
1055 &adev->dm.dmub_bo_cpu_addr);
1057 /* DC Destroy TODO: Replace destroy DAL */
1059 dc_destroy(&adev->dm.dc);
1061 * TODO: pageflip, vlank interrupt
1063 * amdgpu_dm_irq_fini(adev);
1066 if (adev->dm.cgs_device) {
1067 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1068 adev->dm.cgs_device = NULL;
1070 if (adev->dm.freesync_module) {
1071 mod_freesync_destroy(adev->dm.freesync_module);
1072 adev->dm.freesync_module = NULL;
1075 mutex_destroy(&adev->dm.audio_lock);
1076 mutex_destroy(&adev->dm.dc_lock);
/* Select and load the DMCU firmware for this ASIC (Raven/Picasso/Raven2
 * use the Raven blob, Navi12 its own; other ASICs have none), validate
 * it, and register its ERAM and INTV sections with the PSP firmware
 * loader. DMCU firmware is optional: a missing blob is not an error.
 */
1081 static int load_dmcu_fw(struct amdgpu_device *adev)
1083 const char *fw_name_dmcu = NULL;
1085 const struct dmcu_firmware_header_v1_0 *hdr;
1087 switch(adev->asic_type) {
1097 case CHIP_POLARIS11:
1098 case CHIP_POLARIS10:
1099 case CHIP_POLARIS12:
1109 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
/* Picasso and Raven2 are Raven revisions sharing the Raven DMCU blob. */
1112 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1113 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1114 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1115 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1120 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
/* DMCU is only loaded through PSP; skip on direct/SMU loading. */
1124 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1125 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1129 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1131 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1132 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1133 adev->dm.fw_dmcu = NULL;
1137 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1142 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1144 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1146 release_firmware(adev->dm.fw_dmcu);
1147 adev->dm.fw_dmcu = NULL;
/* Register the ERAM section (ucode minus INTV) and the INTV section
 * separately with the PSP loader, accounting both in fw_size.
 */
1151 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1152 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1153 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1154 adev->firmware.fw_size +=
1155 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1157 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1158 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1159 adev->firmware.fw_size +=
1160 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1162 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1164 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
/* DMUB service register accessors: thin adapters that forward register
 * reads/writes to the DC context's dm_read_reg/dm_write_reg helpers.
 * @ctx is the struct amdgpu_device passed as the DMUB user context.
 */
1169 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1171 struct amdgpu_device *adev = ctx;
1173 return dm_read_reg(adev->dm.dc->ctx, address);
1176 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1179 struct amdgpu_device *adev = ctx;
1181 return dm_write_reg(adev->dm.dc->ctx, address, value);
/* Software-side setup of the DMUB (Display MicroController Unit B) service:
 * selects the per-ASIC firmware, loads and validates it, creates the
 * dmub_srv instance with our register callbacks, sizes its memory regions,
 * backs them with a VRAM BO, and computes the framebuffer-info table.
 * Returns 0 on success or a negative errno / early 0 for ASICs without DMUB.
 */
1184 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1186 struct dmub_srv_create_params create_params;
1187 struct dmub_srv_region_params region_params;
1188 struct dmub_srv_region_info region_info;
1189 struct dmub_srv_fb_params fb_params;
1190 struct dmub_srv_fb_info *fb_info;
1191 struct dmub_srv *dmub_srv;
1192 const struct dmcub_firmware_header_v1_0 *hdr;
1193 const char *fw_name_dmub;
1194 enum dmub_asic dmub_asic;
1195 enum dmub_status status;
/* Only Renoir (DCN 2.1) uses DMUB here; other ASICs fall through below. */
1198 switch (adev->asic_type) {
1200 dmub_asic = DMUB_ASIC_DCN21;
1201 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1205 /* ASIC doesn't support DMUB. */
1209 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1211 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1215 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1217 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1221 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
/* When PSP loads the firmware, register it with the base driver's ucode
 * table so PSP can place it. */
1223 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1224 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1225 AMDGPU_UCODE_ID_DMCUB;
1226 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1228 adev->firmware.fw_size +=
1229 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
/* NOTE(review): dmcub_fw_version is printed here but only assigned a few
 * lines below (1235), so this log line reports 0 on the PSP path — confirm
 * whether the assignment should precede the DRM_INFO. */
1231 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1232 adev->dm.dmcub_fw_version);
1235 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1237 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1238 dmub_srv = adev->dm.dmub_srv;
1241 DRM_ERROR("Failed to allocate DMUB service!\n");
1245 memset(&create_params, 0, sizeof(create_params));
1246 create_params.user_ctx = adev;
1247 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1248 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1249 create_params.asic = dmub_asic;
1251 /* Create the DMUB service. */
1252 status = dmub_srv_create(dmub_srv, &create_params);
1253 if (status != DMUB_STATUS_OK) {
1254 DRM_ERROR("Error creating DMUB service: %d\n", status);
1258 /* Calculate the size of all the regions for the DMUB service. */
/* NOTE(review): "®ion_params" below looks like mojibake for
 * "&region_params" (U+00AE swallowed "&reg") — fix the encoding. */
1259 memset(®ion_params, 0, sizeof(region_params));
/* inst_const excludes the PSP header/footer wrapped around the image. */
1261 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1262 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1263 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1264 region_params.vbios_size = adev->bios_size;
1265 region_params.fw_bss_data =
1266 adev->dm.dmub_fw->data +
1267 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1268 le32_to_cpu(hdr->inst_const_bytes);
1269 region_params.fw_inst_const =
1270 adev->dm.dmub_fw->data +
1271 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
/* NOTE(review): same "®ion_params" mojibake as above. */
1274 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1277 if (status != DMUB_STATUS_OK) {
1278 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1283 * Allocate a framebuffer based on the total size of all the regions.
1284 * TODO: Move this into GART.
1286 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1287 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1288 &adev->dm.dmub_bo_gpu_addr,
1289 &adev->dm.dmub_bo_cpu_addr);
1293 /* Rebase the regions on the framebuffer address. */
1294 memset(&fb_params, 0, sizeof(fb_params));
1295 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1296 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
/* NOTE(review): "®ion_info" — mojibake for "&region_info". */
1297 fb_params.region_info = ®ion_info;
1299 adev->dm.dmub_fb_info =
1300 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1301 fb_info = adev->dm.dmub_fb_info;
1305 "Failed to allocate framebuffer info for DMUB service!\n");
1309 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1310 if (status != DMUB_STATUS_OK) {
1311 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
/* amd_ip_funcs.sw_init hook: set up the DMUB service first (DC depends on
 * it on DMUB-capable ASICs), then load the DMCU firmware. Returns the
 * first failing step's error code. */
1318 static int dm_sw_init(void *handle)
1320 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1323 r = dm_dmub_sw_init(adev);
1327 return load_dmcu_fw(adev);
/* amd_ip_funcs.sw_fini hook: undo dm_sw_init in reverse — free the DMUB
 * fb-info table, destroy the DMUB service, then release both firmwares.
 * Each pointer is NULLed after release to make the teardown idempotent. */
1330 static int dm_sw_fini(void *handle)
1332 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1334 kfree(adev->dm.dmub_fb_info);
1335 adev->dm.dmub_fb_info = NULL;
1337 if (adev->dm.dmub_srv) {
1338 dmub_srv_destroy(adev->dm.dmub_srv);
1339 adev->dm.dmub_srv = NULL;
1342 if (adev->dm.dmub_fw) {
1343 release_firmware(adev->dm.dmub_fw);
1344 adev->dm.dmub_fw = NULL;
1347 if(adev->dm.fw_dmcu) {
1348 release_firmware(adev->dm.fw_dmcu);
1349 adev->dm.fw_dmcu = NULL;
/* Walk every DRM connector and, for each MST branch link that has a working
 * AUX channel, start MST topology management. On failure the link is
 * downgraded to a single (SST) connection. Iteration uses the connector
 * list iterator, so it is safe against concurrent hotplug list changes. */
1355 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1357 struct amdgpu_dm_connector *aconnector;
1358 struct drm_connector *connector;
1359 struct drm_connector_list_iter iter;
1362 drm_connector_list_iter_begin(dev, &iter);
1363 drm_for_each_connector_iter(connector, &iter) {
1364 aconnector = to_amdgpu_dm_connector(connector);
1365 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1366 aconnector->mst_mgr.aux) {
1367 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1369 aconnector->base.base.id);
1371 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1373 DRM_ERROR("DM_MST: Failed to start MST\n");
/* Fall back to treating the link as a single-stream connection. */
1374 aconnector->dc_link->type =
1375 dc_connection_single;
1380 drm_connector_list_iter_end(&iter);
/* amd_ip_funcs.late_init hook: program the DMCU's IRAM with ABM backlight
 * ramping parameters and a linear 16-entry backlight LUT, then kick off MST
 * link detection on all connectors. */
1385 static int dm_late_init(void *handle)
1387 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1389 struct dmcu_iram_parameters params;
1390 unsigned int linear_lut[16];
1392 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
/* Identity (linear) backlight transfer: 16 evenly spaced points 0..0xFFFF. */
1395 for (i = 0; i < 16; i++)
1396 linear_lut[i] = 0xFFFF * i / 15;
1399 params.backlight_ramping_start = 0xCCCC;
1400 params.backlight_ramping_reduction = 0xCCCCCCCC;
1401 params.backlight_lut_array_size = 16;
1402 params.backlight_lut_array = linear_lut;
1404 /* Min backlight level after ABM reduction, Don't allow below 1%
1405 * 0xFFFF x 0.01 = 0x28F
1407 params.min_abm_backlight = 0x28F;
1409 /* todo will enable for navi10 */
1410 if (adev->asic_type <= CHIP_RAVEN) {
1411 ret = dmcu_load_iram(dmcu, params);
1417 return detect_mst_link_for_all_connectors(adev->ddev);
/* Suspend (@suspend == true) or resume MST topology managers across S3.
 * Only root MST branch connectors are handled (mst_port != NULL means a
 * downstream MST connector, which is managed via its root). If a manager
 * fails to resume, MST is torn down on it and a hotplug event is sent so
 * userspace re-probes. */
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1422 struct amdgpu_dm_connector *aconnector;
1423 struct drm_connector *connector;
1424 struct drm_connector_list_iter iter;
1425 struct drm_dp_mst_topology_mgr *mgr;
1427 bool need_hotplug = false;
1429 drm_connector_list_iter_begin(dev, &iter);
1430 drm_for_each_connector_iter(connector, &iter) {
1431 aconnector = to_amdgpu_dm_connector(connector);
1432 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433 aconnector->mst_port)
1436 mgr = &aconnector->mst_mgr;
1439 drm_dp_mst_topology_mgr_suspend(mgr);
1441 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
/* Resume failed: disable MST on this manager and re-probe later. */
1443 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444 need_hotplug = true;
1448 drm_connector_list_iter_end(&iter);
1451 drm_kms_helper_hotplug_event(dev);
/* Push DC's fixed DCN watermark clock settings to the SMU on sw-smu ASICs.
 * Applies to Navi10/12/14 only (see comment below for why Renoir differs).
 * No-op on ASICs without sw-smu support. The WATERMARKS_EXIST/LOADED bitmap
 * ensures the table is written at most once until invalidated. */
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1456 struct smu_context *smu = &adev->smu;
1459 if (!is_support_sw_smu(adev))
1462 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1463 * on window driver dc implementation.
1464 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1465 * should be passed to smu during boot up and resume from s3.
1466 * boot up: dc calculate dcn watermark clock settings within dc_create,
1467 * dcn20_resource_construct
1468 * then call pplib functions below to pass the settings to smu:
1469 * smu_set_watermarks_for_clock_ranges
1470 * smu_set_watermarks_table
1471 * navi10_set_watermarks_table
1472 * smu_write_watermarks_table
1474 * For Renoir, clock settings of dcn watermark are also fixed values.
1475 * dc has implemented different flow for window driver:
1476 * dc_hardware_init / dc_set_power_state
1481 * smu_set_watermarks_for_clock_ranges
1482 * renoir_set_watermarks_table
1483 * smu_write_watermarks_table
1486 * dc_hardware_init -> amdgpu_dm_init
1487 * dc_set_power_state --> dm_resume
1489 * therefore, this function apply to navi10/12/14 but not Renoir
1492 switch(adev->asic_type) {
/* smu->mutex guards both the bitmap test and the table write. */
1501 mutex_lock(&smu->mutex);
1503 /* pass data to smu controller */
1504 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1505 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1506 ret = smu_write_watermarks_table(smu);
1509 mutex_unlock(&smu->mutex);
1510 DRM_ERROR("Failed to update WMTABLE!\n");
1513 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1516 mutex_unlock(&smu->mutex);
1522 * dm_hw_init() - Initialize DC device
1523 * @handle: The base driver device containing the amdgpu_dm device.
1525 * Initialize the &struct amdgpu_display_manager device. This involves calling
1526 * the initializers of each DM component, then populating the struct with them.
1528 * Although the function implies hardware initialization, both hardware and
1529 * software are initialized here. Splitting them out to their relevant init
1530 * hooks is a future TODO item.
1532 * Some notable things that are initialized here:
1534 * - Display Core, both software and hardware
1535 * - DC modules that we need (freesync and color management)
1536 * - DRM software states
1537 * - Interrupt sources and handlers
1539 * - Debug FS entries, if enabled
1541 static int dm_hw_init(void *handle)
1543 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1544 /* Create DAL display manager */
1545 amdgpu_dm_init(adev);
1546 amdgpu_dm_hpd_init(adev);
1552 * dm_hw_fini() - Teardown DC device
1553 * @handle: The base driver device containing the amdgpu_dm device.
1555 * Teardown components within &struct amdgpu_display_manager that require
1556 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1557 * were loaded. Also flush IRQ workqueues and disable them.
1559 static int dm_hw_fini(void *handle)
1561 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* Tear down in reverse of dm_hw_init: HPD first, then IRQs, then DM/DC. */
1563 amdgpu_dm_hpd_fini(adev);
1565 amdgpu_dm_irq_fini(adev);
1566 amdgpu_dm_fini(adev);
/* amd_ip_funcs.suspend hook: snapshot the atomic state (restored by
 * dm_resume), park MST managers, quiesce DM interrupts, and put DC into
 * ACPI D3. cached_state must be NULL on entry — a leftover state would
 * leak, hence the WARN_ON. */
1570 static int dm_suspend(void *handle)
1572 struct amdgpu_device *adev = handle;
1573 struct amdgpu_display_manager *dm = &adev->dm;
1576 WARN_ON(adev->dm.cached_state);
1577 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1579 s3_handle_mst(adev->ddev, true);
1581 amdgpu_dm_irq_suspend(adev);
1584 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
/* Return the first connector in @state whose new state is bound to @crtc,
 * or NULL when no connector targets that CRTC. */
1589 static struct amdgpu_dm_connector *
1590 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1591 struct drm_crtc *crtc)
1594 struct drm_connector_state *new_con_state;
1595 struct drm_connector *connector;
1596 struct drm_crtc *crtc_from_state;
1598 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1599 crtc_from_state = new_con_state->crtc;
1601 if (crtc_from_state == crtc)
1602 return to_amdgpu_dm_connector(connector);
/* Emulate link detection for a forced (user-overridden) connector: mark the
 * link as disconnected, fabricate a dc_sink matching the connector's signal
 * type, and try to read a locally stored EDID for it. Used when real
 * detection is bypassed (connector force / headless scenarios). */
1608 static void emulated_link_detect(struct dc_link *link)
1610 struct dc_sink_init_data sink_init_data = { 0 };
1611 struct display_sink_capability sink_caps = { 0 };
1612 enum dc_edid_status edid_status;
1613 struct dc_context *dc_ctx = link->ctx;
1614 struct dc_sink *sink = NULL;
1615 struct dc_sink *prev_sink = NULL;
1617 link->type = dc_connection_none;
1618 prev_sink = link->local_sink;
/* Hold a reference on the old sink while we swap in the emulated one. */
1620 if (prev_sink != NULL)
1621 dc_sink_retain(prev_sink);
1623 switch (link->connector_signal) {
1624 case SIGNAL_TYPE_HDMI_TYPE_A: {
1625 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1626 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1630 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1631 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1632 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1636 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1637 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1638 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1642 case SIGNAL_TYPE_LVDS: {
1643 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1644 sink_caps.signal = SIGNAL_TYPE_LVDS;
1648 case SIGNAL_TYPE_EDP: {
1649 sink_caps.transaction_type =
1650 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1651 sink_caps.signal = SIGNAL_TYPE_EDP;
1655 case SIGNAL_TYPE_DISPLAY_PORT: {
1656 sink_caps.transaction_type =
1657 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
/* NOTE(review): DP case deliberately(?) reports SIGNAL_TYPE_VIRTUAL
 * rather than SIGNAL_TYPE_DISPLAY_PORT — unlike every other case,
 * which echoes the connector signal. Confirm this asymmetry. */
1658 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1663 DC_ERROR("Invalid connector type! signal:%d\n",
1664 link->connector_signal);
1668 sink_init_data.link = link;
1669 sink_init_data.sink_signal = sink_caps.signal;
1671 sink = dc_sink_create(&sink_init_data);
1673 DC_ERROR("Failed to create sink!\n");
1677 /* dc_sink_create returns a new reference */
1678 link->local_sink = sink;
1680 edid_status = dm_helpers_read_local_edid(
1685 if (edid_status != EDID_OK)
1686 DC_ERROR("Failed to read EDID");
/* amd_ip_funcs.resume hook, counterpart of dm_suspend: rebuild the dc_state,
 * re-init DMUB, power DC to D0, bring IRQs and MST back, re-detect every
 * connector, then replay the atomic state cached at suspend time. */
1690 static int dm_resume(void *handle)
1692 struct amdgpu_device *adev = handle;
1693 struct drm_device *ddev = adev->ddev;
1694 struct amdgpu_display_manager *dm = &adev->dm;
1695 struct amdgpu_dm_connector *aconnector;
1696 struct drm_connector *connector;
1697 struct drm_connector_list_iter iter;
1698 struct drm_crtc *crtc;
1699 struct drm_crtc_state *new_crtc_state;
1700 struct dm_crtc_state *dm_new_crtc_state;
1701 struct drm_plane *plane;
1702 struct drm_plane_state *new_plane_state;
1703 struct dm_plane_state *dm_new_plane_state;
1704 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1705 enum dc_connection_type new_connection_type = dc_connection_none;
1708 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1709 dc_release_state(dm_state->context);
1710 dm_state->context = dc_create_state(dm->dc);
1711 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1712 dc_resource_state_construct(dm->dc, dm_state->context);
1714 /* Before powering on DC we need to re-initialize DMUB. */
1715 r = dm_dmub_hw_init(adev);
1717 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1719 /* power on hardware */
1720 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1722 /* program HPD filter */
1726 * early enable HPD Rx IRQ, should be done before set mode as short
1727 * pulse interrupts are used for MST
1729 amdgpu_dm_irq_resume_early(adev);
1731 /* On resume we need to rewrite the MSTM control bits to enable MST*/
1732 s3_handle_mst(ddev, false);
/* Re-run detection on every non-MST-downstream connector so the sinks
 * reflect post-resume reality (displays may have changed during sleep). */
1735 drm_connector_list_iter_begin(ddev, &iter);
1736 drm_for_each_connector_iter(connector, &iter) {
1737 aconnector = to_amdgpu_dm_connector(connector);
1740 * this is the case when traversing through already created
1741 * MST connectors, should be skipped
1743 if (aconnector->mst_port)
1746 mutex_lock(&aconnector->hpd_lock);
1747 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1748 DRM_ERROR("KMS: Failed to detect connector\n");
1750 if (aconnector->base.force && new_connection_type == dc_connection_none)
1751 emulated_link_detect(aconnector->dc_link);
1753 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1755 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1756 aconnector->fake_enable = false;
1758 if (aconnector->dc_sink)
1759 dc_sink_release(aconnector->dc_sink);
1760 aconnector->dc_sink = NULL;
1761 amdgpu_dm_update_connector_after_detect(aconnector);
1762 mutex_unlock(&aconnector->hpd_lock);
1764 drm_connector_list_iter_end(&iter);
1766 /* Force mode set in atomic commit */
1767 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1768 new_crtc_state->active_changed = true;
1771 * atomic_check is expected to create the dc states. We need to release
1772 * them here, since they were duplicated as part of the suspend
1775 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1776 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1777 if (dm_new_crtc_state->stream) {
1778 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1779 dc_stream_release(dm_new_crtc_state->stream);
1780 dm_new_crtc_state->stream = NULL;
1784 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1785 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1786 if (dm_new_plane_state->dc_state) {
1787 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1788 dc_plane_state_release(dm_new_plane_state->dc_state);
1789 dm_new_plane_state->dc_state = NULL;
/* Replay the atomic state captured in dm_suspend(). */
1793 drm_atomic_helper_resume(ddev, dm->cached_state);
1795 dm->cached_state = NULL;
1797 amdgpu_dm_irq_resume_late(adev);
1799 amdgpu_dm_smu_write_watermarks_table(adev);
1807 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1808 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1809 * the base driver's device list to be initialized and torn down accordingly.
1811 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
/* IP-block dispatch table wiring the DM lifecycle hooks into the amdgpu
 * base driver (see &struct amd_ip_funcs). */
1814 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1816 .early_init = dm_early_init,
1817 .late_init = dm_late_init,
1818 .sw_init = dm_sw_init,
1819 .sw_fini = dm_sw_fini,
1820 .hw_init = dm_hw_init,
1821 .hw_fini = dm_hw_fini,
1822 .suspend = dm_suspend,
1823 .resume = dm_resume,
1824 .is_idle = dm_is_idle,
1825 .wait_for_idle = dm_wait_for_idle,
1826 .check_soft_reset = dm_check_soft_reset,
1827 .soft_reset = dm_soft_reset,
1828 .set_clockgating_state = dm_set_clockgating_state,
1829 .set_powergating_state = dm_set_powergating_state,
/* Public IP-block descriptor registering DM as the DCE-type block. */
1832 const struct amdgpu_ip_block_version dm_ip_block =
1834 .type = AMD_IP_BLOCK_TYPE_DCE,
1838 .funcs = &amdgpu_dm_funcs,
/* DRM mode-config callbacks: FB creation, poll-based output change
 * notification, and the atomic check/commit entry points. */
1848 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1849 .fb_create = amdgpu_display_user_framebuffer_create,
1850 .output_poll_changed = drm_fb_helper_output_poll_changed,
1851 .atomic_check = amdgpu_dm_atomic_check,
1852 .atomic_commit = amdgpu_dm_atomic_commit,
/* Atomic helper override: DM supplies its own commit_tail implementation. */
1855 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1856 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
/* Refresh the backlight capabilities cached in the display manager from the
 * connector's DPCD extended caps and HDR sink metadata: decide whether
 * AUX-based backlight control is available and derive the min/max input
 * signal levels from the sink's max/min content light level (MaxCLL/MinCLL).
 */
1859 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1861 u32 max_cll, min_cll, max, min, q, r;
1862 struct amdgpu_dm_backlight_caps *caps;
1863 struct amdgpu_display_manager *dm;
1864 struct drm_connector *conn_base;
1865 struct amdgpu_device *adev;
1866 static const u8 pre_computed_values[] = {
1867 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1868 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1870 if (!aconnector || !aconnector->dc_link)
1873 conn_base = &aconnector->base;
1874 adev = conn_base->dev->dev_private;
1876 caps = &dm->backlight_caps;
1877 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1878 caps->aux_support = false;
1879 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1880 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
/* AUX backlight control is usable if the sink is OLED or advertises
 * SDR/HDR AUX backlight control in its DPCD extended caps. */
1882 if (caps->ext_caps->bits.oled == 1 ||
1883 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1884 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1885 caps->aux_support = true;
1887 /* From the specification (CTA-861-G), for calculating the maximum
1888 * luminance we need to use:
1889 * Luminance = 50*2**(CV/32)
1890 * Where CV is a one-byte value.
1891 * For calculating this expression we may need float point precision;
1892 * to avoid this complexity level, we take advantage that CV is divided
1893 * by a constant. From the Euclids division algorithm, we know that CV
1894 * can be written as: CV = 32*q + r. Next, we replace CV in the
1895 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
1896 * need to pre-compute the value of r/32. For pre-computing the values
1897 * We just used the following Ruby line:
1898 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1899 * The results of the above expressions can be verified at
1900 * pre_computed_values.
1904 max = (1 << q) * pre_computed_values[r];
1906 // min luminance: maxLum * (CV/255)^2 / 100
/* NOTE(review): DIV_ROUND_CLOSEST(min_cll, 255) rounds CV/255 to an
 * integer (0 or 1 for CV < ~128) BEFORE squaring, and the /100 is also
 * applied inside DIV_ROUND_CLOSEST of q*q — this does not match the
 * cited formula maxLum*(CV/255)^2/100 except at the extremes. Confirm
 * against the CTA-861-G derivation. */
1907 q = DIV_ROUND_CLOSEST(min_cll, 255);
1908 min = max * DIV_ROUND_CLOSEST((q * q), 100);
1910 caps->aux_max_input_signal = max;
1911 caps->aux_min_input_signal = min;
/* Synchronize the DRM connector with the result of a DC link detection:
 * adopt the link's current local sink (or the emulated sink for forced
 * connectors), update EDID/freesync/CEC properties, and drop state when the
 * sink disappeared. Reference counting: the link's sink is retained on
 * entry and released on every exit path once ownership has been settled. */
1914 void amdgpu_dm_update_connector_after_detect(
1915 struct amdgpu_dm_connector *aconnector)
1917 struct drm_connector *connector = &aconnector->base;
1918 struct drm_device *dev = connector->dev;
1919 struct dc_sink *sink;
1921 /* MST handled by drm_mst framework */
1922 if (aconnector->mst_mgr.mst_state == true)
1926 sink = aconnector->dc_link->local_sink;
1928 dc_sink_retain(sink);
1931 * Edid mgmt connector gets first update only in mode_valid hook and then
1932 * the connector sink is set to either fake or physical sink depends on link status.
1933 * Skip if already done during boot.
1935 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1936 && aconnector->dc_em_sink) {
1939 * For S3 resume with headless use eml_sink to fake stream
1940 * because on resume connector->sink is set to NULL
1942 mutex_lock(&dev->mode_config.mutex);
1945 if (aconnector->dc_sink) {
1946 amdgpu_dm_update_freesync_caps(connector, NULL);
1948 * retain and release below are used to
1949 * bump up refcount for sink because the link doesn't point
1950 * to it anymore after disconnect, so on next crtc to connector
1951 * reshuffle by UMD we will get into unwanted dc_sink release
1953 dc_sink_release(aconnector->dc_sink);
1955 aconnector->dc_sink = sink;
1956 dc_sink_retain(aconnector->dc_sink);
1957 amdgpu_dm_update_freesync_caps(connector,
1960 amdgpu_dm_update_freesync_caps(connector, NULL);
1961 if (!aconnector->dc_sink) {
/* Fall back to the emulated sink for headless/forced setups. */
1962 aconnector->dc_sink = aconnector->dc_em_sink;
1963 dc_sink_retain(aconnector->dc_sink);
1967 mutex_unlock(&dev->mode_config.mutex);
1970 dc_sink_release(sink);
1975 * TODO: temporary guard to look for proper fix
1976 * if this sink is MST sink, we should not do anything
1978 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1979 dc_sink_release(sink);
1983 if (aconnector->dc_sink == sink) {
1985 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1988 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1989 aconnector->connector_id);
1991 dc_sink_release(sink);
1995 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1996 aconnector->connector_id, aconnector->dc_sink, sink);
1998 mutex_lock(&dev->mode_config.mutex);
2001 * 1. Update status of the drm connector
2002 * 2. Send an event and let userspace tell us what to do
2006 * TODO: check if we still need the S3 mode update workaround.
2007 * If yes, put it here.
2009 if (aconnector->dc_sink)
2010 amdgpu_dm_update_freesync_caps(connector, NULL);
2012 aconnector->dc_sink = sink;
2013 dc_sink_retain(aconnector->dc_sink);
/* A zero-length EDID means the sink provided none — clear properties. */
2014 if (sink->dc_edid.length == 0) {
2015 aconnector->edid = NULL;
2016 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2019 (struct edid *) sink->dc_edid.raw_edid;
2022 drm_connector_update_edid_property(connector,
2024 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2027 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2028 update_connector_ext_caps(aconnector);
2030 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2031 amdgpu_dm_update_freesync_caps(connector, NULL);
2032 drm_connector_update_edid_property(connector, NULL);
2033 aconnector->num_modes = 0;
2034 dc_sink_release(aconnector->dc_sink);
2035 aconnector->dc_sink = NULL;
2036 aconnector->edid = NULL;
2037 #ifdef CONFIG_DRM_AMD_DC_HDCP
2038 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2039 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2040 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2044 mutex_unlock(&dev->mode_config.mutex);
2047 dc_sink_release(sink);
/* Long-pulse HPD interrupt handler (registered per-connector): re-detects
 * the link under hpd_lock, updates connector state, restores the DRM
 * connector state, and notifies userspace via a hotplug event unless the
 * connector status is user-forced. */
2050 static void handle_hpd_irq(void *param)
2052 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2053 struct drm_connector *connector = &aconnector->base;
2054 struct drm_device *dev = connector->dev;
2055 enum dc_connection_type new_connection_type = dc_connection_none;
2056 #ifdef CONFIG_DRM_AMD_DC_HDCP
2057 struct amdgpu_device *adev = dev->dev_private;
2061 * In case of failure or MST no need to update connector status or notify the OS
2062 * since (for MST case) MST does this in its own context.
2064 mutex_lock(&aconnector->hpd_lock);
2066 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* Any hotplug invalidates HDCP state for this link; reset it first. */
2067 if (adev->dm.hdcp_workqueue)
2068 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index)&#59;
2070 if (aconnector->fake_enable)
2071 aconnector->fake_enable = false;
2073 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2074 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connector that reads as disconnected: emulate detection. */
2076 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2077 emulated_link_detect(aconnector->dc_link);
2080 drm_modeset_lock_all(dev);
2081 dm_restore_drm_connector_state(dev, connector);
2082 drm_modeset_unlock_all(dev);
2084 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2085 drm_kms_helper_hotplug_event(dev);
2087 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2088 amdgpu_dm_update_connector_after_detect(aconnector);
2091 drm_modeset_lock_all(dev);
2092 dm_restore_drm_connector_state(dev, connector);
2093 drm_modeset_unlock_all(dev);
2095 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2096 drm_kms_helper_hotplug_event(dev);
2098 mutex_unlock(&aconnector->hpd_lock);
/* Service DP short-pulse (HPD-RX) downstream IRQs for @aconnector: read the
 * sink's IRQ/ESI DPCD block (legacy 0x200 range for DPCD < 1.2, ESI 0x2002+
 * otherwise), hand MST ESIs to the MST manager, ACK handled IRQs back to
 * the sink, and loop while new IRQs keep arriving (bounded at 30 passes to
 * avoid livelock on a misbehaving sink). */
2102 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2104 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2106 bool new_irq_handled = false;
2108 int dpcd_bytes_to_read;
2110 const int max_process_count = 30;
2111 int process_count = 0;
2113 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2115 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2116 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2117 /* DPCD 0x200 - 0x201 for downstream IRQ */
2118 dpcd_addr = DP_SINK_COUNT;
2120 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2121 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2122 dpcd_addr = DP_SINK_COUNT_ESI;
2125 dret = drm_dp_dpcd_read(
2126 &aconnector->dm_dp_aux.aux,
2129 dpcd_bytes_to_read);
/* Keep servicing as long as full reads succeed and IRQs remain. */
2131 while (dret == dpcd_bytes_to_read &&
2132 process_count < max_process_count) {
2138 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2139 /* handle HPD short pulse irq */
2140 if (aconnector->mst_mgr.mst_state)
2142 &aconnector->mst_mgr,
2146 if (new_irq_handled) {
2147 /* ACK at DPCD to notify down stream */
2148 const int ack_dpcd_bytes_to_write =
2149 dpcd_bytes_to_read - 1;
/* The ACK write is retried up to 3 times; AUX can transiently fail. */
2151 for (retry = 0; retry < 3; retry++) {
2154 wret = drm_dp_dpcd_write(
2155 &aconnector->dm_dp_aux.aux,
2158 ack_dpcd_bytes_to_write);
2159 if (wret == ack_dpcd_bytes_to_write)
2163 /* check if there is new irq to be handled */
2164 dret = drm_dp_dpcd_read(
2165 &aconnector->dm_dp_aux.aux,
2168 dpcd_bytes_to_read);
2170 new_irq_handled = false;
2176 if (process_count == max_process_count)
2177 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
/* Short-pulse HPD-RX interrupt handler: lets DC process the RX IRQ first;
 * if DC reports a downstream port change (and this isn't an MST root),
 * re-detects the link and notifies userspace. Afterwards forwards HDCP
 * CP_IRQs, dispatches DP IRQs via dm_handle_hpd_rx_irq(), and forwards CEC.
 * hpd_lock is taken/released only for non-MST-branch links — note the lock
 * and unlock are guarded by the same condition evaluated twice; the link
 * type must not change in between (TODO comment below acknowledges this
 * mutex is temporary). */
2180 static void handle_hpd_rx_irq(void *param)
2182 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2183 struct drm_connector *connector = &aconnector->base;
2184 struct drm_device *dev = connector->dev;
2185 struct dc_link *dc_link = aconnector->dc_link;
2186 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2187 enum dc_connection_type new_connection_type = dc_connection_none;
2188 #ifdef CONFIG_DRM_AMD_DC_HDCP
2189 union hpd_irq_data hpd_irq_data;
2190 struct amdgpu_device *adev = dev->dev_private;
2192 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2196 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2197 * conflict, after implement i2c helper, this mutex should be
2200 if (dc_link->type != dc_connection_mst_branch)
2201 mutex_lock(&aconnector->hpd_lock);
/* HDCP builds pass hpd_irq_data so CP_IRQ can be inspected below. */
2204 #ifdef CONFIG_DRM_AMD_DC_HDCP
2205 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2207 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2209 !is_mst_root_connector) {
2210 /* Downstream Port status changed. */
2211 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2212 DRM_ERROR("KMS: Failed to detect connector\n");
2214 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2215 emulated_link_detect(dc_link);
2217 if (aconnector->fake_enable)
2218 aconnector->fake_enable = false;
2220 amdgpu_dm_update_connector_after_detect(aconnector);
2223 drm_modeset_lock_all(dev);
2224 dm_restore_drm_connector_state(dev, connector);
2225 drm_modeset_unlock_all(dev);
2227 drm_kms_helper_hotplug_event(dev);
2228 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2230 if (aconnector->fake_enable)
2231 aconnector->fake_enable = false;
2233 amdgpu_dm_update_connector_after_detect(aconnector);
2236 drm_modeset_lock_all(dev);
2237 dm_restore_drm_connector_state(dev, connector);
2238 drm_modeset_unlock_all(dev);
2240 drm_kms_helper_hotplug_event(dev);
2243 #ifdef CONFIG_DRM_AMD_DC_HDCP
2244 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2245 if (adev->dm.hdcp_workqueue)
2246 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2249 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2250 (dc_link->type == dc_connection_mst_branch))
2251 dm_handle_hpd_rx_irq(aconnector);
2253 if (dc_link->type != dc_connection_mst_branch) {
2254 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2255 mutex_unlock(&aconnector->hpd_lock);
/* Register per-connector HPD handlers with the DM IRQ layer: long-pulse
 * (handle_hpd_irq) and DP short-pulse / HPD-RX (handle_hpd_rx_irq), each
 * bound to the link's IRQ source in low-IRQ (deferred) context. */
2259 static void register_hpd_handlers(struct amdgpu_device *adev)
2261 struct drm_device *dev = adev->ddev;
2262 struct drm_connector *connector;
2263 struct amdgpu_dm_connector *aconnector;
2264 const struct dc_link *dc_link;
2265 struct dc_interrupt_params int_params = {0};
2267 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2268 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2270 list_for_each_entry(connector,
2271 &dev->mode_config.connector_list, head) {
2273 aconnector = to_amdgpu_dm_connector(connector);
2274 dc_link = aconnector->dc_link;
2276 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2277 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2278 int_params.irq_source = dc_link->irq_source_hpd;
2280 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2282 (void *) aconnector);
2285 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2287 /* Also register for DP short pulse (hpd_rx). */
2288 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2289 int_params.irq_source = dc_link->irq_source_hpd_rx;
2291 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2293 (void *) aconnector);
2298 /* Register IRQ sources and initialize IRQ callbacks */
/* DCE11-family IRQ bring-up: registers VBLANK, VUPDATE, and page-flip IRQ
 * ids with the base driver, maps each to a DC irq_source, wires the DM
 * high-IRQ handlers, then registers the HPD id and per-connector HPD
 * handlers. Returns 0 on success, negative errno on the first add failure.
 */
2299 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2301 struct dc *dc = adev->dm.dc;
2302 struct common_irq_params *c_irq_params;
2303 struct dc_interrupt_params int_params = {0};
2306 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
/* Vega and later route DCE interrupts through the SOC15 IH client. */
2308 if (adev->asic_type >= CHIP_VEGA10)
2309 client_id = SOC15_IH_CLIENTID_DCE;
2311 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2312 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2315 * Actions of amdgpu_irq_add_id():
2316 * 1. Register a set() function with base driver.
2317 * Base driver will call set() function to enable/disable an
2318 * interrupt in DC hardware.
2319 * 2. Register amdgpu_dm_irq_handler().
2320 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2321 * coming from DC hardware.
2322 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2323 * for acknowledging and handling. */
2325 /* Use VBLANK interrupt */
2326 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2327 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2329 DRM_ERROR("Failed to add crtc irq id!\n");
2333 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2334 int_params.irq_source =
2335 dc_interrupt_to_irq_source(dc, i, 0);
/* Index the per-CRTC param slot by offset from the first VBLANK source. */
2337 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2339 c_irq_params->adev = adev;
2340 c_irq_params->irq_src = int_params.irq_source;
2342 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2343 dm_crtc_high_irq, c_irq_params);
2346 /* Use VUPDATE interrupt */
2347 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2348 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2350 DRM_ERROR("Failed to add vupdate irq id!\n");
2354 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2355 int_params.irq_source =
2356 dc_interrupt_to_irq_source(dc, i, 0);
2358 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2360 c_irq_params->adev = adev;
2361 c_irq_params->irq_src = int_params.irq_source;
2363 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2364 dm_vupdate_high_irq, c_irq_params);
2367 /* Use GRPH_PFLIP interrupt */
2368 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2369 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2370 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2372 DRM_ERROR("Failed to add page flip irq id!\n");
2376 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2377 int_params.irq_source =
2378 dc_interrupt_to_irq_source(dc, i, 0);
2380 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2382 c_irq_params->adev = adev;
2383 c_irq_params->irq_src = int_params.irq_source;
2385 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2386 dm_pflip_high_irq, c_irq_params);
2391 r = amdgpu_irq_add_id(adev, client_id,
2392 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2394 DRM_ERROR("Failed to add hpd irq id!\n");
2398 register_hpd_handlers(adev);
2403 #if defined(CONFIG_DRM_AMD_DC_DCN)
2404 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * dcn10_register_irq_handlers() - wire DCN VSTARTUP (vblank), page-flip and
 * HPD interrupt sources to the DM high-IRQ handlers, one source per CRTC.
 * Returns 0 on success, negative errno if amdgpu_irq_add_id() fails.
 *
 * NOTE(review): this copy of the file has lines elided (error-return paths,
 * braces, loop increments); verify exact control flow against upstream.
 */
2405 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2407 struct dc *dc = adev->dm.dc;
2408 struct common_irq_params *c_irq_params;
2409 struct dc_interrupt_params int_params = {0};
2413 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2414 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2417 * Actions of amdgpu_irq_add_id():
2418 * 1. Register a set() function with base driver.
2419 * Base driver will call set() function to enable/disable an
2420 * interrupt in DC hardware.
2421 * 2. Register amdgpu_dm_irq_handler().
2422 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2423 * coming from DC hardware.
2424 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2425 * for acknowledging and handling.
2428 /* Use VSTARTUP interrupt */
2429 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2430 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2432 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2435 DRM_ERROR("Failed to add crtc irq id!\n");
2439 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2440 int_params.irq_source =
2441 dc_interrupt_to_irq_source(dc, i, 0);
/* Per-CRTC callback context, indexed by DC irq source offset. */
2443 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2445 c_irq_params->adev = adev;
2446 c_irq_params->irq_src = int_params.irq_source;
2448 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2449 dm_dcn_crtc_high_irq, c_irq_params);
2452 /* Use GRPH_PFLIP interrupt */
2453 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2454 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2456 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2458 DRM_ERROR("Failed to add page flip irq id!\n");
2462 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2463 int_params.irq_source =
2464 dc_interrupt_to_irq_source(dc, i, 0);
2466 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2468 c_irq_params->adev = adev;
2469 c_irq_params->irq_src = int_params.irq_source;
2471 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2472 dm_pflip_high_irq, c_irq_params);
/* HPD: a single hotplug source covers all connectors on DCN. */
2477 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2480 DRM_ERROR("Failed to add hpd irq id!\n");
2484 register_hpd_handlers(adev);
2491 * Acquires the lock for the atomic state object and returns
2492 * the new atomic state.
2494 * This should only be called during atomic check.
/*
 * Returns 0 on success; propagates PTR_ERR() from
 * drm_atomic_get_private_obj_state() (e.g. -EDEADLK for lock contention).
 */
2496 static int dm_atomic_get_state(struct drm_atomic_state *state,
2497 struct dm_atomic_state **dm_state)
2499 struct drm_device *dev = state->dev;
2500 struct amdgpu_device *adev = dev->dev_private;
2501 struct amdgpu_display_manager *dm = &adev->dm;
2502 struct drm_private_state *priv_state;
2507 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2508 if (IS_ERR(priv_state))
2509 return PTR_ERR(priv_state);
/* Output parameter: caller receives the DM-private atomic state. */
2511 *dm_state = to_dm_atomic_state(priv_state);
/*
 * dm_atomic_get_new_state() - find the DM private object's *new* state in an
 * in-flight atomic commit, matched by comparing private-obj funcs pointers.
 * NOTE(review): the fall-through return for "not found" is elided in this
 * copy; upstream returns NULL in that case.
 */
2516 struct dm_atomic_state *
2517 dm_atomic_get_new_state(struct drm_atomic_state *state)
2519 struct drm_device *dev = state->dev;
2520 struct amdgpu_device *adev = dev->dev_private;
2521 struct amdgpu_display_manager *dm = &adev->dm;
2522 struct drm_private_obj *obj;
2523 struct drm_private_state *new_obj_state;
2526 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2527 if (obj->funcs == dm->atomic_obj.funcs)
2528 return to_dm_atomic_state(new_obj_state);
/*
 * dm_atomic_get_old_state() - mirror of dm_atomic_get_new_state(), but walks
 * the *old* private-object states of the atomic transaction.
 */
2534 struct dm_atomic_state *
2535 dm_atomic_get_old_state(struct drm_atomic_state *state)
2537 struct drm_device *dev = state->dev;
2538 struct amdgpu_device *adev = dev->dev_private;
2539 struct amdgpu_display_manager *dm = &adev->dm;
2540 struct drm_private_obj *obj;
2541 struct drm_private_state *old_obj_state;
2544 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2545 if (obj->funcs == dm->atomic_obj.funcs)
2546 return to_dm_atomic_state(old_obj_state);
/*
 * dm_atomic_duplicate_state() - .atomic_duplicate_state hook for the DM
 * private object: allocates a new state and deep-copies the DC context via
 * dc_copy_state(). NOTE(review): kzalloc-failure and dc_copy_state-failure
 * paths are elided in this copy.
 */
2552 static struct drm_private_state *
2553 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2555 struct dm_atomic_state *old_state, *new_state;
2557 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2561 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2563 old_state = to_dm_atomic_state(obj->state);
/* Only copy a DC context if the previous state actually had one. */
2565 if (old_state && old_state->context)
2566 new_state->context = dc_copy_state(old_state->context);
2568 if (!new_state->context) {
2573 return &new_state->base;
/*
 * dm_atomic_destroy_state() - .atomic_destroy_state hook: drop the DC
 * context reference, then free the state (free elided in this copy).
 */
2576 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2577 struct drm_private_state *state)
2579 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2581 if (dm_state && dm_state->context)
2582 dc_release_state(dm_state->context);
/* Vtable for the DM atomic private object (duplicate/destroy pair above). */
2587 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2588 .atomic_duplicate_state = dm_atomic_duplicate_state,
2589 .atomic_destroy_state = dm_atomic_destroy_state,
/*
 * amdgpu_dm_mode_config_init() - set DRM mode_config limits/hooks, create the
 * initial DC state, register the DM atomic private object, then create
 * display properties and audio. Returns 0 on success (error unwinding is
 * elided in this copy).
 */
2592 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2594 struct dm_atomic_state *state;
2597 adev->mode_info.mode_config_initialized = true;
2599 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2600 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2602 adev->ddev->mode_config.max_width = 16384;
2603 adev->ddev->mode_config.max_height = 16384;
2605 adev->ddev->mode_config.preferred_depth = 24;
2606 adev->ddev->mode_config.prefer_shadow = 1;
2607 /* indicates support for immediate flip */
2608 adev->ddev->mode_config.async_page_flip = true;
2610 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2612 state = kzalloc(sizeof(*state), GFP_KERNEL);
2616 state->context = dc_create_state(adev->dm.dc);
2617 if (!state->context) {
/* Seed the private-obj state with DC's current resource state. */
2622 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2624 drm_atomic_private_obj_init(adev->ddev,
2625 &adev->dm.atomic_obj,
2627 &dm_atomic_state_funcs);
2629 r = amdgpu_display_modeset_create_props(adev);
2633 r = amdgpu_dm_audio_init(adev);
2640 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2641 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2642 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2644 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2645 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/*
 * amdgpu_dm_update_backlight_caps() - populate dm->backlight_caps once, from
 * ACPI (ATIF) when available, otherwise from driver defaults. Idempotent:
 * returns early if caps_valid is already set (early return elided here).
 */
2647 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2649 #if defined(CONFIG_ACPI)
2650 struct amdgpu_dm_backlight_caps caps;
2652 if (dm->backlight_caps.caps_valid)
2655 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2656 if (caps.caps_valid) {
2657 dm->backlight_caps.caps_valid = true;
2658 if (caps.aux_support)
2660 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2661 dm->backlight_caps.max_input_signal = caps.max_input_signal;
/* ACPI gave no valid caps: fall back to driver defaults. */
2663 dm->backlight_caps.min_input_signal =
2664 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2665 dm->backlight_caps.max_input_signal =
2666 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
/* !CONFIG_ACPI path: defaults only, no aux-backed range. */
2669 if (dm->backlight_caps.aux_support)
2672 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2673 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
/*
 * set_backlight_via_aux() - program backlight in nits over the DP AUX
 * channel, with the default transition time. Guard checks and the return of
 * rc are elided in this copy.
 */
2677 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2684 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2685 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
/*
 * convert_brightness() - map the 0..AMDGPU_MAX_BL_LEVEL user brightness onto
 * the panel's reported input-signal range. Non-aux panels get a linear
 * rescale expanded by 0x101 for the 16-bit DC interface; aux panels get a
 * linear interpolation between aux min/max (result in millinits after the
 * elided *1000 step, then divided back by AMDGPU_MAX_BL_LEVEL).
 */
2690 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2691 const uint32_t user_brightness)
2693 u32 min, max, conversion_pace;
2694 u32 brightness = user_brightness;
2699 if (!caps->aux_support) {
2700 max = caps->max_input_signal;
2701 min = caps->min_input_signal;
2703 * The brightness input is in the range 0-255
2704 * It needs to be rescaled to be between the
2705 * requested min and max input signal
2706 * It also needs to be scaled up by 0x101 to
2707 * match the DC interface which has a range of
2710 conversion_pace = 0x101;
2715 / AMDGPU_MAX_BL_LEVEL
2716 + min * conversion_pace;
2719 * We are doing a linear interpolation here, which is OK but
2720 * does not provide the optimal result. We probably want
2721 * something close to the Perceptual Quantizer (PQ) curve.
2723 max = caps->aux_max_input_signal;
2724 min = caps->aux_min_input_signal;
2726 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2727 + user_brightness * max;
2728 // Multiple the value by 1000 since we use millinits
2730 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
/*
 * amdgpu_dm_backlight_update_status() - backlight_ops.update_status hook.
 * Converts the sysfs brightness to the panel range and routes it either over
 * DP AUX (aux_support) or the classic dc_link backlight interface.
 */
2737 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2739 struct amdgpu_display_manager *dm = bl_get_data(bd);
2740 struct amdgpu_dm_backlight_caps caps;
2741 struct dc_link *link = NULL;
2745 amdgpu_dm_update_backlight_caps(dm);
2746 caps = dm->backlight_caps;
2748 link = (struct dc_link *)dm->backlight_link;
2750 brightness = convert_brightness(&caps, bd->props.brightness);
2751 // Change brightness based on AUX property
2752 if (caps.aux_support)
2753 return set_backlight_via_aux(link, brightness);
2755 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
/*
 * amdgpu_dm_backlight_get_brightness() - backlight_ops.get_brightness hook.
 * Reads the level from DC; on DC_ERROR_UNEXPECTED falls back to the cached
 * sysfs value instead of reporting an error.
 */
2760 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2762 struct amdgpu_display_manager *dm = bl_get_data(bd);
2763 int ret = dc_link_get_backlight_level(dm->backlight_link);
2765 if (ret == DC_ERROR_UNEXPECTED)
2766 return bd->props.brightness;
/* Backlight class hooks; BL_CORE_SUSPENDRESUME lets the core handle PM. */
2770 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2771 .options = BL_CORE_SUSPENDRESUME,
2772 .get_brightness = amdgpu_dm_backlight_get_brightness,
2773 .update_status = amdgpu_dm_backlight_update_status,
/*
 * amdgpu_dm_register_backlight_device() - create the "amdgpu_blN" backlight
 * class device for this adapter. Registration failure is logged but not
 * fatal (return type line is elided in this copy).
 */
2777 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2780 struct backlight_properties props = { 0 };
2782 amdgpu_dm_update_backlight_caps(dm);
2784 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2785 props.brightness = AMDGPU_MAX_BL_LEVEL;
2786 props.type = BACKLIGHT_RAW;
/* Name is derived from the DRM primary minor index. */
2788 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2789 dm->adev->ddev->primary->index);
2791 dm->backlight_dev = backlight_device_register(bl_name,
2792 dm->adev->ddev->dev,
2794 &amdgpu_dm_backlight_ops,
2797 if (IS_ERR(dm->backlight_dev))
2798 DRM_ERROR("DM: Backlight registration failed!\n");
2800 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
/*
 * initialize_plane() - allocate and init one DRM plane of the given type.
 * @mode_info may be NULL (overlay planes are not tracked there); on success
 * primary planes are recorded in mode_info->planes[plane_id].
 * Returns 0 on success (error cleanup paths are elided in this copy).
 */
2805 static int initialize_plane(struct amdgpu_display_manager *dm,
2806 struct amdgpu_mode_info *mode_info, int plane_id,
2807 enum drm_plane_type plane_type,
2808 const struct dc_plane_cap *plane_cap)
2810 struct drm_plane *plane;
2811 unsigned long possible_crtcs;
2814 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2816 DRM_ERROR("KMS: Failed to allocate plane\n");
2819 plane->type = plane_type;
2822 * HACK: IGT tests expect that the primary plane for a CRTC
2823 * can only have one possible CRTC. Only expose support for
2824 * any CRTC if they're not going to be used as a primary plane
2825 * for a CRTC - like overlay or underlay planes.
2827 possible_crtcs = 1 << plane_id;
2828 if (plane_id >= dm->dc->caps.max_streams)
2829 possible_crtcs = 0xff;
2831 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2834 DRM_ERROR("KMS: Failed to initialize plane\n");
2840 mode_info->planes[plane_id] = plane;
/*
 * register_backlight_device() - register a backlight device for the first
 * connected eDP/LVDS link and remember that link as dm->backlight_link.
 * Compiled out entirely when no backlight class support is configured.
 */
2846 static void register_backlight_device(struct amdgpu_display_manager *dm,
2847 struct dc_link *link)
2849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2850 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2852 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2853 link->type != dc_connection_none) {
2855 * Even if registration failed, we should continue with
2856 * DM initialization because not having a backlight control
2857 * is better than a black screen.
2859 amdgpu_dm_register_backlight_device(dm);
2861 if (dm->backlight_dev)
2862 dm->backlight_link = link;
2869 * In this architecture, the association
2870 * connector -> encoder -> crtc
2871 * is not really required. The crtc and connector will hold the
2872 * display_index as an abstraction to use with DAL component
2874 * Returns 0 on success
/*
 * amdgpu_dm_initialize_drm_device() - top-level KMS bring-up: mode config,
 * primary/overlay planes, CRTCs, encoders/connectors per DC link (with boot
 * link detection), then ASIC-specific IRQ handler registration.
 * NOTE(review): many error-return and cleanup lines are elided in this copy.
 */
2876 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2878 struct amdgpu_display_manager *dm = &adev->dm;
2880 struct amdgpu_dm_connector *aconnector = NULL;
2881 struct amdgpu_encoder *aencoder = NULL;
2882 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2884 int32_t primary_planes;
2885 enum dc_connection_type new_connection_type = dc_connection_none;
2886 const struct dc_plane_cap *plane;
2888 link_cnt = dm->dc->caps.max_links;
2889 if (amdgpu_dm_mode_config_init(dm->adev)) {
2890 DRM_ERROR("DM: Failed to initialize mode config\n");
2894 /* There is one primary plane per CRTC */
2895 primary_planes = dm->dc->caps.max_streams;
2896 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2899 * Initialize primary planes, implicit planes for legacy IOCTLS.
2900 * Order is reversed to match iteration order in atomic check.
2902 for (i = (primary_planes - 1); i >= 0; i--) {
2903 plane = &dm->dc->caps.planes[i];
2905 if (initialize_plane(dm, mode_info, i,
2906 DRM_PLANE_TYPE_PRIMARY, plane)) {
2907 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2913 * Initialize overlay planes, index starting after primary planes.
2914 * These planes have a higher DRM index than the primary planes since
2915 * they should be considered as having a higher z-order.
2916 * Order is reversed to match iteration order in atomic check.
2918 * Only support DCN for now, and only expose one so we don't encourage
2919 * userspace to use up all the pipes.
2921 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2922 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
/* Overlay must be a DCN universal plane that blends and supports ARGB8888. */
2924 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2927 if (!plane->blends_with_above || !plane->blends_with_below)
2930 if (!plane->pixel_format_support.argb8888)
2933 if (initialize_plane(dm, NULL, primary_planes + i,
2934 DRM_PLANE_TYPE_OVERLAY, plane)) {
2935 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2939 /* Only create one overlay plane. */
2943 for (i = 0; i < dm->dc->caps.max_streams; i++)
2944 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2945 DRM_ERROR("KMS: Failed to initialize crtc\n");
2949 dm->display_indexes_num = dm->dc->caps.max_streams;
2951 /* loops over all connectors on the board */
2952 for (i = 0; i < link_cnt; i++) {
2953 struct dc_link *link = NULL;
2955 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2957 "KMS: Cannot support more than %d display indexes\n",
2958 AMDGPU_DM_MAX_DISPLAY_INDEX);
2962 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2966 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2970 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2971 DRM_ERROR("KMS: Failed to initialize encoder\n");
2975 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2976 DRM_ERROR("KMS: Failed to initialize connector\n");
2980 link = dc_get_link_at_index(dm->dc, i);
2982 if (!dc_link_detect_sink(link, &new_connection_type))
2983 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connectors with nothing attached get an emulated sink. */
2985 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2986 emulated_link_detect(link);
2987 amdgpu_dm_update_connector_after_detect(aconnector);
2989 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2990 amdgpu_dm_update_connector_after_detect(aconnector);
2991 register_backlight_device(dm, link);
2992 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2993 amdgpu_dm_set_psr_caps(link);
2999 /* Software is initialized. Now we can register interrupt handlers. */
3000 switch (adev->asic_type) {
3010 case CHIP_POLARIS11:
3011 case CHIP_POLARIS10:
3012 case CHIP_POLARIS12:
3017 if (dce110_register_irq_handlers(dm->adev)) {
3018 DRM_ERROR("DM: Failed to initialize IRQ\n");
3022 #if defined(CONFIG_DRM_AMD_DC_DCN)
3028 if (dcn10_register_irq_handlers(dm->adev)) {
3029 DRM_ERROR("DM: Failed to initialize IRQ\n");
3035 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
/* Stutter mode is left disabled on Carrizo/Stoney; others follow pp mask. */
3039 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3040 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3042 /* No userspace support. */
3043 dm->dc->debug.disable_tri_buf = true;
/* Tear down KMS state created by amdgpu_dm_initialize_drm_device(). */
3053 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3055 drm_mode_config_cleanup(dm->ddev);
3056 drm_atomic_private_obj_fini(&dm->atomic_obj);
3060 /******************************************************************************
3061 * amdgpu_display_funcs functions
3062 *****************************************************************************/
3065 * dm_bandwidth_update - program display watermarks
3067 * @adev: amdgpu_device pointer
3069 * Calculate and program the display watermarks and line buffer allocation.
/* Intentionally a no-op stub; DC handles bandwidth internally. */
3071 static void dm_bandwidth_update(struct amdgpu_device *adev)
3073 /* TODO: implement later */
/*
 * amdgpu_display_funcs vtable for the DC path. NULL entries are either never
 * called with DC, or handled by VBIOS parsing inside DAL (see per-field
 * comments).
 */
3076 static const struct amdgpu_display_funcs dm_display_funcs = {
3077 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3078 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3079 .backlight_set_level = NULL, /* never called for DC */
3080 .backlight_get_level = NULL, /* never called for DC */
3081 .hpd_sense = NULL,/* called unconditionally */
3082 .hpd_set_polarity = NULL, /* called unconditionally */
3083 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3084 .page_flip_get_scanoutpos =
3085 dm_crtc_get_scanoutpos,/* called unconditionally */
3086 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3087 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3090 #if defined(CONFIG_DEBUG_KERNEL_DC)
/*
 * s3_debug_store() - debug-only sysfs hook ("s3_debug", write-only) that
 * parses an integer and fakes an S3 suspend/resume cycle (the suspend/resume
 * calls themselves are elided in this copy), then kicks a hotplug event.
 */
3092 static ssize_t s3_debug_store(struct device *device,
3093 struct device_attribute *attr,
3099 struct drm_device *drm_dev = dev_get_drvdata(device);
3100 struct amdgpu_device *adev = drm_dev->dev_private;
3102 ret = kstrtoint(buf, 0, &s3_state);
3107 drm_kms_helper_hotplug_event(adev->ddev);
/* sysfs convention: consumed byte count on success, 0 on parse failure. */
3112 return ret == 0 ? count : 0;
3115 DEVICE_ATTR_WO(s3_debug);
/*
 * dm_early_init() - amd_ip_funcs.early_init: set per-ASIC CRTC/HPD/DIG
 * counts, install IRQ funcs and the display funcs vtable.
 * NOTE(review): the case labels (CHIP_*) for most arms are elided in this
 * copy; only the counts remain, so the chip->count mapping must be checked
 * against upstream.
 */
3119 static int dm_early_init(void *handle)
3121 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3123 switch (adev->asic_type) {
3126 adev->mode_info.num_crtc = 6;
3127 adev->mode_info.num_hpd = 6;
3128 adev->mode_info.num_dig = 6;
3131 adev->mode_info.num_crtc = 4;
3132 adev->mode_info.num_hpd = 6;
3133 adev->mode_info.num_dig = 7;
3137 adev->mode_info.num_crtc = 2;
3138 adev->mode_info.num_hpd = 6;
3139 adev->mode_info.num_dig = 6;
3143 adev->mode_info.num_crtc = 6;
3144 adev->mode_info.num_hpd = 6;
3145 adev->mode_info.num_dig = 7;
3148 adev->mode_info.num_crtc = 3;
3149 adev->mode_info.num_hpd = 6;
3150 adev->mode_info.num_dig = 9;
3153 adev->mode_info.num_crtc = 2;
3154 adev->mode_info.num_hpd = 6;
3155 adev->mode_info.num_dig = 9;
3157 case CHIP_POLARIS11:
3158 case CHIP_POLARIS12:
3159 adev->mode_info.num_crtc = 5;
3160 adev->mode_info.num_hpd = 5;
3161 adev->mode_info.num_dig = 5;
3163 case CHIP_POLARIS10:
3165 adev->mode_info.num_crtc = 6;
3166 adev->mode_info.num_hpd = 6;
3167 adev->mode_info.num_dig = 6;
3172 adev->mode_info.num_crtc = 6;
3173 adev->mode_info.num_hpd = 6;
3174 adev->mode_info.num_dig = 6;
3176 #if defined(CONFIG_DRM_AMD_DC_DCN)
3178 adev->mode_info.num_crtc = 4;
3179 adev->mode_info.num_hpd = 4;
3180 adev->mode_info.num_dig = 4;
3185 adev->mode_info.num_crtc = 6;
3186 adev->mode_info.num_hpd = 6;
3187 adev->mode_info.num_dig = 6;
3190 adev->mode_info.num_crtc = 5;
3191 adev->mode_info.num_hpd = 5;
3192 adev->mode_info.num_dig = 5;
3195 adev->mode_info.num_crtc = 4;
3196 adev->mode_info.num_hpd = 4;
3197 adev->mode_info.num_dig = 4;
3200 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3204 amdgpu_dm_set_irq_funcs(adev);
3206 if (adev->mode_info.funcs == NULL)
3207 adev->mode_info.funcs = &dm_display_funcs;
3210 * Note: Do NOT change adev->audio_endpt_rreg and
3211 * adev->audio_endpt_wreg because they are initialised in
3212 * amdgpu_device_init()
3214 #if defined(CONFIG_DEBUG_KERNEL_DC)
3217 &dev_attr_s3_debug);
/*
 * modeset_required() - true when the CRTC state demands a full stream
 * (re)build: needs_modeset, enabled, and active. The intermediate returns
 * between the checks are elided in this copy.
 */
3223 static bool modeset_required(struct drm_crtc_state *crtc_state,
3224 struct dc_stream_state *new_stream,
3225 struct dc_stream_state *old_stream)
3227 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3230 if (!crtc_state->enable)
3233 return crtc_state->active;
/*
 * modereset_required() - true when a modeset is needed and the CRTC ends up
 * disabled or inactive, i.e. the existing stream must be torn down.
 */
3236 static bool modereset_required(struct drm_crtc_state *crtc_state)
3238 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3241 return !crtc_state->enable || !crtc_state->active;
/* Encoder .destroy hook: DRM cleanup (kfree of the encoder elided here). */
3244 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3246 drm_encoder_cleanup(encoder);
/* Minimal encoder vtable; only destruction is needed for DM encoders. */
3250 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3251 .destroy = amdgpu_dm_encoder_destroy,
/*
 * fill_dc_scaling_info() - translate a DRM plane state's src (16.16 fixed
 * point) and dst rects into DC scaling info, rejecting zero-sized rects and
 * scaling ratios outside 0.25x..16x. Returns 0 on success; the -EINVAL
 * returns on the guard checks are elided in this copy.
 */
3255 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3256 struct dc_scaling_info *scaling_info)
3258 int scale_w, scale_h;
3260 memset(scaling_info, 0, sizeof(*scaling_info));
3262 /* Source is fixed 16.16 but we ignore mantissa for now... */
3263 scaling_info->src_rect.x = state->src_x >> 16;
3264 scaling_info->src_rect.y = state->src_y >> 16;
3266 scaling_info->src_rect.width = state->src_w >> 16;
3267 if (scaling_info->src_rect.width == 0)
3270 scaling_info->src_rect.height = state->src_h >> 16;
3271 if (scaling_info->src_rect.height == 0)
3274 scaling_info->dst_rect.x = state->crtc_x;
3275 scaling_info->dst_rect.y = state->crtc_y;
3277 if (state->crtc_w == 0)
3280 scaling_info->dst_rect.width = state->crtc_w;
3282 if (state->crtc_h == 0)
3285 scaling_info->dst_rect.height = state->crtc_h;
3287 /* DRM doesn't specify clipping on destination output. */
3288 scaling_info->clip_rect = scaling_info->dst_rect;
3290 /* TODO: Validate scaling per-format with DC plane caps */
/* Ratios are checked in permille: 250 = 0.25x, 16000 = 16x. */
3291 scale_w = scaling_info->dst_rect.width * 1000 /
3292 scaling_info->src_rect.width;
3294 if (scale_w < 250 || scale_w > 16000)
3297 scale_h = scaling_info->dst_rect.height * 1000 /
3298 scaling_info->src_rect.height;
3300 if (scale_h < 250 || scale_h > 16000)
3304 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3305 * assume reasonable defaults based on the format.
/*
 * get_fb_info() - reserve the framebuffer BO and read its tiling flags.
 * Returns 0 on success; amdgpu_bo_reserve() errors are propagated
 * (-ERESTARTSYS silently, since it just means a pending signal).
 */
3311 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3312 uint64_t *tiling_flags)
3314 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3315 int r = amdgpu_bo_reserve(rbo, false);
3318 /* Don't show error message when returning -ERESTARTSYS */
3319 if (r != -ERESTARTSYS)
3320 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3325 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3327 amdgpu_bo_unreserve(rbo);
/*
 * get_dcc_address() - compute the DCC metadata address for a surface.
 * The tiling flags store the DCC offset in 256-byte units; a zero offset
 * means no DCC, signalled by returning 0.
 */
3332 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3334 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3336 return offset ? (address + offset * 256) : 0;
/*
 * fill_plane_dcc_attributes() - query DC whether the surface can use DCC and,
 * if so, fill @dcc and the DCC metadata address in @address. Bails out (early
 * returns elided in this copy) when DCC is force-disabled, the format is
 * video, DC lacks the capability query, or the surface isn't DCC-capable.
 * The return type line (static int) is elided in this copy.
 */
3340 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3341 const struct amdgpu_framebuffer *afb,
3342 const enum surface_pixel_format format,
3343 const enum dc_rotation_angle rotation,
3344 const struct plane_size *plane_size,
3345 const union dc_tiling_info *tiling_info,
3346 const uint64_t info,
3347 struct dc_plane_dcc_param *dcc,
3348 struct dc_plane_address *address,
3349 bool force_disable_dcc)
3351 struct dc *dc = adev->dm.dc;
3352 struct dc_dcc_surface_param input;
3353 struct dc_surface_dcc_cap output;
3354 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3355 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3356 uint64_t dcc_address;
3358 memset(&input, 0, sizeof(input));
3359 memset(&output, 0, sizeof(output));
3361 if (force_disable_dcc)
3367 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3370 if (!dc->cap_funcs.get_dcc_compression_cap)
3373 input.format = format;
3374 input.surface_size.width = plane_size->surface_size.width;
3375 input.surface_size.height = plane_size->surface_size.height;
3376 input.swizzle_mode = tiling_info->gfx9.swizzle;
3378 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3379 input.scan = SCAN_DIRECTION_HORIZONTAL;
3380 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3381 input.scan = SCAN_DIRECTION_VERTICAL;
3383 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3386 if (!output.capable)
/* BO requires independent 64B blocks but hardware cap disagrees: no DCC. */
3389 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3394 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3395 dcc->independent_64b_blks = i64b;
3397 dcc_address = get_dcc_address(afb->address, info);
3398 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3399 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
/*
 * fill_plane_buffer_attributes() - derive DC surface geometry, addressing
 * and tiling from an amdgpu framebuffer: plane_size + address for RGB vs
 * semi-planar video formats, GFX8 macro/micro tiling or GFX9 swizzle
 * parameters from the BO tiling flags, then DCC via
 * fill_plane_dcc_attributes() on GFX9-class ASICs.
 * The return type line (static int) is elided in this copy.
 */
3405 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3406 const struct amdgpu_framebuffer *afb,
3407 const enum surface_pixel_format format,
3408 const enum dc_rotation_angle rotation,
3409 const uint64_t tiling_flags,
3410 union dc_tiling_info *tiling_info,
3411 struct plane_size *plane_size,
3412 struct dc_plane_dcc_param *dcc,
3413 struct dc_plane_address *address,
3414 bool force_disable_dcc)
3416 const struct drm_framebuffer *fb = &afb->base;
3419 memset(tiling_info, 0, sizeof(*tiling_info));
3420 memset(plane_size, 0, sizeof(*plane_size));
3421 memset(dcc, 0, sizeof(*dcc));
3422 memset(address, 0, sizeof(*address));
3424 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
/* Single-plane RGB surface: pitch is in pixels, not bytes. */
3425 plane_size->surface_size.x = 0;
3426 plane_size->surface_size.y = 0;
3427 plane_size->surface_size.width = fb->width;
3428 plane_size->surface_size.height = fb->height;
3429 plane_size->surface_pitch =
3430 fb->pitches[0] / fb->format->cpp[0];
3432 address->type = PLN_ADDR_TYPE_GRAPHICS;
3433 address->grph.addr.low_part = lower_32_bits(afb->address);
3434 address->grph.addr.high_part = upper_32_bits(afb->address);
3435 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
/* Semi-planar video (e.g. NV12/P010): chroma plane at fb->offsets[1]. */
3436 uint64_t chroma_addr = afb->address + fb->offsets[1];
3438 plane_size->surface_size.x = 0;
3439 plane_size->surface_size.y = 0;
3440 plane_size->surface_size.width = fb->width;
3441 plane_size->surface_size.height = fb->height;
3442 plane_size->surface_pitch =
3443 fb->pitches[0] / fb->format->cpp[0];
3445 plane_size->chroma_size.x = 0;
3446 plane_size->chroma_size.y = 0;
3447 /* TODO: set these based on surface format */
3448 plane_size->chroma_size.width = fb->width / 2;
3449 plane_size->chroma_size.height = fb->height / 2;
3451 plane_size->chroma_pitch =
3452 fb->pitches[1] / fb->format->cpp[1];
3454 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3455 address->video_progressive.luma_addr.low_part =
3456 lower_32_bits(afb->address);
3457 address->video_progressive.luma_addr.high_part =
3458 upper_32_bits(afb->address);
3459 address->video_progressive.chroma_addr.low_part =
3460 lower_32_bits(chroma_addr);
3461 address->video_progressive.chroma_addr.high_part =
3462 upper_32_bits(chroma_addr);
3465 /* Fill GFX8 params */
3466 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3467 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3469 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3470 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3471 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3472 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3473 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3475 /* XXX fix me for VI */
3476 tiling_info->gfx8.num_banks = num_banks;
3477 tiling_info->gfx8.array_mode =
3478 DC_ARRAY_2D_TILED_THIN1;
3479 tiling_info->gfx8.tile_split = tile_split;
3480 tiling_info->gfx8.bank_width = bankw;
3481 tiling_info->gfx8.bank_height = bankh;
3482 tiling_info->gfx8.tile_aspect = mtaspect;
3483 tiling_info->gfx8.tile_mode =
3484 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3485 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3486 == DC_ARRAY_1D_TILED_THIN1) {
3487 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3490 tiling_info->gfx8.pipe_config =
3491 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
/* GFX9-class ASICs use swizzle-based tiling read from GB_ADDR_CONFIG. */
3493 if (adev->asic_type == CHIP_VEGA10 ||
3494 adev->asic_type == CHIP_VEGA12 ||
3495 adev->asic_type == CHIP_VEGA20 ||
3496 adev->asic_type == CHIP_NAVI10 ||
3497 adev->asic_type == CHIP_NAVI14 ||
3498 adev->asic_type == CHIP_NAVI12 ||
3499 adev->asic_type == CHIP_RENOIR ||
3500 adev->asic_type == CHIP_RAVEN) {
3501 /* Fill GFX9 params */
3502 tiling_info->gfx9.num_pipes =
3503 adev->gfx.config.gb_addr_config_fields.num_pipes;
3504 tiling_info->gfx9.num_banks =
3505 adev->gfx.config.gb_addr_config_fields.num_banks;
3506 tiling_info->gfx9.pipe_interleave =
3507 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3508 tiling_info->gfx9.num_shader_engines =
3509 adev->gfx.config.gb_addr_config_fields.num_se;
3510 tiling_info->gfx9.max_compressed_frags =
3511 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3512 tiling_info->gfx9.num_rb_per_se =
3513 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3514 tiling_info->gfx9.swizzle =
3515 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3516 tiling_info->gfx9.shaderEnable = 1;
3518 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3519 plane_size, tiling_info,
3520 tiling_flags, dcc, address,
/*
 * fill_blending_from_plane_state() - derive alpha blending settings from a
 * DRM plane state. Defaults: no per-pixel alpha, no global alpha, 0xff.
 * Per-pixel alpha only for overlay planes with premultiplied blend mode and
 * an alpha-capable format; global alpha whenever plane alpha < 0xffff
 * (16-bit DRM alpha folded down to 8 bits). Return type line elided here.
 */
3530 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3531 bool *per_pixel_alpha, bool *global_alpha,
3532 int *global_alpha_value)
3534 *per_pixel_alpha = false;
3535 *global_alpha = false;
3536 *global_alpha_value = 0xff;
3538 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3541 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3542 static const uint32_t alpha_formats[] = {
3543 DRM_FORMAT_ARGB8888,
3544 DRM_FORMAT_RGBA8888,
3545 DRM_FORMAT_ABGR8888,
3547 uint32_t format = plane_state->fb->format->format;
3550 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3551 if (format == alpha_formats[i]) {
3552 *per_pixel_alpha = true;
3558 if (plane_state->alpha < 0xffff) {
3559 *global_alpha = true;
3560 *global_alpha_value = plane_state->alpha >> 8;
/*
 * fill_plane_color_attributes() - map DRM color encoding/range properties to
 * a DC color space. RGB formats always get COLOR_SPACE_SRGB; YCbCr formats
 * select BT.601/709/2020 full or limited range (the full_range branch
 * structure is partially elided in this copy). Return type line elided.
 */
3565 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3566 const enum surface_pixel_format format,
3567 enum dc_color_space *color_space)
3571 *color_space = COLOR_SPACE_SRGB;
3573 /* DRM color properties only affect non-RGB formats. */
3574 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3577 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3579 switch (plane_state->color_encoding) {
3580 case DRM_COLOR_YCBCR_BT601:
3582 *color_space = COLOR_SPACE_YCBCR601;
3584 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3587 case DRM_COLOR_YCBCR_BT709:
3589 *color_space = COLOR_SPACE_YCBCR709;
3591 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3594 case DRM_COLOR_YCBCR_BT2020:
3596 *color_space = COLOR_SPACE_2020_YCBCR;
/*
 * fill_dc_plane_info_and_addr() - assemble a full dc_plane_info from a DRM
 * plane state: pixel format (DRM fourcc -> DC surface format), rotation,
 * color space, buffer/tiling/DCC attributes and blending. Unknown fourccs
 * are logged and rejected. Return type line elided in this copy.
 */
3609 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3610 const struct drm_plane_state *plane_state,
3611 const uint64_t tiling_flags,
3612 struct dc_plane_info *plane_info,
3613 struct dc_plane_address *address,
3614 bool force_disable_dcc)
3616 const struct drm_framebuffer *fb = plane_state->fb;
3617 const struct amdgpu_framebuffer *afb =
3618 to_amdgpu_framebuffer(plane_state->fb);
3619 struct drm_format_name_buf format_name;
3622 memset(plane_info, 0, sizeof(*plane_info));
3624 switch (fb->format->format) {
3626 plane_info->format =
3627 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3629 case DRM_FORMAT_RGB565:
3630 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3632 case DRM_FORMAT_XRGB8888:
3633 case DRM_FORMAT_ARGB8888:
3634 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3636 case DRM_FORMAT_XRGB2101010:
3637 case DRM_FORMAT_ARGB2101010:
3638 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3640 case DRM_FORMAT_XBGR2101010:
3641 case DRM_FORMAT_ABGR2101010:
3642 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3644 case DRM_FORMAT_XBGR8888:
3645 case DRM_FORMAT_ABGR8888:
3646 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3648 case DRM_FORMAT_NV21:
3649 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3651 case DRM_FORMAT_NV12:
3652 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3654 case DRM_FORMAT_P010:
3655 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3659 "Unsupported screen format %s\n",
3660 drm_get_format_name(fb->format->format, &format_name));
3664 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3665 case DRM_MODE_ROTATE_0:
3666 plane_info->rotation = ROTATION_ANGLE_0;
3668 case DRM_MODE_ROTATE_90:
3669 plane_info->rotation = ROTATION_ANGLE_90;
3671 case DRM_MODE_ROTATE_180:
3672 plane_info->rotation = ROTATION_ANGLE_180;
3674 case DRM_MODE_ROTATE_270:
3675 plane_info->rotation = ROTATION_ANGLE_270;
3678 plane_info->rotation = ROTATION_ANGLE_0;
3682 plane_info->visible = true;
3683 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3685 plane_info->layer_index = 0;
3687 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3688 &plane_info->color_space);
3692 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3693 plane_info->rotation, tiling_flags,
3694 &plane_info->tiling_info,
3695 &plane_info->plane_size,
3696 &plane_info->dcc, address,
3701 fill_blending_from_plane_state(
3702 plane_state, &plane_info->per_pixel_alpha,
3703 &plane_info->global_alpha, &plane_info->global_alpha_value);
3708 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3709 struct dc_plane_state *dc_plane_state,
3710 struct drm_plane_state *plane_state,
3711 struct drm_crtc_state *crtc_state)
3713 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3714 const struct amdgpu_framebuffer *amdgpu_fb =
3715 to_amdgpu_framebuffer(plane_state->fb);
3716 struct dc_scaling_info scaling_info;
3717 struct dc_plane_info plane_info;
3718 uint64_t tiling_flags;
3720 bool force_disable_dcc = false;
3722 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3726 dc_plane_state->src_rect = scaling_info.src_rect;
3727 dc_plane_state->dst_rect = scaling_info.dst_rect;
3728 dc_plane_state->clip_rect = scaling_info.clip_rect;
3729 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3731 ret = get_fb_info(amdgpu_fb, &tiling_flags);
3735 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3736 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3738 &dc_plane_state->address,
3743 dc_plane_state->format = plane_info.format;
3744 dc_plane_state->color_space = plane_info.color_space;
3745 dc_plane_state->format = plane_info.format;
3746 dc_plane_state->plane_size = plane_info.plane_size;
3747 dc_plane_state->rotation = plane_info.rotation;
3748 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3749 dc_plane_state->stereo_format = plane_info.stereo_format;
3750 dc_plane_state->tiling_info = plane_info.tiling_info;
3751 dc_plane_state->visible = plane_info.visible;
3752 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3753 dc_plane_state->global_alpha = plane_info.global_alpha;
3754 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3755 dc_plane_state->dcc = plane_info.dcc;
3756 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3759 * Always set input transfer function, since plane state is refreshed
3762 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
/*
 * update_stream_scaling_settings() - compute the stream src/dst rectangles
 * for the connector's scaling mode (full / aspect / center) and apply the
 * user-configured underscan borders.
 */
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space*/
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	rmx_type = dm_state->scaling;
	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
		/* Preserve aspect ratio: shrink the dimension that would
		 * otherwise be over-scaled. */
		if (src.width * dst.height <
				src.height * dst.width) {
			/* height needs less upscaling/more downscaling */
			dst.width = src.width *
					dst.height / src.height;
			/* width needs less upscaling/more downscaling */
			dst.height = src.height *
					dst.width / src.width;
	} else if (rmx_type == RMX_CENTER) {

	/* Center the destination rectangle in the addressable area. */
	dst.x = (stream->timing.h_addressable - dst.width) / 2;
	dst.y = (stream->timing.v_addressable - dst.height) / 2;

	if (dm_state->underscan_enable) {
		dst.x += dm_state->underscan_hborder / 2;
		dst.y += dm_state->underscan_vborder / 2;
		dst.width -= dm_state->underscan_hborder;
		dst.height -= dm_state->underscan_vborder;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);
/*
 * convert_color_depth_from_display_info() - pick a DC color depth for the
 * connector.
 *
 * Starts from the EDID-reported bpc (default 8), caps it by the HDMI 2.0
 * HF-VSDB YCbCr 4:2:0 deep-color flags and by the user's max_requested_bpc
 * connector property, then maps the resulting bpc to a DC enum value.
 */
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      const struct drm_connector_state *state,

	/* Cap display bpc based on HDMI 2.0 HF-VSDB */
	if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
	else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
	else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)

	bpc = (uint8_t)connector->display_info.bpc;
	/* Assume 8 bpc by default if no bpc is specified. */
	bpc = bpc ? bpc : 8;

	state = connector->state;

	/*
	 * Cap display bpc based on the user requested value.
	 *
	 * The value for state->max_bpc may not correctly updated
	 * depending on when the connector gets added to the state
	 * or if this was called outside of atomic check, so it
	 * can't be used directly.
	 */
	bpc = min(bpc, state->max_requested_bpc);

	/* Round down to the nearest even number. */
	bpc = bpc - (bpc & 1);

	/*
	 * Temporary Work around, DRM doesn't parse color depth for
	 * EDID revision before 1.4
	 * TODO: Fix edid parsing
	 */
	return COLOR_DEPTH_888;
	/* bpc -> DC color depth mapping (6/8/10/12/14/16). */
	return COLOR_DEPTH_666;
	return COLOR_DEPTH_888;
	return COLOR_DEPTH_101010;
	return COLOR_DEPTH_121212;
	return COLOR_DEPTH_141414;
	return COLOR_DEPTH_161616;
	/* Unexpected bpc value. */
	return COLOR_DEPTH_UNDEFINED;
/* Convert a DRM picture aspect ratio into the DC equivalent. */
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
/*
 * get_output_color_space() - choose the stream output color space from its
 * CRTC timing: sRGB for RGB output; for YCbCr output, BT.709 vs. BT.601 is
 * selected by pixel clock (HDTV vs. SDTV split per the HDMI spec).
 */
static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:

		/*
		 * 27030khz is the separation point between HDTV and SDTV
		 * according to HDMI spec, we use YCbCr709 and YCbCr601
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
					COLOR_SPACE_YCBCR709_LIMITED;
				color_space = COLOR_SPACE_YCBCR709;
			if (dc_crtc_timing->flags.Y_ONLY)
					COLOR_SPACE_YCBCR601_LIMITED;
				color_space = COLOR_SPACE_YCBCR601;

	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
/*
 * adjust_colour_depth_from_display_info() - lower the timing's color depth
 * until the resulting (depth-scaled) pixel clock fits the sink's maximum
 * TMDS clock.  Returns whether a depth that fits was found.
 */
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
	enum dc_color_depth depth = timing_out->display_color_depth;

	normalized_clk = timing_out->pix_clk_100hz / 10;
	/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
	if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
		normalized_clk /= 2;
	/* Adjusting pix clock following on HDMI spec based on colour depth */
	case COLOR_DEPTH_888:
	case COLOR_DEPTH_101010:
		normalized_clk = (normalized_clk * 30) / 24;
	case COLOR_DEPTH_121212:
		normalized_clk = (normalized_clk * 36) / 24;
	case COLOR_DEPTH_161616:
		normalized_clk = (normalized_clk * 48) / 24;
	/* The above depths are the only ones valid for HDMI. */

	if (normalized_clk <= info->max_tmds_clock) {
		timing_out->display_color_depth = depth;
	/* Try the next lower depth until we run out of options. */
	} while (--depth > COLOR_DEPTH_666);
/*
 * fill_stream_properties_from_drm_display_mode() - translate a DRM display
 * mode into DC stream timing.
 *
 * Picks the pixel encoding (RGB / YCbCr 4:4:4 / 4:2:0), color depth, VIC,
 * sync polarities and all h/v timing fields.  When @old_stream is given,
 * its VIC and polarities are reused instead of being re-derived.  For HDMI,
 * the color depth may be lowered (or 4:2:0 forced) to fit the sink's TMDS
 * clock limit.
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream)
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector, connector_state,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	/* Reuse the old stream's VIC and sync polarities when present. */
	timing_out->vic = old_stream->timing.vic;
	timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
	timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	/* Otherwise derive them from the mode. */
	timing_out->vic = drm_match_cea_mode(mode_in);
	if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
		timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
	if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
		timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/* For HDMI, take VIC/HDMI-VIC from the AVI/vendor infoframes. */
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;

	/* Copy the CRTC-adjusted h/v timing into the DC timing struct. */
	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/*
		 * If the chosen depth doesn't fit the TMDS clock and the mode
		 * also supports 4:2:0, retry with 4:2:0 encoding.
		 */
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
/*
 * fill_audio_info() - populate the stream audio_info from the sink's EDID
 * caps (manufacturer/product IDs, display name, supported audio modes,
 * speaker allocation) and the connector's reported latencies.
 */
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	/* Short audio descriptors are only present from CEA-861 rev 3 on. */
	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4110 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4111 struct drm_display_mode *dst_mode)
4113 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4114 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4115 dst_mode->crtc_clock = src_mode->crtc_clock;
4116 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4117 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4118 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4119 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4120 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4121 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4122 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4123 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4124 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4125 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4126 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
/*
 * decide_crtc_timing_for_drm_display_mode() - when scaling is enabled, or
 * the requested mode matches the native mode's clock/totals, adopt the
 * native mode's CRTC timing; otherwise leave the mode untouched.
 */
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
		/* no scaling nor amdgpu inserted, no need to patch */
/*
 * create_fake_sink() - create a virtual DC sink on the connector's link,
 * used when no real sink is attached (e.g. headless/fake-enable paths).
 * Caller owns the returned sink reference.
 */
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
		DRM_ERROR("Failed to create sink!\n");

	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
/*
 * set_multisync_trigger_params() - if the stream participates in a
 * triggered CRTC reset, arm it on the VSYNC rising edge with a next-line
 * delay.
 */
static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
/*
 * set_master_stream() - among the streams with triggered CRTC reset
 * enabled, pick the one with the highest refresh rate as master and point
 * the others' reset event source at it.
 */
static void set_master_stream(struct dc_stream_state *stream_set[],
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			/* refresh rate (Hz) = pixel clock / (htotal * vtotal) */
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
	for (j = 0; j < stream_count; j++) {
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
/*
 * dm_enable_per_frame_crtc_master_sync() - configure per-frame CRTC sync
 * across all streams in @context (only meaningful with 2+ streams).
 */
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)

	if (context->stream_count < 2)

	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	set_master_stream(context->streams, context->stream_count);
/*
 * create_stream_for_sink() - build a dc_stream_state for a connector.
 *
 * Creates a stream on the connector's sink (or a fake sink when none is
 * attached), fills its timing from @drm_mode (optionally reusing VIC and
 * polarities from @old_stream when scaling is active and the refresh rate
 * is unchanged), then configures scaling, audio, DSC (for DP sinks with
 * DSC decoders) and, when PSR is enabled, the VSC SDP infopacket.
 *
 * Returns the new stream (caller owns the reference) or NULL on failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
	uint32_t link_bandwidth_kbps;
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");

	drm_connector = &aconnector->base;

	/* No real sink attached: fall back to a virtual one. */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
	/* No preferred mode flagged: take the first mode in the list. */
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);

	drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL);
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream);

	stream->timing.flags.DSC = 0;

	/* DSC setup: only attempted for DP sinks with a DSC-capable link. */
	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;

	update_stream_scaling_settings(&mode, dm_state, stream);

		&stream->audio_info,

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_settings.psr_feature_enabled) {
		struct dc *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			/*
			 * Should decide stream support vsc sdp colorimetry
			 * capability before building vsc info packet.
			 */
			stream->use_vsc_sdp_for_colorimetry = false;
			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
				    stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
					stream->use_vsc_sdp_for_colorimetry = true;
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);

	dc_sink_release(sink);
/* drm_crtc_funcs.destroy: tear down the DRM CRTC. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
	drm_crtc_cleanup(crtc);
/*
 * drm_crtc_funcs.atomic_destroy_state: drop the dc_stream reference held
 * by the dm_crtc_state, then free the state.
 */
static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO Destroy dc_stream objects are stream object is flattened */
	dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);
/*
 * drm_crtc_funcs.reset: replace the current CRTC state with a freshly
 * zeroed dm_crtc_state.
 */
static void dm_crtc_reset_state(struct drm_crtc *crtc)
	struct dm_crtc_state *state;

	dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))

	crtc->state = &state->base;
	crtc->state->crtc = crtc;
/*
 * drm_crtc_funcs.atomic_duplicate_state: clone the current dm_crtc_state,
 * taking an extra reference on the dc_stream and copying all DM-private
 * fields (VRR, ABM, freesync, CRC, color-management flags).
 */
static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	/* New state shares the stream: take a reference. */
	state->stream = cur->stream;
	dc_stream_retain(state->stream);

	state->active_planes = cur->active_planes;
	state->interrupts_enabled = cur->interrupts_enabled;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO Duplicate dc_stream after objects are stream object is flattened */

	return &state->base;
/*
 * dm_set_vupdate_irq() - enable/disable the VUPDATE interrupt for a CRTC.
 * Skipped on DCN-family hardware.  Returns 0 on success, -EBUSY otherwise.
 */
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	/* Do not set vupdate for DCN hardware */
	if (adev->family > AMDGPU_FAMILY_AI)

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
/*
 * dm_set_vblank() - enable/disable the VBLANK interrupt for a CRTC.
 * When VRR is active, the VUPDATE interrupt is toggled along with it.
 * Returns 0 on success, -EBUSY otherwise.
 */
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);

	/* vblank irq on -> Only need vupdate irq in vrr mode */
	if (amdgpu_dm_vrr_active(acrtc_state))
		rc = dm_set_vupdate_irq(crtc, true);
	/* vblank irq off -> vupdate irq off */
	rc = dm_set_vupdate_irq(crtc, false);

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
/* drm_crtc_funcs.enable_vblank: turn the CRTC's vblank interrupt on. */
static int dm_enable_vblank(struct drm_crtc *crtc)
	return dm_set_vblank(crtc, true);

/* drm_crtc_funcs.disable_vblank: turn the CRTC's vblank interrupt off. */
static void dm_disable_vblank(struct drm_crtc *crtc)
	dm_set_vblank(crtc, false);
/* Implement only the options currently available for the driver. */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
/*
 * drm_connector_funcs.detect: report connector status from the cached
 * dc_sink (or the user's force setting) — no hardware probing here.
 */
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */
	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
/*
 * drm_connector_funcs.atomic_set_property: store the DM-private connector
 * properties (scaling mode, underscan enable/borders, ABM level) into the
 * dm_connector_state.  Returns 0 on success.
 */
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		/* Map the DRM scaling mode onto the amdgpu RMX type. */
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
		case DRM_MODE_SCALE_NONE:

		if (dm_old_state->scaling == rmx_type)

		dm_new_state->scaling = rmx_type;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
/*
 * drm_connector_funcs.atomic_get_property: read the DM-private connector
 * properties (scaling mode, underscan, ABM level) back out of the
 * dm_connector_state.  Returns 0 on success.
 */
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);

	if (property == dev->mode_config.scaling_mode_property) {
		/* Map the stored RMX type back to a DRM scaling mode. */
		switch (dm_state->scaling) {
			*val = DRM_MODE_SCALE_CENTER;
			*val = DRM_MODE_SCALE_ASPECT;
			*val = DRM_MODE_SCALE_FULLSCREEN;
			*val = DRM_MODE_SCALE_NONE;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
/* drm_connector_funcs.early_unregister: drop the DP AUX channel. */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
/*
 * drm_connector_funcs.destroy: release everything the connector owns —
 * backlight device (eDP/LVDS), emulated and real sink references, CEC
 * registration, i2c adapter and the AUX name string.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* Only eDP/LVDS panels own the backlight device. */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	kfree(aconnector->dm_dp_aux.aux.name);
/*
 * drm_connector_funcs.reset: free the old state and install a fresh
 * dm_connector_state with DM defaults (no scaling/underscan, 8 bpc max,
 * ABM level from the module parameter for eDP panels).
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	state->scaling = RMX_OFF;
	state->underscan_enable = false;
	state->underscan_hborder = 0;
	state->underscan_vborder = 0;
	state->base.max_requested_bpc = 8;
	state->vcpi_slots = 0;

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		state->abm_level = amdgpu_dm_abm_level;

	__drm_atomic_helper_connector_reset(connector, &state->base);
/*
 * drm_connector_funcs.atomic_duplicate_state: kmemdup the current
 * dm_connector_state and re-copy the DM-private fields explicitly.
 */
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
/*
 * drm_connector_funcs.late_register: register the DP AUX channel for
 * DP/eDP connectors and create the connector debugfs entries.
 */
amdgpu_dm_connector_late_register(struct drm_connector *connector)
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
/* Connector function table wiring the DM implementations into DRM. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
/*
 * Thin static adapter so the drm_connector_helper_funcs table below can
 * reference a file-local symbol; all work is delegated to
 * amdgpu_dm_connector_get_modes().
 */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
/*
 * Build an emulated (virtual) DC sink from the EDID override blob attached
 * to the connector. Used when the connector is forced on without a
 * physically detected sink (headless / EDID-managed setups).
 */
4776 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4778 struct dc_sink_init_data init_params = {
4779 .link = aconnector->dc_link,
4780 .sink_signal = SIGNAL_TYPE_VIRTUAL
/* Without a user-supplied EDID there is nothing to emulate: force OFF. */
4784 if (!aconnector->base.edid_blob_ptr) {
4785 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4786 aconnector->base.name);
4788 aconnector->base.force = DRM_FORCE_OFF;
4789 aconnector->base.override_edid = false;
4793 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4795 aconnector->edid = edid;
/* Register the EDID-backed virtual sink with DC. */
4797 aconnector->dc_em_sink = dc_link_add_remote_sink(
4798 aconnector->dc_link,
/* Total EDID size = base block plus extension blocks. */
4800 (edid->extensions + 1) * EDID_LENGTH,
/* When forced ON, prefer a real local sink if DC detected one; fall back
 * to the emulated sink otherwise. Retain to take a reference. */
4803 if (aconnector->base.force == DRM_FORCE_ON) {
4804 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4805 aconnector->dc_link->local_sink :
4806 aconnector->dc_em_sink;
4807 dc_sink_retain(aconnector->dc_sink);
/*
 * One-time setup for user-forced (EDID-managed) connectors: fake sane DP
 * link capabilities so an initial modeset can succeed on a headless boot,
 * then create the emulated sink from the override EDID.
 */
4811 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4813 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4816 * In case of headless boot with force on for DP managed connector
4817 * Those settings have to be != 0 to get initial modeset
4819 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4820 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4821 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4825 aconnector->base.override_edid = true;
4826 create_eml_sink(aconnector);
/*
 * drm_connector_helper_funcs.mode_valid: decide whether a candidate mode
 * can be driven on this connector by building a throw-away DC stream and
 * asking DC to validate it. Interlaced/doublescan modes are rejected
 * up front.
 */
4829 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4830 struct drm_display_mode *mode)
4832 int result = MODE_ERROR;
4833 struct dc_sink *dc_sink;
4834 struct amdgpu_device *adev = connector->dev->dev_private;
4835 /* TODO: Unhardcode stream count */
4836 struct dc_stream_state *stream;
4837 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4838 enum dc_status dc_result = DC_OK;
4840 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4841 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4845 * Only run this the first time mode_valid is called to initilialize
4848 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4849 !aconnector->dc_em_sink)
4850 handle_edid_mgmt(aconnector);
4852 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
/* No sink (real or emulated) means nothing to validate against. */
4854 if (dc_sink == NULL) {
4855 DRM_ERROR("dc_sink is NULL!\n");
/* Temporary stream, released below after validation. */
4859 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4860 if (stream == NULL) {
4861 DRM_ERROR("Failed to create stream for sink!\n");
4865 dc_result = dc_validate_stream(adev->dm.dc, stream);
4867 if (dc_result == DC_OK)
4870 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4876 dc_stream_release(stream);
4879 /* TODO: error handling*/
/*
 * Pack the connector state's HDR static metadata (hdr_output_metadata
 * blob) into a DC info packet: an HDMI Dynamic Range & Mastering (DRM)
 * infoframe for HDMI, or the equivalent SDP layout for DP/eDP.
 * Leaves *out zeroed when no metadata is attached.
 */
4883 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4884 struct dc_info_packet *out)
4886 struct hdmi_drm_infoframe frame;
4887 unsigned char buf[30]; /* 26 + 4 */
4891 memset(out, 0, sizeof(*out));
4893 if (!state->hdr_output_metadata)
4896 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
/* Pack into buf: 4-byte infoframe header followed by 26-byte payload. */
4900 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4904 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4908 /* Prepare the infopacket for DC. */
4909 switch (state->connector->connector_type) {
4910 case DRM_MODE_CONNECTOR_HDMIA:
4911 out->hb0 = 0x87; /* type */
4912 out->hb1 = 0x01; /* version */
4913 out->hb2 = 0x1A; /* length */
4914 out->sb[0] = buf[3]; /* checksum */
4918 case DRM_MODE_CONNECTOR_DisplayPort:
4919 case DRM_MODE_CONNECTOR_eDP:
4920 out->hb0 = 0x00; /* sdp id, zero */
4921 out->hb1 = 0x87; /* type */
4922 out->hb2 = 0x1D; /* payload len - 1 */
4923 out->hb3 = (0x13 << 2); /* sdp version */
4924 out->sb[0] = 0x01; /* version */
4925 out->sb[1] = 0x1A; /* length */
/* Copy the 26-byte static-metadata payload past the packed header;
 * 'i' is the per-connector-type payload offset set above (some of its
 * assignments are not visible in this view). */
4933 memcpy(&out->sb[i], &buf[4], 26);
4936 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4937 sizeof(out->sb), false);
/*
 * Return whether the HDR static metadata blob changed between two
 * connector states. Blobs are compared by pointer first, then (when both
 * exist and are the same length) byte-wise.
 */
4943 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4944 const struct drm_connector_state *new_state)
4946 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4947 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4949 if (old_blob != new_blob) {
4950 if (old_blob && new_blob &&
4951 old_blob->length == new_blob->length)
4952 return memcmp(old_blob->data, new_blob->data,
/*
 * drm_connector_helper_funcs.atomic_check: when the HDR static metadata
 * changed, validate it can be packed and pull the bound CRTC into the
 * atomic state so a modeset can be forced on HDR enter/exit.
 */
4962 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4963 struct drm_atomic_state *state)
4965 struct drm_connector_state *new_con_state =
4966 drm_atomic_get_new_connector_state(state, conn);
4967 struct drm_connector_state *old_con_state =
4968 drm_atomic_get_old_connector_state(state, conn);
4969 struct drm_crtc *crtc = new_con_state->crtc;
4970 struct drm_crtc_state *new_crtc_state;
4976 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4977 struct dc_info_packet hdr_infopacket;
/* Validate the new metadata packs cleanly before committing. */
4979 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4983 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4984 if (IS_ERR(new_crtc_state))
4985 return PTR_ERR(new_crtc_state);
4988 * DC considers the stream backends changed if the
4989 * static metadata changes. Forcing the modeset also
4990 * gives a simple way for userspace to switch from
4991 * 8bpc to 10bpc when setting the metadata to enter
4994 * Changing the static metadata after it's been
4995 * set is permissible, however. So only force a
4996 * modeset if we're entering or exiting HDR.
4998 new_crtc_state->mode_changed =
4999 !old_con_state->hdr_output_metadata ||
5000 !new_con_state->hdr_output_metadata;
/* Probe/validation helper vtable for amdgpu-dm connectors. */
5006 static const struct drm_connector_helper_funcs
5007 amdgpu_dm_connector_helper_funcs = {
5009 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5010 * modes will be filtered by drm_mode_validate_size(), and those modes
5011 * are missing after user start lightdm. So we need to renew modes list.
5012 * in get_modes call back, not just return the modes count
5014 .get_modes = get_modes,
5015 .mode_valid = amdgpu_dm_connector_mode_valid,
5016 .atomic_check = amdgpu_dm_connector_atomic_check,
5019 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
/*
 * Return whether any cursor-type plane is attached to this CRTC state
 * (scans the CRTC's plane_mask for a DRM_PLANE_TYPE_CURSOR plane).
 */
5023 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5025 struct drm_device *dev = new_crtc_state->crtc->dev;
5026 struct drm_plane *plane;
5028 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5029 if (plane->type == DRM_PLANE_TYPE_CURSOR)
/*
 * Count the non-cursor planes that will be enabled (have a framebuffer)
 * on this CRTC after the atomic commit. Planes not present in the atomic
 * state are unchanged and therefore counted as enabled.
 */
5036 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5038 struct drm_atomic_state *state = new_crtc_state->state;
5039 struct drm_plane *plane;
5042 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5043 struct drm_plane_state *new_plane_state;
5045 /* Cursor planes are "fake". */
5046 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5049 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5051 if (!new_plane_state) {
5053 * The plane is enable on the CRTC and hasn't changed
5054 * state. This means that it previously passed
5055 * validation and is therefore enabled.
5061 /* We need a framebuffer to be considered enabled. */
5062 num_active += (new_plane_state->fb != NULL);
5069 * Sets whether interrupts should be enabled on a specific CRTC.
5070 * We require that the stream be enabled and that there exist active
5071 * DC planes on the stream.
5074 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5075 struct drm_crtc_state *new_crtc_state)
5077 struct dm_crtc_state *dm_new_crtc_state =
5078 to_dm_crtc_state(new_crtc_state);
/* Default to off; only enable when a stream with active planes exists. */
5080 dm_new_crtc_state->active_planes = 0;
5081 dm_new_crtc_state->interrupts_enabled = false;
5083 if (!dm_new_crtc_state->stream)
5086 dm_new_crtc_state->active_planes =
5087 count_crtc_active_planes(new_crtc_state);
5089 dm_new_crtc_state->interrupts_enabled =
5090 dm_new_crtc_state->active_planes > 0;
/*
 * drm_crtc_helper_funcs.atomic_check: refresh the CRTC interrupt/active-
 * plane bookkeeping, reject a cursor with no underlying hardware plane,
 * and let DC validate the stream.
 */
5093 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5094 struct drm_crtc_state *state)
5096 struct amdgpu_device *adev = crtc->dev->dev_private;
5097 struct dc *dc = adev->dm.dc;
5098 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5102 * Update interrupt state for the CRTC. This needs to happen whenever
5103 * the CRTC has changed or whenever any of its planes have changed.
5104 * Atomic check satisfies both of these requirements since the CRTC
5105 * is added to the state by DRM during drm_atomic_helper_check_planes.
5107 dm_update_crtc_interrupt_state(crtc, state);
/* A modeset that requires a stream but has none is an internal error. */
5109 if (unlikely(!dm_crtc_state->stream &&
5110 modeset_required(state, NULL, dm_crtc_state->stream))) {
5115 /* In some use cases, like reset, no stream is attached */
5116 if (!dm_crtc_state->stream)
5120 * We want at least one hardware plane enabled to use
5121 * the stream with a cursor enabled.
5123 if (state->enable && state->active &&
5124 does_crtc_have_active_cursor(state) &&
5125 dm_crtc_state->active_planes == 0)
5128 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5134 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5135 const struct drm_display_mode *mode,
5136 struct drm_display_mode *adjusted_mode)
/* CRTC helper vtable: hooks into the DRM atomic check/commit machinery. */
5141 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5142 .disable = dm_crtc_helper_disable,
5143 .atomic_check = dm_crtc_helper_atomic_check,
5144 .mode_fixup = dm_crtc_helper_mode_fixup,
5145 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5148 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5153 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5155 switch (display_color_depth) {
5156 case COLOR_DEPTH_666:
5158 case COLOR_DEPTH_888:
5160 case COLOR_DEPTH_101010:
5162 case COLOR_DEPTH_121212:
5164 case COLOR_DEPTH_141414:
5166 case COLOR_DEPTH_161616:
/*
 * drm_encoder_helper_funcs.atomic_check for MST streams: compute the PBN
 * (payload bandwidth number) for the adjusted mode and atomically reserve
 * VCPI time slots on the MST topology. Non-MST connectors (no port/sink)
 * are skipped.
 */
5174 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5175 struct drm_crtc_state *crtc_state,
5176 struct drm_connector_state *conn_state)
5178 struct drm_atomic_state *state = crtc_state->state;
5179 struct drm_connector *connector = conn_state->connector;
5180 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5181 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5182 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5183 struct drm_dp_mst_topology_mgr *mst_mgr;
5184 struct drm_dp_mst_port *mst_port;
5185 enum dc_color_depth color_depth;
5187 bool is_y420 = false;
/* Only MST connectors carry a port; nothing to reserve otherwise. */
5189 if (!aconnector->port || !aconnector->dc_sink)
5192 mst_port = aconnector->port;
5193 mst_mgr = &aconnector->mst_port->mst_mgr;
5195 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
/* Skip recomputation for duplicated (suspend/resume) states. */
5198 if (!state->duplicated) {
5199 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5200 aconnector->force_yuv420_output;
5201 color_depth = convert_color_depth_from_display_info(connector, conn_state,
/* bpp = bits per component x 3 components (RGB/YCbCr). */
5203 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5204 clock = adjusted_mode->clock;
5205 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5207 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5210 dm_new_connector_state->pbn,
/* A negative slot count is the error code from the MST helper. */
5212 if (dm_new_connector_state->vcpi_slots < 0) {
5213 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5214 return dm_new_connector_state->vcpi_slots;
/* Encoder helper vtable; atomic_check handles MST bandwidth reservation. */
5219 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5220 .disable = dm_encoder_helper_disable,
5221 .atomic_check = dm_encoder_helper_atomic_check
5224 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For every MST connector in the atomic state, find its DC stream and
 * recompute PBN/VCPI with DSC taken into account: disable DSC on the MST
 * port when the stream doesn't use it, otherwise re-reserve slots using
 * the DSC-compressed bits-per-pixel.
 */
5225 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5226 struct dc_state *dc_state)
5228 struct dc_stream_state *stream = NULL;
5229 struct drm_connector *connector;
5230 struct drm_connector_state *new_con_state, *old_con_state;
5231 struct amdgpu_dm_connector *aconnector;
5232 struct dm_connector_state *dm_conn_state;
5233 int i, j, clock, bpp;
5234 int vcpi, pbn_div, pbn = 0;
5236 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5238 aconnector = to_amdgpu_dm_connector(connector);
/* Only MST connectors (with a port) participate. */
5240 if (!aconnector->port)
5243 if (!new_con_state || !new_con_state->crtc)
5246 dm_conn_state = to_dm_connector_state(new_con_state);
/* Locate the DC stream that belongs to this connector. */
5248 for (j = 0; j < dc_state->stream_count; j++) {
5249 stream = dc_state->streams[j];
5253 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
/* Stream doesn't use DSC: make sure DSC is off on the MST port. */
5262 if (stream->timing.flags.DSC != 1) {
5263 drm_dp_mst_atomic_enable_dsc(state,
/* DSC path: PBN from the compressed bpp and pixel clock (kHz). */
5271 pbn_div = dm_mst_get_pbn_divider(stream->link);
5272 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5273 clock = stream->timing.pix_clk_100hz / 10;
5274 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5275 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5282 dm_conn_state->pbn = pbn;
5283 dm_conn_state->vcpi_slots = vcpi;
/*
 * drm_plane_funcs.reset: free any existing plane state and install a
 * fresh zeroed dm_plane_state via the core reset helper.
 */
5289 static void dm_drm_plane_reset(struct drm_plane *plane)
5291 struct dm_plane_state *amdgpu_state = NULL;
5294 plane->funcs->atomic_destroy_state(plane, plane->state);
5296 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
/* Allocation failure during reset is unrecoverable here; just warn. */
5297 WARN_ON(amdgpu_state == NULL);
5300 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
/*
 * drm_plane_funcs.atomic_duplicate_state: clone the plane state and take
 * an extra reference on the shared DC plane state, if any.
 */
5303 static struct drm_plane_state *
5304 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5306 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5308 old_dm_plane_state = to_dm_plane_state(plane->state);
5309 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5310 if (!dm_plane_state)
5313 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
/* Both old and new states now reference the DC state: retain it. */
5315 if (old_dm_plane_state->dc_state) {
5316 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5317 dc_plane_state_retain(dm_plane_state->dc_state);
5320 return &dm_plane_state->base;
5323 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5324 struct drm_plane_state *state)
5326 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5328 if (dm_plane_state->dc_state)
5329 dc_plane_state_release(dm_plane_state->dc_state);
5331 drm_atomic_helper_plane_destroy_state(plane, state);
/* DRM core plane vtable; generic helpers plus custom state management. */
5334 static const struct drm_plane_funcs dm_plane_funcs = {
5335 .update_plane = drm_atomic_helper_update_plane,
5336 .disable_plane = drm_atomic_helper_disable_plane,
5337 .destroy = drm_primary_helper_destroy,
5338 .reset = dm_drm_plane_reset,
5339 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5340 .atomic_destroy_state = dm_drm_plane_destroy_state,
/*
 * drm_plane_helper_funcs.prepare_fb: pin the framebuffer's BO in a
 * display-capable domain, bind it in GART, record its GPU address and
 * tiling, and (when the DC plane state changed) refresh the DC buffer
 * attributes (tiling info, plane size, DCC, address).
 */
5343 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5344 struct drm_plane_state *new_state)
5346 struct amdgpu_framebuffer *afb;
5347 struct drm_gem_object *obj;
5348 struct amdgpu_device *adev;
5349 struct amdgpu_bo *rbo;
5350 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5351 struct list_head list;
5352 struct ttm_validate_buffer tv;
5353 struct ww_acquire_ctx ticket;
5354 uint64_t tiling_flags;
5357 bool force_disable_dcc = false;
5359 dm_plane_state_old = to_dm_plane_state(plane->state);
5360 dm_plane_state_new = to_dm_plane_state(new_state);
/* Disabling the plane (no fb) needs no pinning work. */
5362 if (!new_state->fb) {
5363 DRM_DEBUG_DRIVER("No FB bound\n");
5367 afb = to_amdgpu_framebuffer(new_state->fb);
5368 obj = new_state->fb->obj[0];
5369 rbo = gem_to_amdgpu_bo(obj);
5370 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5371 INIT_LIST_HEAD(&list);
/* NOTE(review): tv.bo/tv.num_shared setup appears to be in lines not
 * visible here — confirm against the full file. */
5375 list_add(&tv.head, &list);
5377 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5379 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
/* Cursor BOs must live in VRAM; other planes may use any display domain. */
5383 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5384 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5386 domain = AMDGPU_GEM_DOMAIN_VRAM;
5388 r = amdgpu_bo_pin(rbo, domain);
5389 if (unlikely(r != 0)) {
/* -ERESTARTSYS is a signal interruption, not worth a scary log. */
5390 if (r != -ERESTARTSYS)
5391 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5392 ttm_eu_backoff_reservation(&ticket, &list);
5396 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5397 if (unlikely(r != 0)) {
5398 amdgpu_bo_unpin(rbo);
5399 ttm_eu_backoff_reservation(&ticket, &list);
5400 DRM_ERROR("%p bind failed\n", rbo);
5404 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5406 ttm_eu_backoff_reservation(&ticket, &list);
5408 afb->address = amdgpu_bo_gpu_offset(rbo);
/* Refresh the DC buffer descriptors only when the DC state changed. */
5412 if (dm_plane_state_new->dc_state &&
5413 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5414 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
/* DCC must be off on Raven while suspending (hardware restriction). */
5416 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5417 fill_plane_buffer_attributes(
5418 adev, afb, plane_state->format, plane_state->rotation,
5419 tiling_flags, &plane_state->tiling_info,
5420 &plane_state->plane_size, &plane_state->dcc,
5421 &plane_state->address,
/*
 * drm_plane_helper_funcs.cleanup_fb: unpin and drop the reference on the
 * BO that prepare_fb pinned for the old plane state.
 */
5428 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5429 struct drm_plane_state *old_state)
5431 struct amdgpu_bo *rbo;
5437 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5438 r = amdgpu_bo_reserve(rbo, false);
/* If we cannot reserve we leak the pin; nothing safe to do but log. */
5440 DRM_ERROR("failed to reserve rbo before unpin\n");
5444 amdgpu_bo_unpin(rbo);
5445 amdgpu_bo_unreserve(rbo);
5446 amdgpu_bo_unref(&rbo);
/*
 * drm_plane_helper_funcs.atomic_check: verify scaling parameters can be
 * derived from the plane state, then let DC validate the plane. Planes
 * with no DC state attached are trivially accepted.
 */
5449 static int dm_plane_atomic_check(struct drm_plane *plane,
5450 struct drm_plane_state *state)
5452 struct amdgpu_device *adev = plane->dev->dev_private;
5453 struct dc *dc = adev->dm.dc;
5454 struct dm_plane_state *dm_plane_state;
5455 struct dc_scaling_info scaling_info;
5458 dm_plane_state = to_dm_plane_state(state);
5460 if (!dm_plane_state->dc_state)
5463 ret = fill_dc_scaling_info(state, &scaling_info);
5467 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5473 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5474 struct drm_plane_state *new_plane_state)
5476 /* Only support async updates on cursor planes. */
5477 if (plane->type != DRM_PLANE_TYPE_CURSOR)
/*
 * drm_plane_helper_funcs.atomic_async_update: apply a cursor update
 * without a full commit — swap the fb into the current state, copy the
 * src/crtc rectangles, and program the cursor hardware directly.
 */
5483 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5484 struct drm_plane_state *new_state)
5486 struct drm_plane_state *old_state =
5487 drm_atomic_get_old_plane_state(new_state->state, plane);
/* swap() keeps refcounting balanced: old fb ends up in new_state for
 * later cleanup. */
5489 swap(plane->state->fb, new_state->fb);
5491 plane->state->src_x = new_state->src_x;
5492 plane->state->src_y = new_state->src_y;
5493 plane->state->src_w = new_state->src_w;
5494 plane->state->src_h = new_state->src_h;
5495 plane->state->crtc_x = new_state->crtc_x;
5496 plane->state->crtc_y = new_state->crtc_y;
5497 plane->state->crtc_w = new_state->crtc_w;
5498 plane->state->crtc_h = new_state->crtc_h;
5500 handle_cursor_update(plane, old_state);
/* Plane helper vtable: fb pinning lifecycle, validation, async cursor. */
5503 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5504 .prepare_fb = dm_plane_helper_prepare_fb,
5505 .cleanup_fb = dm_plane_helper_cleanup_fb,
5506 .atomic_check = dm_plane_atomic_check,
5507 .atomic_async_check = dm_plane_atomic_async_check,
5508 .atomic_async_update = dm_plane_atomic_async_update
5512 * TODO: these are currently initialized to rgb formats only.
5513 * For future use cases we should either initialize them dynamically based on
5514 * plane capabilities, or initialize this array to all formats, so internal drm
5515 * check will succeed, and let DC implement proper check
/* Pixel formats advertised on primary planes (see get_plane_formats). */
5517 static const uint32_t rgb_formats[] = {
5518 DRM_FORMAT_XRGB8888,
5519 DRM_FORMAT_ARGB8888,
5520 DRM_FORMAT_RGBA8888,
5521 DRM_FORMAT_XRGB2101010,
5522 DRM_FORMAT_XBGR2101010,
5523 DRM_FORMAT_ARGB2101010,
5524 DRM_FORMAT_ABGR2101010,
5525 DRM_FORMAT_XBGR8888,
5526 DRM_FORMAT_ABGR8888,
/* Pixel formats advertised on overlay planes (8-bit RGB variants only). */
5530 static const uint32_t overlay_formats[] = {
5531 DRM_FORMAT_XRGB8888,
5532 DRM_FORMAT_ARGB8888,
5533 DRM_FORMAT_RGBA8888,
5534 DRM_FORMAT_XBGR8888,
5535 DRM_FORMAT_ABGR8888,
5539 static const u32 cursor_formats[] = {
/*
 * Fill 'formats' with the pixel formats a plane of the given type should
 * advertise, bounded by max_formats. Primary planes additionally expose
 * NV12/P010 when the DC plane capabilities report support.
 * Returns the number of formats written.
 */
5543 static int get_plane_formats(const struct drm_plane *plane,
5544 const struct dc_plane_cap *plane_cap,
5545 uint32_t *formats, int max_formats)
5547 int i, num_formats = 0;
5550 * TODO: Query support for each group of formats directly from
5551 * DC plane caps. This will require adding more formats to the
5555 switch (plane->type) {
5556 case DRM_PLANE_TYPE_PRIMARY:
5557 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5558 if (num_formats >= max_formats)
5561 formats[num_formats++] = rgb_formats[i];
/* YUV formats gated on actual hardware capability. */
5564 if (plane_cap && plane_cap->pixel_format_support.nv12)
5565 formats[num_formats++] = DRM_FORMAT_NV12;
5566 if (plane_cap && plane_cap->pixel_format_support.p010)
5567 formats[num_formats++] = DRM_FORMAT_P010;
5570 case DRM_PLANE_TYPE_OVERLAY:
5571 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5572 if (num_formats >= max_formats)
5575 formats[num_formats++] = overlay_formats[i];
5579 case DRM_PLANE_TYPE_CURSOR:
5580 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5581 if (num_formats >= max_formats)
5584 formats[num_formats++] = cursor_formats[i];
/*
 * Register a DRM plane: universal plane init with the type-appropriate
 * format list, then optional per-pixel-alpha/blend and YUV color-space
 * properties depending on the DC plane capabilities.
 */
5592 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5593 struct drm_plane *plane,
5594 unsigned long possible_crtcs,
5595 const struct dc_plane_cap *plane_cap)
5597 uint32_t formats[32];
5601 num_formats = get_plane_formats(plane, plane_cap, formats,
5602 ARRAY_SIZE(formats));
5604 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5605 &dm_plane_funcs, formats, num_formats,
5606 NULL, plane->type, NULL);
/* Alpha/blend properties only where the hardware supports them. */
5610 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5611 plane_cap && plane_cap->per_pixel_alpha) {
5612 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5613 BIT(DRM_MODE_BLEND_PREMULTI);
5615 drm_plane_create_alpha_property(plane);
5616 drm_plane_create_blend_mode_property(plane, blend_caps);
5619 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5621 (plane_cap->pixel_format_support.nv12 ||
5622 plane_cap->pixel_format_support.p010)) {
5623 /* This only affects YUV formats. */
5624 drm_plane_create_color_properties(
5626 BIT(DRM_COLOR_YCBCR_BT601) |
5627 BIT(DRM_COLOR_YCBCR_BT709) |
5628 BIT(DRM_COLOR_YCBCR_BT2020),
5629 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5630 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5631 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5634 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5636 /* Create (reset) the plane state */
5637 if (plane->funcs->reset)
5638 plane->funcs->reset(plane);
/*
 * Create and register one CRTC together with its dedicated cursor plane,
 * wire up the helper funcs, size the cursor from DC caps, and enable
 * color management (gamma/CTM/LUT) on it.
 */
5643 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5644 struct drm_plane *plane,
5645 uint32_t crtc_index)
5647 struct amdgpu_crtc *acrtc = NULL;
5648 struct drm_plane *cursor_plane;
/* Each CRTC owns a private cursor plane (possible_crtcs fixed later). */
5652 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5656 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5657 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5659 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5663 res = drm_crtc_init_with_planes(
5668 &amdgpu_dm_crtc_funcs, NULL);
5673 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5675 /* Create (reset) the plane state */
5676 if (acrtc->base.funcs->reset)
5677 acrtc->base.funcs->reset(&acrtc->base);
5679 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5680 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5682 acrtc->crtc_id = crtc_index;
5683 acrtc->base.enabled = false;
/* otg_inst is assigned when a stream is bound; -1 = none yet. */
5684 acrtc->otg_inst = -1;
5686 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5687 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5688 true, MAX_COLOR_LUT_ENTRIES);
5689 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5695 kfree(cursor_plane);
5700 static int to_drm_connector_type(enum signal_type st)
5703 case SIGNAL_TYPE_HDMI_TYPE_A:
5704 return DRM_MODE_CONNECTOR_HDMIA;
5705 case SIGNAL_TYPE_EDP:
5706 return DRM_MODE_CONNECTOR_eDP;
5707 case SIGNAL_TYPE_LVDS:
5708 return DRM_MODE_CONNECTOR_LVDS;
5709 case SIGNAL_TYPE_RGB:
5710 return DRM_MODE_CONNECTOR_VGA;
5711 case SIGNAL_TYPE_DISPLAY_PORT:
5712 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5713 return DRM_MODE_CONNECTOR_DisplayPort;
5714 case SIGNAL_TYPE_DVI_DUAL_LINK:
5715 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5716 return DRM_MODE_CONNECTOR_DVID;
5717 case SIGNAL_TYPE_VIRTUAL:
5718 return DRM_MODE_CONNECTOR_VIRTUAL;
5721 return DRM_MODE_CONNECTOR_Unknown;
/*
 * Return the (single) encoder attached to this connector, relying on the
 * driver invariant of one encoder per connector.
 */
5725 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5727 struct drm_encoder *encoder;
5729 /* There is only one encoder per connector */
5730 drm_connector_for_each_possible_encoder(connector, encoder)
/*
 * Record the panel's native mode on the encoder: scan the connector's
 * probed modes (assumed sorted, see ddc_get_modes) and keep the last
 * preferred mode found, i.e. the highest-resolution preferred mode.
 */
5736 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5738 struct drm_encoder *encoder;
5739 struct amdgpu_encoder *amdgpu_encoder;
5741 encoder = amdgpu_dm_connector_to_encoder(connector);
5743 if (encoder == NULL)
5746 amdgpu_encoder = to_amdgpu_encoder(encoder);
/* clock == 0 marks "no native mode known". */
5748 amdgpu_encoder->native_mode.clock = 0;
5750 if (!list_empty(&connector->probed_modes)) {
5751 struct drm_display_mode *preferred_mode = NULL;
5753 list_for_each_entry(preferred_mode,
5754 &connector->probed_modes,
5756 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5757 amdgpu_encoder->native_mode = *preferred_mode;
/*
 * Duplicate the encoder's native mode and shrink its active area to
 * hdisplay x vdisplay, producing a scaled "common" mode (the timing stays
 * that of the native mode; the hardware scaler does the rest).
 */
5765 static struct drm_display_mode *
5766 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5768 int hdisplay, int vdisplay)
5770 struct drm_device *dev = encoder->dev;
5771 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5772 struct drm_display_mode *mode = NULL;
5773 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5775 mode = drm_mode_duplicate(dev, native_mode);
5780 mode->hdisplay = hdisplay;
5781 mode->vdisplay = vdisplay;
/* A derived mode must not claim to be the panel's preferred one. */
5782 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5783 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
/*
 * Add a standard set of scaled modes (640x480 … 1920x1200) to the
 * connector's probed list. Modes larger than, equal to, or duplicating
 * the native mode are skipped.
 */
5789 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5790 struct drm_connector *connector)
5792 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5793 struct drm_display_mode *mode = NULL;
5794 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5795 struct amdgpu_dm_connector *amdgpu_dm_connector =
5796 to_amdgpu_dm_connector(connector);
5800 char name[DRM_DISPLAY_MODE_LEN];
5803 } common_modes[] = {
5804 { "640x480", 640, 480},
5805 { "800x600", 800, 600},
5806 { "1024x768", 1024, 768},
5807 { "1280x720", 1280, 720},
5808 { "1280x800", 1280, 800},
5809 {"1280x1024", 1280, 1024},
5810 { "1440x900", 1440, 900},
5811 {"1680x1050", 1680, 1050},
5812 {"1600x1200", 1600, 1200},
5813 {"1920x1080", 1920, 1080},
5814 {"1920x1200", 1920, 1200}
5817 n = ARRAY_SIZE(common_modes);
5819 for (i = 0; i < n; i++) {
5820 struct drm_display_mode *curmode = NULL;
5821 bool mode_existed = false;
/* Skip modes that cannot be downscaled from (or equal) the native mode. */
5823 if (common_modes[i].w > native_mode->hdisplay ||
5824 common_modes[i].h > native_mode->vdisplay ||
5825 (common_modes[i].w == native_mode->hdisplay &&
5826 common_modes[i].h == native_mode->vdisplay))
/* Skip resolutions the EDID already provided. */
5829 list_for_each_entry(curmode, &connector->probed_modes, head) {
5830 if (common_modes[i].w == curmode->hdisplay &&
5831 common_modes[i].h == curmode->vdisplay) {
5832 mode_existed = true;
5840 mode = amdgpu_dm_create_common_mode(encoder,
5841 common_modes[i].name, common_modes[i].w,
5843 drm_mode_probed_add(connector, mode);
5844 amdgpu_dm_connector->num_modes++;
/*
 * Populate the connector's mode list from an EDID (or clear it when the
 * EDID is absent), then sort the list and cache the native mode on the
 * encoder.
 */
5848 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5851 struct amdgpu_dm_connector *amdgpu_dm_connector =
5852 to_amdgpu_dm_connector(connector);
5855 /* empty probed_modes */
5856 INIT_LIST_HEAD(&connector->probed_modes);
5857 amdgpu_dm_connector->num_modes =
5858 drm_add_edid_modes(connector, edid);
5860 /* sorting the probed modes before calling function
5861 * amdgpu_dm_get_native_mode() since EDID can have
5862 * more than one preferred mode. The modes that are
5863 * later in the probed mode list could be of higher
5864 * and preferred resolution. For example, 3840x2160
5865 * resolution in base EDID preferred timing and 4096x2160
5866 * preferred resolution in DID extension block later.
5868 drm_mode_sort(&connector->probed_modes);
5869 amdgpu_dm_get_native_mode(connector);
5871 amdgpu_dm_connector->num_modes = 0;
/*
 * drm .get_modes implementation: with a valid EDID, build the mode list
 * from it plus the common scaled modes; without one, fall back to the
 * no-EDID defaults (640x480 and up). Returns the mode count.
 */
5875 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5877 struct amdgpu_dm_connector *amdgpu_dm_connector =
5878 to_amdgpu_dm_connector(connector);
5879 struct drm_encoder *encoder;
5880 struct edid *edid = amdgpu_dm_connector->edid;
5882 encoder = amdgpu_dm_connector_to_encoder(connector);
5884 if (!edid || !drm_edid_is_valid(edid)) {
5885 amdgpu_dm_connector->num_modes =
5886 drm_add_modes_noedid(connector, 640, 480);
5888 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5889 amdgpu_dm_connector_add_common_modes(encoder, connector);
5891 amdgpu_dm_fbc_init(connector);
5893 return amdgpu_dm_connector->num_modes;
/*
 * Common initialization for a freshly created amdgpu-dm connector: reset
 * state, set defaults, configure HPD polling by connector type, and
 * attach the scaling/underscan/bpc/ABM/HDR/VRR/HDCP properties.
 */
5896 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5897 struct amdgpu_dm_connector *aconnector,
5899 struct dc_link *link,
5902 struct amdgpu_device *adev = dm->ddev->dev_private;
5905 * Some of the properties below require access to state, like bpc.
5906 * Allocate some default initial connector state with our reset helper.
5908 if (aconnector->base.funcs->reset)
5909 aconnector->base.funcs->reset(&aconnector->base);
5911 aconnector->connector_id = link_index;
5912 aconnector->dc_link = link;
5913 aconnector->base.interlace_allowed = false;
5914 aconnector->base.doublescan_allowed = false;
5915 aconnector->base.stereo_allowed = false;
5916 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5917 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5918 aconnector->audio_inst = -1;
5919 mutex_init(&aconnector->hpd_lock);
5922 * configure support HPD hot plug connector_>polled default value is 0
5923 * which means HPD hot plug not supported
5925 switch (connector_type) {
5926 case DRM_MODE_CONNECTOR_HDMIA:
5927 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5928 aconnector->base.ycbcr_420_allowed =
5929 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5931 case DRM_MODE_CONNECTOR_DisplayPort:
5932 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5933 aconnector->base.ycbcr_420_allowed =
5934 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5936 case DRM_MODE_CONNECTOR_DVID:
5937 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5943 drm_object_attach_property(&aconnector->base.base,
5944 dm->ddev->mode_config.scaling_mode_property,
5945 DRM_MODE_SCALE_NONE);
5947 drm_object_attach_property(&aconnector->base.base,
5948 adev->mode_info.underscan_property,
5950 drm_object_attach_property(&aconnector->base.base,
5951 adev->mode_info.underscan_hborder_property,
5953 drm_object_attach_property(&aconnector->base.base,
5954 adev->mode_info.underscan_vborder_property,
/* MST ports inherit max_bpc from the root connector. */
5957 if (!aconnector->mst_port)
5958 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5960 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5961 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5962 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
/* ABM (backlight power saving) is eDP-only and requires the DMCU. */
5964 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5965 dc_is_dmcu_initialized(adev->dm.dc)) {
5966 drm_object_attach_property(&aconnector->base.base,
5967 adev->mode_info.abm_level_property, 0);
5970 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5971 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5972 connector_type == DRM_MODE_CONNECTOR_eDP) {
5973 drm_object_attach_property(
5974 &aconnector->base.base,
5975 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5977 if (!aconnector->mst_port)
5978 drm_connector_attach_vrr_capable_property(&aconnector->base);
5980 #ifdef CONFIG_DRM_AMD_DC_HDCP
5981 if (adev->dm.hdcp_workqueue)
5982 drm_connector_attach_content_protection_property(&aconnector->base, true);
/*
 * i2c_algorithm.master_xfer: translate Linux i2c_msg transfers into a DC
 * i2c_command (one payload per message) and submit it to DC over the
 * connector's DDC channel.
 */
5987 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5988 struct i2c_msg *msgs, int num)
5990 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5991 struct ddc_service *ddc_service = i2c->ddc_service;
5992 struct i2c_command cmd;
5996 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6001 cmd.number_of_payloads = num;
6002 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6005 for (i = 0; i < num; i++) {
/* I2C_M_RD marks a read message; DC expects a 'write' flag instead. */
6006 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6007 cmd.payloads[i].address = msgs[i].addr;
6008 cmd.payloads[i].length = msgs[i].len;
6009 cmd.payloads[i].data = msgs[i].buf;
6013 ddc_service->ctx->dc,
6014 ddc_service->ddc_pin->hw_info.ddc_channel,
6018 kfree(cmd.payloads);
/* i2c_algorithm .functionality hook: plain I2C plus SMBus emulation. */
6022 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6024 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
/* Algorithm ops for the DM-owned hardware i2c (DDC) adapters. */
6027 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6028 .master_xfer = amdgpu_dm_i2c_xfer,
6029 .functionality = amdgpu_dm_i2c_func,
/*
 * Allocate and initialise an amdgpu_i2c_adapter wrapping the given DDC
 * service for one link.  The adapter is named after the link index and
 * the DDC pin's channel is tagged with that same index.
 * NOTE(review): the kzalloc NULL check, remaining parameters and the
 * final return are elided from this excerpt.
 */
6032 static struct amdgpu_i2c_adapter *
6033 create_i2c(struct ddc_service *ddc_service,
6037 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6038 struct amdgpu_i2c_adapter *i2c;
6040 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6043 i2c->base.owner = THIS_MODULE;
6044 i2c->base.class = I2C_CLASS_DDC;
6045 i2c->base.dev.parent = &adev->pdev->dev;
6046 i2c->base.algo = &amdgpu_dm_i2c_algo;
6047 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6048 i2c_set_adapdata(&i2c->base, i2c);
6049 i2c->ddc_service = ddc_service;
6050 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6057 * Note: this function assumes that dc_link_detect() was called for the
6058 * dc_link which will be represented by this aconnector.
/*
 * Create the DRM connector for one dc_link: per-link hw i2c adapter,
 * drm_connector registration with that DDC, helper setup, and encoder
 * attachment.  DP/eDP additionally get MST/aux initialisation.
 */
6060 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6061 struct amdgpu_dm_connector *aconnector,
6062 uint32_t link_index,
6063 struct amdgpu_encoder *aencoder)
6067 struct dc *dc = dm->dc;
6068 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6069 struct amdgpu_i2c_adapter *i2c;
/* Let the dc_link find its backing amdgpu_dm_connector. */
6071 link->priv = aconnector;
6073 DRM_DEBUG_DRIVER("%s()\n", __func__);
/* Each link gets its own hardware i2c (DDC) adapter. */
6075 i2c = create_i2c(link->ddc, link->link_index, &res);
6077 DRM_ERROR("Failed to create i2c adapter data\n");
6081 aconnector->i2c = i2c;
6082 res = i2c_add_adapter(&i2c->base);
6085 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6089 connector_type = to_drm_connector_type(link->connector_signal);
/* Register the DRM connector, handing it the DDC adapter for EDID reads. */
6091 res = drm_connector_init_with_ddc(
6094 &amdgpu_dm_connector_funcs,
6099 DRM_ERROR("connector_init failed\n");
6100 aconnector->connector_id = -1;
6104 drm_connector_helper_add(
6106 &amdgpu_dm_connector_helper_funcs);
6108 amdgpu_dm_connector_init_helper(
6115 drm_connector_attach_encoder(
6116 &aconnector->base, &aencoder->base);
/* DP and eDP additionally need MST/aux setup. */
6118 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6119 || connector_type == DRM_MODE_CONNECTOR_eDP)
6120 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
/* Presumably the failure-cleanup path (labels elided from this excerpt). */
6125 aconnector->i2c = NULL;
/*
 * Return the possible_crtcs bitmask for DM encoders, derived from the
 * number of CRTCs this ASIC exposes.  The per-count case labels are
 * elided from this excerpt.
 */
6130 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6132 switch (adev->mode_info.num_crtc) {
/*
 * Register the DRM encoder for one link and make it routable to any
 * CRTC the ASIC exposes.
 */
6149 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6150 struct amdgpu_encoder *aencoder,
6151 uint32_t link_index)
6153 struct amdgpu_device *adev = dev->dev_private;
6155 int res = drm_encoder_init(dev,
6157 &amdgpu_dm_encoder_funcs,
6158 DRM_MODE_ENCODER_TMDS,
6161 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
/* Success branch (presumably): record the link index as encoder id ... */
6164 aencoder->encoder_id = link_index;
/* ... failure branch: leave the id invalid.  Branch keywords elided. */
6166 aencoder->encoder_id = -1;
6168 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
/*
 * Turn the vblank/pageflip interrupt sources for one CRTC on or off.
 * NOTE(review): the enable parameter, irq_type assignment and the
 * amdgpu_irq_get/put calls are partly elided from this excerpt.
 */
6173 static void manage_dm_interrupts(struct amdgpu_device *adev,
6174 struct amdgpu_crtc *acrtc,
6178 * this is not correct translation but will work as soon as VBLANK
6179 * constant is the same as PFLIP
6182 amdgpu_display_crtc_idx_to_irq_type(
6187 drm_crtc_vblank_on(&acrtc->base);
6190 &adev->pageflip_irq,
6196 &adev->pageflip_irq,
6198 drm_crtc_vblank_off(&acrtc->base);
/*
 * Compare old vs. new connector scaling/underscan state to decide
 * whether a stream scaling update is needed.  Each matching condition
 * reports "different" (the return statements are elided here).
 */
6203 is_scaling_state_different(const struct dm_connector_state *dm_state,
6204 const struct dm_connector_state *old_dm_state)
6206 if (dm_state->scaling != old_dm_state->scaling)
/* Underscan turned off while borders had been active ... */
6208 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6209 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
/* ... or turned on with non-zero borders ... */
6211 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6212 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
/* ... or border sizes changed while the enable state stayed the same. */
6214 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6215 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6220 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether HDCP work must be (re)queued for this connector based
 * on old vs. new content-protection and content-type state.  May
 * rewrite state->content_protection to normalise transient states.
 */
6221 static bool is_content_protection_different(struct drm_connector_state *state,
6222 const struct drm_connector_state *old_state,
6223 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6225 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
/* A content-type change forces a fresh HDCP enable cycle. */
6227 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6228 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6229 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6233 /* CP is being re-enabled, ignore this */
6234 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6235 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6236 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6240 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6241 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6242 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6243 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6245 /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6246 * hot-plug, headless s3, dpms
6248 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6249 aconnector->dc_sink != NULL)
6252 if (old_state->content_protection == state->content_protection)
6255 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
/*
 * Tear down per-CRTC bookkeeping when its stream goes away: invalidate
 * the OTG instance and mark the CRTC disabled.
 */
6262 static void remove_stream(struct amdgpu_device *adev,
6263 struct amdgpu_crtc *acrtc,
6264 struct dc_stream_state *stream)
6266 /* this is the update mode case */
6268 acrtc->otg_inst = -1;
6269 acrtc->enabled = false;
/*
 * Translate the DRM cursor plane state into a DC cursor position,
 * folding negative on-screen coordinates into hotspot offsets.
 * position->enable stays false when the cursor should be hidden
 * (error/early returns are elided from this excerpt).
 */
6272 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6273 struct dc_cursor_position *position)
6275 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6277 int xorigin = 0, yorigin = 0;
6279 position->enable = false;
/* No CRTC or no framebuffer: leave the cursor disabled. */
6283 if (!crtc || !plane->state->fb)
/* Reject cursors larger than the hardware maximum. */
6286 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6287 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6288 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6290 plane->state->crtc_w,
6291 plane->state->crtc_h);
6295 x = plane->state->crtc_x;
6296 y = plane->state->crtc_y;
/* Fully off-screen to the top/left: keep it disabled. */
6298 if (x <= -amdgpu_crtc->max_cursor_width ||
6299 y <= -amdgpu_crtc->max_cursor_height)
/* Partially off-screen: clamp the negative offset into the hotspot. */
6303 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6307 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6310 position->enable = true;
6311 position->translate_by_source = true;
6314 position->x_hotspot = xorigin;
6315 position->y_hotspot = yorigin;
/*
 * Program (or hide) the hardware cursor for the plane's CRTC through
 * DC.  All dc_stream_set_cursor_* calls are serialised by dm.dc_lock.
 */
6320 static void handle_cursor_update(struct drm_plane *plane,
6321 struct drm_plane_state *old_plane_state)
6323 struct amdgpu_device *adev = plane->dev->dev_private;
6324 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
/* Fall back to the old CRTC when the cursor FB is being removed. */
6325 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6326 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6327 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6328 uint64_t address = afb ? afb->address : 0;
6329 struct dc_cursor_position position;
6330 struct dc_cursor_attributes attributes;
/* Nothing to do when neither old nor new state has a framebuffer. */
6333 if (!plane->state->fb && !old_plane_state->fb)
6336 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6338 amdgpu_crtc->crtc_id,
6339 plane->state->crtc_w,
6340 plane->state->crtc_h);
6342 ret = get_cursor_position(plane, crtc, &position);
6346 if (!position.enable) {
6347 /* turn off cursor */
6348 if (crtc_state && crtc_state->stream) {
6349 mutex_lock(&adev->dm.dc_lock);
6350 dc_stream_set_cursor_position(crtc_state->stream,
6352 mutex_unlock(&adev->dm.dc_lock);
6357 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6358 amdgpu_crtc->cursor_height = plane->state->crtc_h;
/* Build the DC cursor attributes from the new plane state. */
6360 memset(&attributes, 0, sizeof(attributes));
6361 attributes.address.high_part = upper_32_bits(address);
6362 attributes.address.low_part = lower_32_bits(address);
6363 attributes.width = plane->state->crtc_w;
6364 attributes.height = plane->state->crtc_h;
6365 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6366 attributes.rotation_angle = 0;
6367 attributes.attribute_flags.value = 0;
6369 attributes.pitch = attributes.width;
6371 if (crtc_state->stream) {
6372 mutex_lock(&adev->dm.dc_lock);
6373 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6375 DRM_ERROR("DC failed to set cursor attributes\n");
6377 if (!dc_stream_set_cursor_position(crtc_state->stream,
6379 DRM_ERROR("DC failed to set cursor position\n");
6380 mutex_unlock(&adev->dm.dc_lock);
/*
 * Hand the pending pageflip event over to the pageflip ISR.  Caller
 * must hold the DRM event_lock (asserted below).
 */
6384 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6387 assert_spin_locked(&acrtc->base.dev->event_lock);
/* A previous flip must not still own an event. */
6388 WARN_ON(acrtc->event);
6390 acrtc->event = acrtc->base.state->event;
6392 /* Set the flip status */
6393 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6395 /* Mark this event as consumed */
6396 acrtc->base.state->event = NULL;
6398 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
/*
 * Per-flip freesync/VRR maintenance for a stream: run the pre-flip
 * freesync callback, rebuild the VRR infopacket, and record whether
 * timing or infopacket changed.  Runs under the DRM event_lock.
 */
6402 static void update_freesync_state_on_stream(
6403 struct amdgpu_display_manager *dm,
6404 struct dm_crtc_state *new_crtc_state,
6405 struct dc_stream_state *new_stream,
6406 struct dc_plane_state *surface,
6407 u32 flip_timestamp_in_us)
6409 struct mod_vrr_params vrr_params;
6410 struct dc_info_packet vrr_infopacket = {0};
6411 struct amdgpu_device *adev = dm->adev;
6412 unsigned long flags;
6418 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6419 * For now it's sufficient to just guard against these conditions.
6422 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6425 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6426 vrr_params = new_crtc_state->vrr_params;
6429 mod_freesync_handle_preflip(
6430 dm->freesync_module,
6433 flip_timestamp_in_us,
/* Pre-AI families handle the freesync v_update here while VRR is on. */
6436 if (adev->family < AMDGPU_FAMILY_AI &&
6437 amdgpu_dm_vrr_active(new_crtc_state)) {
6438 mod_freesync_handle_v_update(dm->freesync_module,
6439 new_stream, &vrr_params);
6441 /* Need to call this before the frame ends. */
6442 dc_stream_adjust_vmin_vmax(dm->dc,
6443 new_crtc_state->stream,
6444 &vrr_params.adjust);
6448 mod_freesync_build_vrr_infopacket(
6449 dm->freesync_module,
6453 TRANSFER_FUNC_UNKNOWN,
/* Track whether the timing or the infopacket actually changed. */
6456 new_crtc_state->freesync_timing_changed |=
6457 (memcmp(&new_crtc_state->vrr_params.adjust,
6459 sizeof(vrr_params.adjust)) != 0);
6461 new_crtc_state->freesync_vrr_info_changed |=
6462 (memcmp(&new_crtc_state->vrr_infopacket,
6464 sizeof(vrr_infopacket)) != 0);
6466 new_crtc_state->vrr_params = vrr_params;
6467 new_crtc_state->vrr_infopacket = vrr_infopacket;
/* Mirror the updated parameters into the dc stream itself. */
6469 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6470 new_stream->vrr_infopacket = vrr_infopacket;
6472 if (new_crtc_state->freesync_vrr_info_changed)
6473 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6474 new_crtc_state->base.crtc->base.id,
6475 (int)new_crtc_state->base.vrr_enabled,
6476 (int)vrr_params.state);
6478 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
/*
 * Recompute a stream's VRR parameters ahead of the flip, based on the
 * crtc state's freesync config.  Runs under the DRM event_lock.
 */
6481 static void pre_update_freesync_state_on_stream(
6482 struct amdgpu_display_manager *dm,
6483 struct dm_crtc_state *new_crtc_state)
6485 struct dc_stream_state *new_stream = new_crtc_state->stream;
6486 struct mod_vrr_params vrr_params;
6487 struct mod_freesync_config config = new_crtc_state->freesync_config;
6488 struct amdgpu_device *adev = dm->adev;
6489 unsigned long flags;
6495 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6496 * For now it's sufficient to just guard against these conditions.
6498 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6501 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6502 vrr_params = new_crtc_state->vrr_params;
/* A usable refresh range is required; otherwise mark VRR unsupported. */
6504 if (new_crtc_state->vrr_supported &&
6505 config.min_refresh_in_uhz &&
6506 config.max_refresh_in_uhz) {
6507 config.state = new_crtc_state->base.vrr_enabled ?
6508 VRR_STATE_ACTIVE_VARIABLE :
6511 config.state = VRR_STATE_UNSUPPORTED;
6514 mod_freesync_build_vrr_params(dm->freesync_module,
6516 &config, &vrr_params);
/* Note whether the rebuilt timing adjustment differs from the old one. */
6518 new_crtc_state->freesync_timing_changed |=
6519 (memcmp(&new_crtc_state->vrr_params.adjust,
6521 sizeof(vrr_params.adjust)) != 0);
6523 new_crtc_state->vrr_params = vrr_params;
6524 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
/*
 * Manage the vblank reference and vupdate irq across VRR on/off
 * transitions for one CRTC.
 */
6527 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6528 struct dm_crtc_state *new_state)
6530 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6531 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6533 if (!old_vrr_active && new_vrr_active) {
6534 /* Transition VRR inactive -> active:
6535 * While VRR is active, we must not disable vblank irq, as a
6536 * reenable after disable would compute bogus vblank/pflip
6537 * timestamps if it likely happened inside display front-porch.
6539 * We also need vupdate irq for the actual core vblank handling
6542 dm_set_vupdate_irq(new_state->base.crtc, true);
6543 drm_crtc_vblank_get(new_state->base.crtc);
6544 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6545 __func__, new_state->base.crtc->base.id);
6546 } else if (old_vrr_active && !new_vrr_active) {
6547 /* Transition VRR active -> inactive:
6548 * Allow vblank irq disable again for fixed refresh rate.
6550 dm_set_vupdate_irq(new_state->base.crtc, false);
6551 drm_crtc_vblank_put(new_state->base.crtc);
6552 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6553 __func__, new_state->base.crtc->base.id);
/* Run the cursor update for every cursor plane in the atomic state. */
6557 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6559 struct drm_plane *plane;
6560 struct drm_plane_state *old_plane_state, *new_plane_state;
6564 * TODO: Make this per-stream so we don't issue redundant updates for
6565 * commits with multiple streams.
6567 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6569 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6570 handle_cursor_update(plane, old_plane_state);
/*
 * Commit all plane updates for one CRTC: collect per-plane surface
 * updates into a bundle, throttle against the target vblank, arm the
 * pageflip event, then program everything through DC under dc_lock
 * (including PSR enable/disable around the stream update).
 */
6573 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6574 struct dc_state *dc_state,
6575 struct drm_device *dev,
6576 struct amdgpu_display_manager *dm,
6577 struct drm_crtc *pcrtc,
6578 bool wait_for_vblank)
6581 uint64_t timestamp_ns;
6582 struct drm_plane *plane;
6583 struct drm_plane_state *old_plane_state, *new_plane_state;
6584 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6585 struct drm_crtc_state *new_pcrtc_state =
6586 drm_atomic_get_new_crtc_state(state, pcrtc);
6587 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6588 struct dm_crtc_state *dm_old_crtc_state =
6589 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6590 int planes_count = 0, vpos, hpos;
6592 unsigned long flags;
6593 struct amdgpu_bo *abo;
6594 uint64_t tiling_flags;
6595 uint32_t target_vblank, last_flip_vblank;
6596 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6597 bool pflip_present = false;
6599 struct dc_surface_update surface_updates[MAX_SURFACES];
6600 struct dc_plane_info plane_infos[MAX_SURFACES];
6601 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6602 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6603 struct dc_stream_update stream_update;
/* The update bundle is too large for the stack; allocate it. */
6606 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6609 dm_error("Failed to allocate update bundle\n");
6614 * Disable the cursor first if we're disabling all the planes.
6615 * It'll remain on the screen after the planes are re-enabled
6618 if (acrtc_state->active_planes == 0)
6619 amdgpu_dm_commit_cursors(state);
6621 /* update planes when needed */
6622 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6623 struct drm_crtc *crtc = new_plane_state->crtc;
6624 struct drm_crtc_state *new_crtc_state;
6625 struct drm_framebuffer *fb = new_plane_state->fb;
6626 bool plane_needs_flip;
6627 struct dc_plane_state *dc_plane;
6628 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6630 /* Cursor plane is handled after stream updates */
6631 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6634 if (!fb || !crtc || pcrtc != crtc)
6637 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc)
6638 if (!new_crtc_state->active)
6641 dc_plane = dm_new_plane_state->dc_state;
6643 bundle->surface_updates[planes_count].surface = dc_plane;
6644 if (new_pcrtc_state->color_mgmt_changed) {
6645 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6646 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6647 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6650 fill_dc_scaling_info(new_plane_state,
6651 &bundle->scaling_infos[planes_count]);
6653 bundle->surface_updates[planes_count].scaling_info =
6654 &bundle->scaling_infos[planes_count];
/* A flip is only needed when both the old and new state carry an FB. */
6656 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6658 pflip_present = pflip_present || plane_needs_flip;
6660 if (!plane_needs_flip) {
6665 abo = gem_to_amdgpu_bo(fb->obj[0]);
6668 * Wait for all fences on this FB. Do limited wait to avoid
6669 * deadlock during GPU reset when this fence will not signal
6670 * but we hold reservation lock for the BO.
6672 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6674 msecs_to_jiffies(5000));
6675 if (unlikely(r <= 0))
6676 DRM_ERROR("Waiting for fences timed out!");
6679 * TODO This might fail and hence better not used, wait
6680 * explicitly on fences instead
6681 * and in general should be called for
6682 * blocking commit to as per framework helpers
6684 r = amdgpu_bo_reserve(abo, true);
6685 if (unlikely(r != 0))
6686 DRM_ERROR("failed to reserve buffer before flip\n");
6688 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6690 amdgpu_bo_unreserve(abo);
6692 fill_dc_plane_info_and_addr(
6693 dm->adev, new_plane_state, tiling_flags,
6694 &bundle->plane_infos[planes_count],
6695 &bundle->flip_addrs[planes_count].address,
6698 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6699 new_plane_state->plane->index,
6700 bundle->plane_infos[planes_count].dcc.enable);
6702 bundle->surface_updates[planes_count].plane_info =
6703 &bundle->plane_infos[planes_count];
6706 * Only allow immediate flips for fast updates that don't
6707 * change FB pitch, DCC state, rotation or mirroring.
6709 bundle->flip_addrs[planes_count].flip_immediate =
6710 crtc->state->async_flip &&
6711 acrtc_state->update_type == UPDATE_TYPE_FAST;
6713 timestamp_ns = ktime_get_ns();
6714 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6715 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6716 bundle->surface_updates[planes_count].surface = dc_plane;
6718 if (!bundle->surface_updates[planes_count].surface) {
6719 DRM_ERROR("No surface for CRTC: id=%d\n",
6720 acrtc_attach->crtc_id);
/* Freesync bookkeeping follows the primary plane only. */
6724 if (plane == pcrtc->primary)
6725 update_freesync_state_on_stream(
6728 acrtc_state->stream,
6730 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6732 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6734 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6735 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6741 if (pflip_present) {
6743 /* Use old throttling in non-vrr fixed refresh rate mode
6744 * to keep flip scheduling based on target vblank counts
6745 * working in a backwards compatible way, e.g., for
6746 * clients using the GLX_OML_sync_control extension or
6747 * DRI3/Present extension with defined target_msc.
6749 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6752 /* For variable refresh rate mode only:
6753 * Get vblank of last completed flip to avoid > 1 vrr
6754 * flips per video frame by use of throttling, but allow
6755 * flip programming anywhere in the possibly large
6756 * variable vrr vblank interval for fine-grained flip
6757 * timing control and more opportunity to avoid stutter
6758 * on late submission of flips.
6760 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6761 last_flip_vblank = acrtc_attach->last_flip_vblank;
6762 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6765 target_vblank = last_flip_vblank + wait_for_vblank;
6768 * Wait until we're out of the vertical blank period before the one
6769 * targeted by the flip
6771 while ((acrtc_attach->enabled &&
6772 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6773 0, &vpos, &hpos, NULL,
6774 NULL, &pcrtc->hwmode)
6775 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6776 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6777 (int)(target_vblank -
6778 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6779 usleep_range(1000, 1100);
/* Arm the pageflip completion event while holding event_lock. */
6782 if (acrtc_attach->base.state->event) {
6783 drm_crtc_vblank_get(pcrtc);
6785 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6787 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6788 prepare_flip_isr(acrtc_attach);
6790 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6793 if (acrtc_state->stream) {
6794 if (acrtc_state->freesync_vrr_info_changed)
6795 bundle->stream_update.vrr_infopacket =
6796 &acrtc_state->stream->vrr_infopacket;
6800 /* Update the planes if changed or disable if we don't have any. */
6801 if ((planes_count || acrtc_state->active_planes == 0) &&
6802 acrtc_state->stream) {
6803 bundle->stream_update.stream = acrtc_state->stream;
6804 if (new_pcrtc_state->mode_changed) {
6805 bundle->stream_update.src = acrtc_state->stream->src;
6806 bundle->stream_update.dst = acrtc_state->stream->dst;
6809 if (new_pcrtc_state->color_mgmt_changed) {
6811 * TODO: This isn't fully correct since we've actually
6812 * already modified the stream in place.
6814 bundle->stream_update.gamut_remap =
6815 &acrtc_state->stream->gamut_remap_matrix;
6816 bundle->stream_update.output_csc_transform =
6817 &acrtc_state->stream->csc_color_matrix;
6818 bundle->stream_update.out_transfer_func =
6819 acrtc_state->stream->out_transfer_func;
6822 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6823 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6824 bundle->stream_update.abm_level = &acrtc_state->abm_level;
6827 * If FreeSync state on the stream has changed then we need to
6828 * re-adjust the min/max bounds now that DC doesn't handle this
6829 * as part of commit.
6831 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6832 amdgpu_dm_vrr_active(acrtc_state)) {
6833 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6834 dc_stream_adjust_vmin_vmax(
6835 dm->dc, acrtc_state->stream,
6836 &acrtc_state->vrr_params.adjust);
6837 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
/* All DC programming below is serialised by dc_lock. */
6839 mutex_lock(&dm->dc_lock);
6840 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6841 acrtc_state->stream->link->psr_settings.psr_allow_active)
6842 amdgpu_dm_psr_disable(acrtc_state->stream);
6844 dc_commit_updates_for_stream(dm->dc,
6845 bundle->surface_updates,
6847 acrtc_state->stream,
6848 &bundle->stream_update,
6851 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6852 acrtc_state->stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED &&
6853 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
6854 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6855 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6856 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
6857 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
6858 amdgpu_dm_psr_enable(acrtc_state->stream);
6861 mutex_unlock(&dm->dc_lock);
6865 * Update cursor state *after* programming all the planes.
6866 * This avoids redundant programming in the case where we're going
6867 * to be disabling a single plane - those pipes are being disabled.
6869 if (acrtc_state->active_planes)
6870 amdgpu_dm_commit_cursors(state);
/*
 * Notify the audio side about connectors whose CRTC binding changed in
 * this commit: first removals (CRTC changed/went away), then additions
 * (new stream with a valid audio instance).  audio_inst handoff is
 * protected by dm.audio_lock.
 */
6876 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6877 struct drm_atomic_state *state)
6879 struct amdgpu_device *adev = dev->dev_private;
6880 struct amdgpu_dm_connector *aconnector;
6881 struct drm_connector *connector;
6882 struct drm_connector_state *old_con_state, *new_con_state;
6883 struct drm_crtc_state *new_crtc_state;
6884 struct dm_crtc_state *new_dm_crtc_state;
6885 const struct dc_stream_status *status;
6888 /* Notify device removals. */
6889 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6890 if (old_con_state->crtc != new_con_state->crtc) {
6891 /* CRTC changes require notification. */
6895 if (!new_con_state->crtc)
6898 new_crtc_state = drm_atomic_get_new_crtc_state(
6899 state, new_con_state->crtc);
6901 if (!new_crtc_state)
6904 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6908 aconnector = to_amdgpu_dm_connector(connector);
/* Invalidate the connector's audio instance before notifying. */
6910 mutex_lock(&adev->dm.audio_lock);
6911 inst = aconnector->audio_inst;
6912 aconnector->audio_inst = -1;
6913 mutex_unlock(&adev->dm.audio_lock);
6915 amdgpu_dm_audio_eld_notify(adev, inst);
6918 /* Notify audio device additions. */
6919 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6920 if (!new_con_state->crtc)
6923 new_crtc_state = drm_atomic_get_new_crtc_state(
6924 state, new_con_state->crtc);
6926 if (!new_crtc_state)
6929 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6932 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6933 if (!new_dm_crtc_state->stream)
/* The stream status carries the audio instance DC assigned. */
6936 status = dc_stream_get_status(new_dm_crtc_state->stream);
6940 aconnector = to_amdgpu_dm_connector(connector);
6942 mutex_lock(&adev->dm.audio_lock);
6943 inst = status->audio_inst;
6944 aconnector->audio_inst = inst;
6945 mutex_unlock(&adev->dm.audio_lock);
6947 amdgpu_dm_audio_eld_notify(adev, inst);
6952 * Enable interrupts on CRTCs that are newly active, undergone
6953 * a modeset, or have active planes again.
6955 * Done in two passes, based on the for_modeset flag:
6956 * Pass 1: For CRTCs going through modeset
6957 * Pass 2: For CRTCs going from 0 to n active planes
6959 * Interrupts can only be enabled after the planes are programmed,
6960 * so this requires a two-pass approach since we don't want to
6961 * just defer the interrupts until after commit planes every time.
6963 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6964 struct drm_atomic_state *state,
6967 struct amdgpu_device *adev = dev->dev_private;
6968 struct drm_crtc *crtc;
6969 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6971 #ifdef CONFIG_DEBUG_FS
6972 enum amdgpu_dm_pipe_crc_source source;
6975 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6976 new_crtc_state, i) {
6977 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6978 struct dm_crtc_state *dm_new_crtc_state =
6979 to_dm_crtc_state(new_crtc_state);
6980 struct dm_crtc_state *dm_old_crtc_state =
6981 to_dm_crtc_state(old_crtc_state);
6982 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
/* Pass selection: modesets in pass 1, 0->n plane enables in pass 2. */
6985 run_pass = (for_modeset && modeset) ||
6986 (!for_modeset && !modeset &&
6987 !dm_old_crtc_state->interrupts_enabled);
6992 if (!dm_new_crtc_state->interrupts_enabled)
6995 manage_dm_interrupts(adev, acrtc, true);
6997 #ifdef CONFIG_DEBUG_FS
6998 /* The stream has changed so CRC capture needs to be re-enabled. */
6999 source = dm_new_crtc_state->crc_src;
7000 if (amdgpu_dm_is_valid_crc_source(source)) {
7001 amdgpu_dm_crtc_configure_crc_source(
7002 crtc, dm_new_crtc_state,
7003 dm_new_crtc_state->crc_src);
7010 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7011 * @crtc_state: the DRM CRTC state
7012 * @stream_state: the DC stream state.
7014 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7015 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
/* Currently only the mode_changed flag is mirrored. */
7017 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7018 struct dc_stream_state *stream_state)
7020 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
/*
 * DM's drm_mode_config_funcs.atomic_commit: quiesce interrupts on CRTCs
 * that are being disabled or modeset, then hand off to the generic DRM
 * atomic commit helper.
 */
7023 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7024 struct drm_atomic_state *state,
7027 struct drm_crtc *crtc;
7028 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7029 struct amdgpu_device *adev = dev->dev_private;
7033 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7034 * a modeset, being disabled, or have no active planes.
7036 * It's done in atomic commit rather than commit tail for now since
7037 * some of these interrupt handlers access the current CRTC state and
7038 * potentially the stream pointer itself.
7040 * Since the atomic state is swapped within atomic commit and not within
7041 * commit tail this would lead to the new state (that hasn't been committed yet)
7042 * being accessed from within the handlers.
7044 * TODO: Fix this so we can do this in commit tail and not have to block
7047 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7048 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7049 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7050 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
/* Interrupts are on but are turning off, or a modeset intervenes. */
7052 if (dm_old_crtc_state->interrupts_enabled &&
7053 (!dm_new_crtc_state->interrupts_enabled ||
7054 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7055 manage_dm_interrupts(adev, acrtc, false);
7058 * Add check here for SoC's that support hardware cursor plane, to
7059 * unset legacy_cursor_update
7062 return drm_atomic_helper_commit(dev, state, nonblock);
7064 /* TODO: Handle EINTR, re-enable IRQ */
7068 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7069 * @state: The atomic state to commit
7071 * This will tell DC to commit the constructed DC state from atomic_check,
7072 * programming the hardware. Any failures here implies a hardware failure, since
7073 * atomic check should have filtered anything non-kosher.
7075 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7077 struct drm_device *dev = state->dev;
7078 struct amdgpu_device *adev = dev->dev_private;
7079 struct amdgpu_display_manager *dm = &adev->dm;
7080 struct dm_atomic_state *dm_state;
7081 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7083 struct drm_crtc *crtc;
7084 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7085 unsigned long flags;
7086 bool wait_for_vblank = true;
7087 struct drm_connector *connector;
7088 struct drm_connector_state *old_con_state, *new_con_state;
7089 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7090 int crtc_disable_count = 0;
7092 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7094 dm_state = dm_atomic_get_new_state(state);
7095 if (dm_state && dm_state->context) {
7096 dc_state = dm_state->context;
7098 /* No state changes, retain current state. */
7099 dc_state_temp = dc_create_state(dm->dc);
7100 ASSERT(dc_state_temp);
7101 dc_state = dc_state_temp;
7102 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7105 /* update changed items */
7106 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7107 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7109 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7110 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7113 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7114 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7115 "connectors_changed:%d\n",
7117 new_crtc_state->enable,
7118 new_crtc_state->active,
7119 new_crtc_state->planes_changed,
7120 new_crtc_state->mode_changed,
7121 new_crtc_state->active_changed,
7122 new_crtc_state->connectors_changed);
7124 /* Copy all transient state flags into dc state */
7125 if (dm_new_crtc_state->stream) {
7126 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7127 dm_new_crtc_state->stream);
7130 /* handles headless hotplug case, updating new_state and
7131 * aconnector as needed
7134 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7136 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7138 if (!dm_new_crtc_state->stream) {
7140 * this could happen because of issues with
7141 * userspace notifications delivery.
7142 * In this case userspace tries to set mode on
7143 * display which is disconnected in fact.
7144 * dc_sink is NULL in this case on aconnector.
7145 * We expect reset mode will come soon.
7147 * This can also happen when unplug is done
7148 * during resume sequence ended
7150 * In this case, we want to pretend we still
7151 * have a sink to keep the pipe running so that
7152 * hw state is consistent with the sw state
7154 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7155 __func__, acrtc->base.base.id);
7159 if (dm_old_crtc_state->stream)
7160 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7162 pm_runtime_get_noresume(dev->dev);
7164 acrtc->enabled = true;
7165 acrtc->hw_mode = new_crtc_state->mode;
7166 crtc->hwmode = new_crtc_state->mode;
7167 } else if (modereset_required(new_crtc_state)) {
7168 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7169 /* i.e. reset mode */
7170 if (dm_old_crtc_state->stream) {
7171 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7172 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7174 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7177 } /* for_each_crtc_in_state() */
7180 dm_enable_per_frame_crtc_master_sync(dc_state);
7181 mutex_lock(&dm->dc_lock);
7182 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7183 mutex_unlock(&dm->dc_lock);
7186 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7187 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7189 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7191 if (dm_new_crtc_state->stream != NULL) {
7192 const struct dc_stream_status *status =
7193 dc_stream_get_status(dm_new_crtc_state->stream);
7196 status = dc_stream_get_status_from_state(dc_state,
7197 dm_new_crtc_state->stream);
7200 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7202 acrtc->otg_inst = status->primary_otg_inst;
7205 #ifdef CONFIG_DRM_AMD_DC_HDCP
7206 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7207 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7208 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7209 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7211 new_crtc_state = NULL;
7214 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7216 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7218 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7219 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7220 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7221 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7225 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7226 hdcp_update_display(
7227 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7228 new_con_state->hdcp_content_type,
7229 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7234 /* Handle connector state changes */
7235 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7236 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7237 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7238 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7239 struct dc_surface_update dummy_updates[MAX_SURFACES];
7240 struct dc_stream_update stream_update;
7241 struct dc_info_packet hdr_packet;
7242 struct dc_stream_status *status = NULL;
7243 bool abm_changed, hdr_changed, scaling_changed;
7245 memset(&dummy_updates, 0, sizeof(dummy_updates));
7246 memset(&stream_update, 0, sizeof(stream_update));
7249 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7250 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7253 /* Skip any modesets/resets */
7254 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7257 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7258 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7260 scaling_changed = is_scaling_state_different(dm_new_con_state,
7263 abm_changed = dm_new_crtc_state->abm_level !=
7264 dm_old_crtc_state->abm_level;
7267 is_hdr_metadata_different(old_con_state, new_con_state);
7269 if (!scaling_changed && !abm_changed && !hdr_changed)
7272 stream_update.stream = dm_new_crtc_state->stream;
7273 if (scaling_changed) {
7274 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7275 dm_new_con_state, dm_new_crtc_state->stream);
7277 stream_update.src = dm_new_crtc_state->stream->src;
7278 stream_update.dst = dm_new_crtc_state->stream->dst;
7282 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7284 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7288 fill_hdr_info_packet(new_con_state, &hdr_packet);
7289 stream_update.hdr_static_metadata = &hdr_packet;
7292 status = dc_stream_get_status(dm_new_crtc_state->stream);
7294 WARN_ON(!status->plane_count);
7297 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7298 * Here we create an empty update on each plane.
7299 * To fix this, DC should permit updating only stream properties.
7301 for (j = 0; j < status->plane_count; j++)
7302 dummy_updates[j].surface = status->plane_states[0];
7305 mutex_lock(&dm->dc_lock);
7306 dc_commit_updates_for_stream(dm->dc,
7308 status->plane_count,
7309 dm_new_crtc_state->stream,
7312 mutex_unlock(&dm->dc_lock);
7315 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7316 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7317 new_crtc_state, i) {
7318 if (old_crtc_state->active && !new_crtc_state->active)
7319 crtc_disable_count++;
7321 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7322 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7324 /* Update freesync active state. */
7325 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7327 /* Handle vrr on->off / off->on transitions */
7328 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7332 /* Enable interrupts for CRTCs going through a modeset. */
7333 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7335 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7336 if (new_crtc_state->async_flip)
7337 wait_for_vblank = false;
7339 /* update planes when needed per crtc*/
7340 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7341 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7343 if (dm_new_crtc_state->stream)
7344 amdgpu_dm_commit_planes(state, dc_state, dev,
7345 dm, crtc, wait_for_vblank);
7348 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7349 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7351 /* Update audio instances for each connector. */
7352 amdgpu_dm_commit_audio(dev, state);
7355 * send vblank event on all events not handled in flip and
7356 * mark consumed event for drm_atomic_helper_commit_hw_done
7358 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7359 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7361 if (new_crtc_state->event)
7362 drm_send_event_locked(dev, &new_crtc_state->event->base);
7364 new_crtc_state->event = NULL;
7366 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7368 /* Signal HW programming completion */
7369 drm_atomic_helper_commit_hw_done(state);
7371 if (wait_for_vblank)
7372 drm_atomic_helper_wait_for_flip_done(dev, state);
7374 drm_atomic_helper_cleanup_planes(dev, state);
7377 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7378 * so we can put the GPU into runtime suspend if we're not driving any
7381 for (i = 0; i < crtc_disable_count; i++)
7382 pm_runtime_put_autosuspend(dev->dev);
7383 pm_runtime_mark_last_busy(dev->dev);
7386 dc_release_state(dc_state_temp);
/*
 * Build and commit a minimal atomic state (connector + its CRTC + primary
 * plane, with mode_changed forced) to restore the previous display setting
 * without a userspace-initiated modeset. Returns drm_atomic_commit()'s code.
 *
 * NOTE(review): drm_atomic_state_alloc() can return NULL; the NULL check is
 * not visible in this view — confirm it exists before state is dereferenced.
 */
7390 static int dm_force_atomic_commit(struct drm_connector *connector)
7393 struct drm_device *ddev = connector->dev;
7394 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7395 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7396 struct drm_plane *plane = disconnected_acrtc->base.primary;
7397 struct drm_connector_state *conn_state;
7398 struct drm_crtc_state *crtc_state;
7399 struct drm_plane_state *plane_state;
/* Reuse the caller's acquire context so no new locking round is needed. */
7404 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7406 /* Construct an atomic state to restore previous display setting */
7409 * Attach connectors to drm_atomic_state
7411 conn_state = drm_atomic_get_connector_state(state, connector);
7413 ret = PTR_ERR_OR_ZERO(conn_state);
7417 /* Attach crtc to drm_atomic_state*/
7418 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7420 ret = PTR_ERR_OR_ZERO(crtc_state);
7424 /* force a restore */
7425 crtc_state->mode_changed = true;
7427 /* Attach plane to drm_atomic_state */
7428 plane_state = drm_atomic_get_plane_state(state, plane);
7430 ret = PTR_ERR_OR_ZERO(plane_state);
7435 /* Call commit internally with the state we just constructed */
7436 ret = drm_atomic_commit(state);
7441 DRM_ERROR("Restoring old state failed with %i\n", ret);
/* Drop our reference; commit holds its own while it needs the state. */
7442 drm_atomic_state_put(state);
7448 * This function handles all cases when set mode does not come upon hotplug.
7449 * This includes when a display is unplugged then plugged back into the
7450 * same port and when running without usermode desktop manager support
7452 void dm_restore_drm_connector_state(struct drm_device *dev,
7453 struct drm_connector *connector)
7455 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7456 struct amdgpu_crtc *disconnected_acrtc;
7457 struct dm_crtc_state *acrtc_state;
/* Nothing to restore without a sink, connector state and encoder. */
7459 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7462 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7463 if (!disconnected_acrtc)
7466 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7467 if (!acrtc_state->stream)
7471 * If the previous sink is not released and different from the current,
7472 * we deduce we are in a state where we can not rely on usermode call
7473 * to turn on the display, so we do it here
7475 if (acrtc_state->stream->sink != aconnector->dc_sink)
7476 dm_force_atomic_commit(&aconnector->base);
7480 * Grabs all modesetting locks to serialize against any blocking commits,
7481 * Waits for completion of all non blocking commits.
/*
 * NOTE(review): function name is misspelled ("aquire"); kept as-is since
 * callers elsewhere in the file reference it by this name.
 */
7483 static int do_aquire_global_lock(struct drm_device *dev,
7484 struct drm_atomic_state *state)
7486 struct drm_crtc *crtc;
7487 struct drm_crtc_commit *commit;
7491 * Adding all modeset locks to aquire_ctx will
7492 * ensure that when the framework release it the
7493 * extra locks we are locking here will get released to
7495 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
/* For each CRTC, grab a reference to its latest commit (if any) so the
 * commit object stays valid while we wait on its completions below. */
7499 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7500 spin_lock(&crtc->commit_lock);
7501 commit = list_first_entry_or_null(&crtc->commit_list,
7502 struct drm_crtc_commit, commit_entry);
7504 drm_crtc_commit_get(commit);
7505 spin_unlock(&crtc->commit_lock);
7511 * Make sure all pending HW programming completed and
/* 10*HZ jiffies = 10 seconds per wait; interruptible, so may return early. */
7514 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7517 ret = wait_for_completion_interruptible_timeout(
7518 &commit->flip_done, 10*HZ);
7521 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7522 "timed out\n", crtc->base.id, crtc->name);
7524 drm_crtc_commit_put(commit);
/* Negative ret means interrupted/error; timeout path only logs above. */
7527 return ret < 0 ? ret : 0;
/*
 * Derive the freesync (VRR) configuration for a CRTC from its connector's
 * capabilities and the current mode's refresh rate, storing the result in
 * new_crtc_state->freesync_config. VRR is supported only when the mode's
 * vrefresh lies within the connector's [min_vfreq, max_vfreq] range.
 */
7530 static void get_freesync_config_for_crtc(
7531 struct dm_crtc_state *new_crtc_state,
7532 struct dm_connector_state *new_con_state)
7534 struct mod_freesync_config config = {0};
7535 struct amdgpu_dm_connector *aconnector =
7536 to_amdgpu_dm_connector(new_con_state->base.connector);
7537 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7538 int vrefresh = drm_mode_vrefresh(mode);
7540 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7541 vrefresh >= aconnector->min_vfreq &&
7542 vrefresh <= aconnector->max_vfreq;
7544 if (new_crtc_state->vrr_supported) {
7545 new_crtc_state->stream->ignore_msa_timing_param = true;
7546 config.state = new_crtc_state->base.vrr_enabled ?
7547 VRR_STATE_ACTIVE_VARIABLE :
/* Refresh limits are expressed in micro-Hz (Hz * 1000000). */
7549 config.min_refresh_in_uhz =
7550 aconnector->min_vfreq * 1000000;
7551 config.max_refresh_in_uhz =
7552 aconnector->max_vfreq * 1000000;
7553 config.vsif_supported = true;
/* Unsupported leaves config zero-initialized (VRR effectively off). */
7557 new_crtc_state->freesync_config = config;
/* Clear all VRR/freesync state on a CRTC state (support flag, params,
 * and the infopacket), e.g. after its stream is removed. */
7560 static void reset_freesync_config_for_crtc(
7561 struct dm_crtc_state *new_crtc_state)
7563 new_crtc_state->vrr_supported = false;
7565 memset(&new_crtc_state->vrr_params, 0,
7566 sizeof(new_crtc_state->vrr_params));
7567 memset(&new_crtc_state->vrr_infopacket, 0,
7568 sizeof(new_crtc_state->vrr_infopacket));
/*
 * Atomic-check helper for one CRTC: removes the old DC stream when the CRTC
 * is disabled/changed, or creates and adds a new DC stream when enabled,
 * keeping the dc_state in dm_state->context in sync with the DRM state.
 * Sets *lock_and_validation_needed when a full (non-fast) update results.
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): goto targets (error/cleanup labels) are not visible in this
 * view; error paths below jump to them.
 */
7571 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7572 struct drm_atomic_state *state,
7573 struct drm_crtc *crtc,
7574 struct drm_crtc_state *old_crtc_state,
7575 struct drm_crtc_state *new_crtc_state,
7577 bool *lock_and_validation_needed)
7579 struct dm_atomic_state *dm_state = NULL;
7580 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7581 struct dc_stream_state *new_stream;
7585 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7586 * update changed items
7588 struct amdgpu_crtc *acrtc = NULL;
7589 struct amdgpu_dm_connector *aconnector = NULL;
7590 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7591 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7595 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7596 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7597 acrtc = to_amdgpu_crtc(crtc);
7598 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7600 /* TODO This hack should go away */
7601 if (aconnector && enable) {
7602 /* Make sure fake sink is created in plug-in scenario */
7603 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7605 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7608 if (IS_ERR(drm_new_conn_state)) {
7609 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7613 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7614 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
/* Stream creation is only needed when a modeset is pending. */
7616 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7619 new_stream = create_stream_for_sink(aconnector,
7620 &new_crtc_state->mode,
7622 dm_old_crtc_state->stream);
7625 * we can have no stream on ACTION_SET if a display
7626 * was disconnected during S3, in this case it is not an
7627 * error, the OS will be updated after detection, and
7628 * will do the right thing on next atomic commit
7632 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7633 __func__, acrtc->base.base.id);
7638 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7640 ret = fill_hdr_info_packet(drm_new_conn_state,
7641 &new_stream->hdr_static_metadata);
7646 * If we already removed the old stream from the context
7647 * (and set the new stream to NULL) then we can't reuse
7648 * the old stream even if the stream and scaling are unchanged.
7649 * We'll hit the BUG_ON and black screen.
7651 * TODO: Refactor this function to allow this check to work
7652 * in all conditions.
7654 if (dm_new_crtc_state->stream &&
7655 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7656 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7657 new_crtc_state->mode_changed = false;
7658 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7659 new_crtc_state->mode_changed);
7663 /* mode_changed flag may get updated above, need to check again */
7664 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7668 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7669 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7670 "connectors_changed:%d\n",
7672 new_crtc_state->enable,
7673 new_crtc_state->active,
7674 new_crtc_state->planes_changed,
7675 new_crtc_state->mode_changed,
7676 new_crtc_state->active_changed,
7677 new_crtc_state->connectors_changed);
7679 /* Remove stream for any changed/disabled CRTC */
7682 if (!dm_old_crtc_state->stream)
7685 ret = dm_atomic_get_state(state, &dm_state);
7689 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7692 /* i.e. reset mode */
7693 if (dc_remove_stream_from_ctx(
7696 dm_old_crtc_state->stream) != DC_OK) {
/* Drop the DRM-side reference and clear freesync for the dead stream. */
7701 dc_stream_release(dm_old_crtc_state->stream);
7702 dm_new_crtc_state->stream = NULL;
7704 reset_freesync_config_for_crtc(dm_new_crtc_state);
7706 *lock_and_validation_needed = true;
7708 } else {/* Add stream for any updated/enabled CRTC */
7710 * Quick fix to prevent NULL pointer on new_stream when
7711 * added MST connectors not found in existing crtc_state in the chained mode
7712 * TODO: need to dig out the root cause of that
7714 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7717 if (modereset_required(new_crtc_state))
7720 if (modeset_required(new_crtc_state, new_stream,
7721 dm_old_crtc_state->stream)) {
7723 WARN_ON(dm_new_crtc_state->stream);
7725 ret = dm_atomic_get_state(state, &dm_state);
7729 dm_new_crtc_state->stream = new_stream;
/* Extra retain: the CRTC state now also owns a reference. */
7731 dc_stream_retain(new_stream);
7733 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7736 if (dc_add_stream_to_ctx(
7739 dm_new_crtc_state->stream) != DC_OK) {
7744 *lock_and_validation_needed = true;
7749 /* Release extra reference */
7751 dc_stream_release(new_stream);
7754 * We want to do dc stream updates that do not require a
7755 * full modeset below.
7757 if (!(enable && aconnector && new_crtc_state->enable &&
7758 new_crtc_state->active))
7761 * Given above conditions, the dc state cannot be NULL because:
7762 * 1. We're in the process of enabling CRTCs (just been added
7763 * to the dc context, or already is on the context)
7764 * 2. Has a valid connector attached, and
7765 * 3. Is currently active and enabled.
7766 * => The dc stream state currently exists.
7768 BUG_ON(dm_new_crtc_state->stream == NULL);
7770 /* Scaling or underscan settings */
7771 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7772 update_stream_scaling_settings(
7773 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7776 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7779 * Color management settings. We also update color properties
7780 * when a modeset is needed, to ensure it gets reprogrammed.
7782 if (dm_new_crtc_state->base.color_mgmt_changed ||
7783 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7784 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7789 /* Update Freesync settings. */
7790 get_freesync_config_for_crtc(dm_new_crtc_state,
/* Failure path: release the stream created earlier in this function. */
7797 dc_stream_release(new_stream);
/*
 * Decide whether a plane must be removed and re-added to the DC context
 * during atomic check (a "reset") rather than updated in place. Returns
 * true for modesets, CRTC color-management changes, plane add/remove,
 * z-order-affecting sibling changes, and sibling format changes.
 */
7801 static bool should_reset_plane(struct drm_atomic_state *state,
7802 struct drm_plane *plane,
7803 struct drm_plane_state *old_plane_state,
7804 struct drm_plane_state *new_plane_state)
7806 struct drm_plane *other;
7807 struct drm_plane_state *old_other_state, *new_other_state;
7808 struct drm_crtc_state *new_crtc_state;
7812 * TODO: Remove this hack once the checks below are sufficient
7813 * enough to determine when we need to reset all the planes on
7816 if (state->allow_modeset)
7819 /* Exit early if we know that we're adding or removing the plane. */
7820 if (old_plane_state->crtc != new_plane_state->crtc)
7823 /* old crtc == new_crtc == NULL, plane not in context. */
7824 if (!new_plane_state->crtc)
7828 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7830 if (!new_crtc_state)
7833 /* CRTC Degamma changes currently require us to recreate planes. */
7834 if (new_crtc_state->color_mgmt_changed)
7837 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7841 * If there are any new primary or overlay planes being added or
7842 * removed then the z-order can potentially change. To ensure
7843 * correct z-order and pipe acquisition the current DC architecture
7844 * requires us to remove and recreate all existing planes.
7846 * TODO: Come up with a more elegant solution for this.
7848 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
/* Cursor planes never force a reset. */
7849 if (other->type == DRM_PLANE_TYPE_CURSOR)
/* Skip siblings that touch neither the old nor the new CRTC. */
7852 if (old_other_state->crtc != new_plane_state->crtc &&
7853 new_other_state->crtc != new_plane_state->crtc)
/* A sibling moving between CRTCs changes z-order/pipe assignment. */
7856 if (old_other_state->crtc != new_other_state->crtc)
7859 /* TODO: Remove this once we can handle fast format changes. */
7860 if (old_other_state->fb && new_other_state->fb &&
7861 old_other_state->fb->format != new_other_state->fb->format)
/*
 * Atomic-check helper for one plane: removes the old dc_plane_state from the
 * DC context for disabled/reset planes, or creates, fills and adds a new
 * dc_plane_state for enabled planes. Mirrors dm_update_crtc_state for planes.
 * Sets *lock_and_validation_needed when the change requires full validation.
 * Returns 0 on success or a negative error code.
 */
7868 static int dm_update_plane_state(struct dc *dc,
7869 struct drm_atomic_state *state,
7870 struct drm_plane *plane,
7871 struct drm_plane_state *old_plane_state,
7872 struct drm_plane_state *new_plane_state,
7874 bool *lock_and_validation_needed)
7877 struct dm_atomic_state *dm_state = NULL;
7878 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7879 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7880 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7881 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7886 new_plane_crtc = new_plane_state->crtc;
7887 old_plane_crtc = old_plane_state->crtc;
7888 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7889 dm_old_plane_state = to_dm_plane_state(old_plane_state);
7891 /*TODO Implement atomic check for cursor plane */
7892 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7895 needs_reset = should_reset_plane(state, plane, old_plane_state,
7898 /* Remove any changed/removed planes */
/* Plane had no CRTC before, so there is nothing to remove. */
7903 if (!old_plane_crtc)
7906 old_crtc_state = drm_atomic_get_old_crtc_state(
7907 state, old_plane_crtc);
7908 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7910 if (!dm_old_crtc_state->stream)
7913 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7914 plane->base.id, old_plane_crtc->base.id);
7916 ret = dm_atomic_get_state(state, &dm_state);
7920 if (!dc_remove_plane_from_context(
7922 dm_old_crtc_state->stream,
7923 dm_old_plane_state->dc_state,
7924 dm_state->context)) {
/* Drop the DC plane reference now that it is out of the context. */
7931 dc_plane_state_release(dm_old_plane_state->dc_state);
7932 dm_new_plane_state->dc_state = NULL;
7934 *lock_and_validation_needed = true;
7936 } else { /* Add new planes */
7937 struct dc_plane_state *dc_new_plane_state;
7939 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7942 if (!new_plane_crtc)
7945 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7946 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7948 if (!dm_new_crtc_state->stream)
/* Adding a plane that already has a dc_state would leak it. */
7954 WARN_ON(dm_new_plane_state->dc_state);
7956 dc_new_plane_state = dc_create_plane_state(dc);
7957 if (!dc_new_plane_state)
7960 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7961 plane->base.id, new_plane_crtc->base.id);
7963 ret = fill_dc_plane_attributes(
7964 new_plane_crtc->dev->dev_private,
/* Release the freshly created plane on any failure before it is
 * attached to the context. */
7969 dc_plane_state_release(dc_new_plane_state);
7973 ret = dm_atomic_get_state(state, &dm_state);
7975 dc_plane_state_release(dc_new_plane_state);
7980 * Any atomic check errors that occur after this will
7981 * not need a release. The plane state will be attached
7982 * to the stream, and therefore part of the atomic
7983 * state. It'll be released when the atomic state is
7986 if (!dc_add_plane_to_context(
7988 dm_new_crtc_state->stream,
7990 dm_state->context)) {
7992 dc_plane_state_release(dc_new_plane_state);
7996 dm_new_plane_state->dc_state = dc_new_plane_state;
7998 /* Tell DC to do a full surface update every time there
7999 * is a plane change. Inefficient, but works for now.
8001 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8003 *lock_and_validation_needed = true;
/*
 * Classify the pending commit for DC: builds per-plane surface updates and a
 * stream update for every changed CRTC, then asks DC (via
 * dc_check_update_surfaces_for_stream) whether a FAST, MED or FULL update is
 * required. Result is written to *out_type; returns 0 or a negative errno.
 * Any stream pointer change or dc_state change forces UPDATE_TYPE_FULL.
 */
8011 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8012 struct drm_atomic_state *state,
8013 enum surface_update_type *out_type)
8015 struct dc *dc = dm->dc;
8016 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8017 int i, j, num_plane, ret = 0;
8018 struct drm_plane_state *old_plane_state, *new_plane_state;
8019 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8020 struct drm_crtc *new_plane_crtc;
8021 struct drm_plane *plane;
8023 struct drm_crtc *crtc;
8024 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8025 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8026 struct dc_stream_status *status = NULL;
8027 enum surface_update_type update_type = UPDATE_TYPE_FAST;
/* Heap-allocated scratch bundle: too large for the kernel stack. */
8028 struct surface_info_bundle {
8029 struct dc_surface_update surface_updates[MAX_SURFACES];
8030 struct dc_plane_info plane_infos[MAX_SURFACES];
8031 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8032 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8033 struct dc_stream_update stream_update;
8036 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8039 DRM_ERROR("Failed to allocate update bundle\n");
8040 /* Set type to FULL to avoid crashing in DC*/
8041 update_type = UPDATE_TYPE_FULL;
8045 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
/* Reset the scratch bundle for each CRTC iteration. */
8047 memset(bundle, 0, sizeof(struct surface_info_bundle));
8049 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8050 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
/* A stream swap can never be a fast update. */
8053 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8054 update_type = UPDATE_TYPE_FULL;
8058 if (!new_dm_crtc_state->stream)
8061 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8062 const struct amdgpu_framebuffer *amdgpu_fb =
8063 to_amdgpu_framebuffer(new_plane_state->fb);
8064 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8065 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8066 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8067 uint64_t tiling_flags;
8069 new_plane_crtc = new_plane_state->crtc;
8070 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8071 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8073 if (plane->type == DRM_PLANE_TYPE_CURSOR)
/* A replaced dc_plane_state likewise forces a full update. */
8076 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8077 update_type = UPDATE_TYPE_FULL;
/* Only planes on the CRTC currently being examined are gathered. */
8081 if (crtc != new_plane_crtc)
8084 bundle->surface_updates[num_plane].surface =
8085 new_dm_plane_state->dc_state;
8087 if (new_crtc_state->mode_changed) {
8088 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8089 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8092 if (new_crtc_state->color_mgmt_changed) {
8093 bundle->surface_updates[num_plane].gamma =
8094 new_dm_plane_state->dc_state->gamma_correction;
8095 bundle->surface_updates[num_plane].in_transfer_func =
8096 new_dm_plane_state->dc_state->in_transfer_func;
8097 bundle->surface_updates[num_plane].gamut_remap_matrix =
8098 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8099 bundle->stream_update.gamut_remap =
8100 &new_dm_crtc_state->stream->gamut_remap_matrix;
8101 bundle->stream_update.output_csc_transform =
8102 &new_dm_crtc_state->stream->csc_color_matrix;
8103 bundle->stream_update.out_transfer_func =
8104 new_dm_crtc_state->stream->out_transfer_func;
8107 ret = fill_dc_scaling_info(new_plane_state,
8112 bundle->surface_updates[num_plane].scaling_info = scaling_info;
8115 ret = get_fb_info(amdgpu_fb, &tiling_flags);
8119 ret = fill_dc_plane_info_and_addr(
8120 dm->adev, new_plane_state, tiling_flags,
8122 &flip_addr->address,
8127 bundle->surface_updates[num_plane].plane_info = plane_info;
8128 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8137 ret = dm_atomic_get_state(state, &dm_state);
8141 old_dm_state = dm_atomic_get_old_state(state);
8142 if (!old_dm_state) {
/* Query DC against the OLD context's stream status for this stream. */
8147 status = dc_stream_get_status_from_state(old_dm_state->context,
8148 new_dm_crtc_state->stream);
8149 bundle->stream_update.stream = new_dm_crtc_state->stream;
8151 * TODO: DC modifies the surface during this call so we need
8152 * to lock here - find a way to do this without locking.
8154 mutex_lock(&dm->dc_lock);
8155 update_type = dc_check_update_surfaces_for_stream(
8156 dc, bundle->surface_updates, num_plane,
8157 &bundle->stream_update, status);
8158 mutex_unlock(&dm->dc_lock);
/* Anything above MED is normalized to FULL before returning. */
8160 if (update_type > UPDATE_TYPE_MED) {
8161 update_type = UPDATE_TYPE_FULL;
8169 *out_type = update_type;
/*
 * For an MST connector driving @crtc, pull all CRTCs affected by DSC on the
 * same MST topology into the atomic state (DSC bandwidth is shared across
 * the topology, so sibling streams may need revalidation). Connectors
 * without an MST port are skipped. Returns 0 or a negative error code.
 */
8173 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8175 struct drm_connector *connector;
8176 struct drm_connector_state *conn_state;
8177 struct amdgpu_dm_connector *aconnector = NULL;
8179 for_each_new_connector_in_state(state, connector, conn_state, i) {
8180 if (conn_state->crtc != crtc)
8183 aconnector = to_amdgpu_dm_connector(connector);
8184 if (!aconnector->port || !aconnector->mst_port)
8193 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8197 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8198 * @dev: The DRM device
8199 * @state: The atomic state to commit
8201 * Validate that the given atomic state is programmable by DC into hardware.
8202 * This involves constructing a &struct dc_state reflecting the new hardware
8203 * state we wish to commit, then querying DC to see if it is programmable. It's
8204 * important not to modify the existing DC state. Otherwise, atomic_check
8205 * may unexpectedly commit hardware changes.
8207 * When validating the DC state, it's important that the right locks are
8208 * acquired. For full updates case which removes/adds/updates streams on one
8209 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8210 * that any such full update commit will wait for completion of any outstanding
8211 * flip using DRMs synchronization events. See
8212 * dm_determine_update_type_for_commit()
8214 * Note that DM adds the affected connectors for all CRTCs in state, when that
8215 * might not seem necessary. This is because DC stream creation requires the
8216 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8217 * be possible but non-trivial - a possible TODO item.
8219 * Return: -Error code if validation failed.
8221 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8222 struct drm_atomic_state *state)
8224 struct amdgpu_device *adev = dev->dev_private;
8225 struct dm_atomic_state *dm_state = NULL;
8226 struct dc *dc = adev->dm.dc;
8227 struct drm_connector *connector;
8228 struct drm_connector_state *old_con_state, *new_con_state;
8229 struct drm_crtc *crtc;
8230 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8231 struct drm_plane *plane;
8232 struct drm_plane_state *old_plane_state, *new_plane_state;
8233 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8234 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8239 * This bool will be set for true for any modeset/reset
8240 * or plane update which implies non fast surface update.
8242 bool lock_and_validation_needed = false;
/* Run the core DRM modeset checks first; they populate the CRTC and
 * connector states that everything below relies on. */
8244 ret = drm_atomic_helper_check_modeset(dev, state);
/* Navi10 and newer: when a CRTC needs a modeset, pull the other MST/DSC
 * CRTCs into this atomic state as well (see add_affected_mst_dsc_crtcs). */
8248 if (adev->asic_type >= CHIP_NAVI10) {
8249 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8250 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8251 ret = add_affected_mst_dsc_crtcs(state, crtc);
/* CRTCs with no modeset, no color-management change and no VRR toggle do
 * not need their connectors/planes added to the state below. */
8258 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8259 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8260 !new_crtc_state->color_mgmt_changed &&
8261 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8264 if (!new_crtc_state->enable)
8267 ret = drm_atomic_add_affected_connectors(state, crtc);
8271 ret = drm_atomic_add_affected_planes(state, crtc);
8277 * Add all primary and overlay planes on the CRTC to the state
8278 * whenever a plane is enabled to maintain correct z-ordering
8279 * and to enable fast surface updates.
8281 drm_for_each_crtc(crtc, dev) {
8282 bool modified = false;
/* First pass: does this atomic state touch any non-cursor plane that is
 * entering or leaving this CRTC? */
8284 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8285 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8288 if (new_plane_state->crtc == crtc ||
8289 old_plane_state->crtc == crtc) {
/* Second pass: if so, pull every other non-cursor plane currently on the
 * CRTC into the state too. */
8298 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8299 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8303 drm_atomic_get_plane_state(state, plane);
8305 if (IS_ERR(new_plane_state)) {
8306 ret = PTR_ERR(new_plane_state);
8312 /* Remove exiting planes if they are modified */
8313 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8314 ret = dm_update_plane_state(dc, state, plane,
8318 &lock_and_validation_needed);
8323 /* Disable all crtcs which require disable */
8324 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8325 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8329 &lock_and_validation_needed);
8334 /* Enable all crtcs which require enable */
8335 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8336 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8340 &lock_and_validation_needed);
8345 /* Add new/modified planes */
8346 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8347 ret = dm_update_plane_state(dc, state, plane,
8351 &lock_and_validation_needed);
8356 /* Run this here since we want to validate the streams we created */
8357 ret = drm_atomic_helper_check_planes(dev, state);
8361 if (state->legacy_cursor_update) {
8363 * This is a fast cursor update coming from the plane update
8364 * helper, check if it can be done asynchronously for better
8367 state->async_update =
8368 !drm_atomic_helper_async_check(dev, state);
8371 * Skip the remaining global validation if this is an async
8372 * update. Cursor updates can be done without affecting
8373 * state or bandwidth calcs and this avoids the performance
8374 * penalty of locking the private state object and
8375 * allocating a new dc_state.
8377 if (state->async_update)
8381 /* Check scaling and underscan changes*/
8382 /* TODO Removed scaling changes validation due to inability to commit
8383 * new stream into context w\o causing full reset. Need to
8384 * decide how to handle.
8386 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8387 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8388 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8389 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8391 /* Skip any modesets/resets */
8392 if (!acrtc || drm_atomic_crtc_needs_modeset(
8393 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8396 /* Skip any thing not scale or underscan changes */
8397 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
/* Scaling/underscan changed without a modeset: force a full update so the
 * global validation below runs. */
8400 overall_update_type = UPDATE_TYPE_FULL;
8401 lock_and_validation_needed = true;
8404 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8408 if (overall_update_type < update_type)
8409 overall_update_type = update_type;
8412 * lock_and_validation_needed was an old way to determine if we need to set
8413 * the global lock. Leaving it in to check if we broke any corner cases
8414 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8415 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8417 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8418 WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL")
/* Non-fast updates: acquire the DM private state and the global lock, then
 * validate MST (DSC) configuration and the full DC global state. */
8420 if (overall_update_type > UPDATE_TYPE_FAST) {
8421 ret = dm_atomic_get_state(state, &dm_state);
8425 ret = do_aquire_global_lock(dev, state);
8429 #if defined(CONFIG_DRM_AMD_DC_DCN)
8430 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8433 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8439 * Perform validation of MST topology in the state:
8440 * We need to perform MST atomic check before calling
8441 * dc_validate_global_state(), or there is a chance
8442 * to get stuck in an infinite loop and hang eventually.
8444 ret = drm_dp_mst_atomic_check(state);
8448 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8454 * The commit is a fast update. Fast updates shouldn't change
8455 * the DC context, affect global validation, and can have their
8456 * commit work done in parallel with other commits not touching
8457 * the same resource. If we have a new DC context as part of
8458 * the DM atomic state from validation we need to free it and
8459 * retain the existing one instead.
8461 struct dm_atomic_state *new_dm_state, *old_dm_state;
8463 new_dm_state = dm_atomic_get_new_state(state);
8464 old_dm_state = dm_atomic_get_old_state(state);
8466 if (new_dm_state && old_dm_state) {
8467 if (new_dm_state->context)
8468 dc_release_state(new_dm_state->context);
8470 new_dm_state->context = old_dm_state->context;
8472 if (old_dm_state->context)
8473 dc_retain_state(old_dm_state->context);
8477 /* Store the overall update type for use later in atomic check. */
8478 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8479 struct dm_crtc_state *dm_new_crtc_state =
8480 to_dm_crtc_state(new_crtc_state);
8482 dm_new_crtc_state->update_type = (int)overall_update_type;
8485 /* Must be success */
/* Error exit path: distinguish expected back-offs (deadlock avoidance,
 * pending signal) from genuine validation failures in the debug log. */
8490 if (ret == -EDEADLK)
8491 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8492 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8493 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8495 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8500 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8501 struct amdgpu_dm_connector *amdgpu_dm_connector)
8504 bool capable = false;
8506 if (amdgpu_dm_connector->dc_link &&
8507 dm_helpers_dp_read_dpcd(
8509 amdgpu_dm_connector->dc_link,
8510 DP_DOWN_STREAM_PORT_COUNT,
8512 sizeof(dpcd_data))) {
8513 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
/*
 * amdgpu_dm_update_freesync_caps() - re-derive FreeSync/VRR capability for a
 * connector from the sink's EDID, caching the supported vertical-refresh
 * range on the amdgpu_dm_connector and the result on its DRM state.
 */
8518 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8522 bool edid_check_required;
8523 struct detailed_timing *timing;
8524 struct detailed_non_pixel *data;
8525 struct detailed_data_monitor_range *range;
8526 struct amdgpu_dm_connector *amdgpu_dm_connector =
8527 to_amdgpu_dm_connector(connector);
8528 struct dm_connector_state *dm_con_state = NULL;
8530 struct drm_device *dev = connector->dev;
8531 struct amdgpu_device *adev = dev->dev_private;
8532 bool freesync_capable = false;
8534 if (!connector->state) {
8535 DRM_ERROR("%s - Connector has no state", __func__);
8540 dm_con_state = to_dm_connector_state(connector->state);
/* Reset the cached range; it is re-read from the EDID below. */
8542 amdgpu_dm_connector->min_vfreq = 0;
8543 amdgpu_dm_connector->max_vfreq = 0;
8544 amdgpu_dm_connector->pixel_clock_mhz = 0;
8549 dm_con_state = to_dm_connector_state(connector->state);
8551 edid_check_required = false;
8552 if (!amdgpu_dm_connector->dc_sink) {
8553 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8556 if (!adev->dm.freesync_module)
8559 * if edid non zero restrict freesync only for dp and edp
/* Only DP/eDP sinks qualify, and only when the sink can ignore MSA timing
 * parameters (see is_dp_capable_without_timing_msa). */
8562 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8563 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8564 edid_check_required = is_dp_capable_without_timing_msa(
8566 amdgpu_dm_connector);
/* Monitor-range descriptors are only parsed for EDIDs newer than 1.1. */
8569 if (edid_check_required == true && (edid->version > 1 ||
8570 (edid->version == 1 && edid->revision > 1))) {
/* Scan the four detailed-timing descriptor slots for a range descriptor. */
8571 for (i = 0; i < 4; i++) {
8573 timing = &edid->detailed_timings[i];
8574 data = &timing->data.other_data;
8575 range = &data->data.range;
8577 * Check if monitor has continuous frequency mode
8579 if (data->type != EDID_DETAIL_MONITOR_RANGE)
8582 * Check for flag range limits only. If flag == 1 then
8583 * no additional timing information provided.
8584 * Default GTF, GTF Secondary curve and CVT are not
8587 if (range->flags != 1)
8590 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8591 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
/* NOTE(review): stored value is range->pixel_clock_mhz * 10 - confirm the
 * intended units against the EDID range-descriptor definition. */
8592 amdgpu_dm_connector->pixel_clock_mhz =
8593 range->pixel_clock_mhz * 10;
/* Require a usable VRR window (> 10 Hz spread) before advertising. */
8597 if (amdgpu_dm_connector->max_vfreq -
8598 amdgpu_dm_connector->min_vfreq > 10) {
8600 freesync_capable = true;
8606 dm_con_state->freesync_capable = freesync_capable;
/* Publish the result through the DRM "vrr_capable" connector property. */
8608 if (connector->vrr_capable_property)
8609 drm_connector_set_vrr_capable_property(connector,
8613 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8615 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8617 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8619 if (link->type == dc_connection_none)
8621 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8622 dpcd_data, sizeof(dpcd_data))) {
8623 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8625 if (dpcd_data[0] == 0) {
8626 link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED;
8627 link->psr_settings.psr_feature_enabled = false;
8629 link->psr_settings.psr_version = PSR_VERSION_1;
8630 link->psr_settings.psr_feature_enabled = true;
8633 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8638 * amdgpu_dm_link_setup_psr() - configure psr link
8639 * @stream: stream state
8641 * Return: true if success
8643 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8645 struct dc_link *link = NULL;
8646 struct psr_config psr_config = {0};
8647 struct psr_context psr_context = {0};
8648 struct dc *dc = NULL;
8654 link = stream->link;
8657 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8659 if (psr_config.psr_version > 0) {
8660 psr_config.psr_exit_link_training_required = 0x1;
8661 psr_config.psr_frame_capture_indication_req = 0;
8662 psr_config.psr_rfb_setup_time = 0x37;
8663 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8664 psr_config.allow_smu_optimizations = 0x0;
8666 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8669 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8675 * amdgpu_dm_psr_enable() - enable psr f/w
8676 * @stream: stream state
8678 * Return: true if success
8680 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8682 struct dc_link *link = stream->link;
8683 unsigned int vsync_rate_hz = 0;
8684 struct dc_static_screen_params params = {0};
8685 /* Calculate number of static frames before generating interrupt to
8688 // Init fail safe of 2 frames static
8689 unsigned int num_frames_static = 2;
8691 DRM_DEBUG_DRIVER("Enabling psr...\n");
8693 vsync_rate_hz = div64_u64(div64_u64((
8694 stream->timing.pix_clk_100hz * 100),
8695 stream->timing.v_total),
8696 stream->timing.h_total);
8699 * Calculate number of frames such that at least 30 ms of time has
8702 if (vsync_rate_hz != 0) {
8703 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8704 num_frames_static = (30000 / frame_time_microsec) + 1;
8707 params.triggers.cursor_update = true;
8708 params.triggers.overlay_update = true;
8709 params.triggers.surface_update = true;
8710 params.num_frames = num_frames_static;
8712 dc_stream_set_static_screen_params(link->ctx->dc,
8716 return dc_link_set_psr_allow_active(link, true, false);
8720 * amdgpu_dm_psr_disable() - disable psr f/w
8721 * @stream: stream state
8723 * Return: true if success
8725 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8728 DRM_DEBUG_DRIVER("Disabling psr...\n");
8730 return dc_link_set_psr_allow_active(stream->link, false, true);