/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
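
/*
 * Note: these header/footer bytes are stripped from the DMCUB inst_const
 * region when sizing the firmware regions handed to DMUB; see
 * dm_dmub_sw_init() and dm_dmub_hw_init() below.
 */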
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
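
/*
 * For example, a userspace atomic commit enters DM through
 * amdgpu_dm_atomic_commit() (declared below) and is translated into DC
 * stream/plane updates that DC then programs to the hardware.
 */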
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: ignored
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and pick up vrr updates.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
			adev->dm.freesync_module,
			acrtc_state->stream,
			&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
			adev->dm.dc,
			acrtc_state->stream,
			&acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
#endif
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
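
/*
 * Note: the allocation above sizes the compressor buffer at 4 bytes per
 * pixel of the largest listed mode, presumably the 32bpp worst case.
 */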
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}
void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
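
/*
 * To summarize the window usage above: inst_const goes to window 0 (unless
 * PSP has already loaded it to CW0), bss/data to window 2, and the VBIOS
 * image to window 3, while the mailbox, trace buffer, and firmware state
 * windows (4-6) are simply zeroed before hardware init.
 */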
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
			"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
			"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
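
/*
 * Note on the fw_size math above: the DMCU image carries both the ERAM
 * program (ucode_size_bytes - intv_size_bytes) and the interrupt vectors
 * (intv_size_bytes); each piece is registered as its own ucode entry and
 * page-aligned, presumably so the PSP loader can place them separately.
 */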
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
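	/*
	 * The LUT above is linear: 0xFFFF * i / 15 produces 16 evenly spaced
	 * steps of 0x1111 from 0x0000 to 0xFFFF, i.e. an identity backlight
	 * transfer curve for the DMCU.
	 */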

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}
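
/*
 * The WATERMARKS_EXIST / WATERMARKS_LOADED bitmap check above ensures the
 * table is written to the SMU only once after it has been computed, rather
 * than on every call.
 */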
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);

	return 0;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression may require floating-point precision;
	 * to avoid that complexity, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified in
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
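
/*
 * Worked example for the max-luminance math above, with a hypothetical sink
 * reporting max_cll = 65: q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits, matching
 * 50*2**(65/32) ~= 204.4 from the CTA-861-G formula. Note that the integer
 * min-luminance path rounds CV/255 before squaring, so the computed min
 * collapses to 0 for byte-range min_cll values.
 */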
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depending on
	 * link status. Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use the emulated sink to fake the
		 * stream, because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up the refcount for the sink because the
				 * link no longer points to it after disconnect;
				 * otherwise the next crtc-to-connector reshuffle
				 * by UMD would trigger an unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
						       aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);
}
2097 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2099 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2101 bool new_irq_handled = false;
2103 int dpcd_bytes_to_read;
2105 const int max_process_count = 30;
2106 int process_count = 0;
2108 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2110 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2111 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2112 /* DPCD 0x200 - 0x201 for downstream IRQ */
2113 dpcd_addr = DP_SINK_COUNT;
2115 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2116 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2117 dpcd_addr = DP_SINK_COUNT_ESI;
2120 dret = drm_dp_dpcd_read(
2121 &aconnector->dm_dp_aux.aux,
2122 dpcd_addr,
2123 esi,
2124 dpcd_bytes_to_read);
2126 while (dret == dpcd_bytes_to_read &&
2127 process_count < max_process_count) {
2133 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2134 /* handle HPD short pulse irq */
2135 if (aconnector->mst_mgr.mst_state)
2136 drm_dp_mst_hpd_irq(
2137 &aconnector->mst_mgr,
2138 esi,
2139 &new_irq_handled);
2141 if (new_irq_handled) {
2142 /* ACK at DPCD to notify downstream */
2143 const int ack_dpcd_bytes_to_write =
2144 dpcd_bytes_to_read - 1;
2146 for (retry = 0; retry < 3; retry++) {
2149 wret = drm_dp_dpcd_write(
2150 &aconnector->dm_dp_aux.aux,
2151 dpcd_addr + 1,
2152 &esi[1],
2153 ack_dpcd_bytes_to_write);
2154 if (wret == ack_dpcd_bytes_to_write)
2158 /* check if there is a new irq to be handled */
2159 dret = drm_dp_dpcd_read(
2160 &aconnector->dm_dp_aux.aux,
2161 dpcd_addr,
2162 esi,
2163 dpcd_bytes_to_read);
2165 new_irq_handled = false;
2171 if (process_count == max_process_count)
2172 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
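/*
 * Worked example of the DPCD arithmetic above (illustrative, using the
 * standard DPCD register offsets): for DPCD rev >= 1.2, dpcd_addr is
 * DP_SINK_COUNT_ESI (0x2002) and dpcd_bytes_to_read is
 * DP_PSR_ERROR_STATUS (0x2006) - 0x2002 = 4, i.e. registers 0x2002-0x2005.
 * For older sinks it is DP_SINK_COUNT (0x200) with
 * DP_LANE0_1_STATUS (0x202) - 0x200 = 2 bytes, i.e. 0x200-0x201. The ACK
 * write starts at dpcd_addr + 1 with one byte less, so the sink count
 * byte itself is never rewritten.
 */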
2175 static void handle_hpd_rx_irq(void *param)
2177 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2178 struct drm_connector *connector = &aconnector->base;
2179 struct drm_device *dev = connector->dev;
2180 struct dc_link *dc_link = aconnector->dc_link;
2181 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2182 enum dc_connection_type new_connection_type = dc_connection_none;
2183 #ifdef CONFIG_DRM_AMD_DC_HDCP
2184 union hpd_irq_data hpd_irq_data;
2185 struct amdgpu_device *adev = dev->dev_private;
2187 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2191 * TODO: Temporarily add a mutex to protect the HPD interrupt from GPIO
2192 * conflicts; once an i2c helper is implemented, this mutex should be retired.
2195 if (dc_link->type != dc_connection_mst_branch)
2196 mutex_lock(&aconnector->hpd_lock);
2199 #ifdef CONFIG_DRM_AMD_DC_HDCP
2200 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2202 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2204 !is_mst_root_connector) {
2205 /* Downstream Port status changed. */
2206 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2207 DRM_ERROR("KMS: Failed to detect connector\n");
2209 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2210 emulated_link_detect(dc_link);
2212 if (aconnector->fake_enable)
2213 aconnector->fake_enable = false;
2215 amdgpu_dm_update_connector_after_detect(aconnector);
2218 drm_modeset_lock_all(dev);
2219 dm_restore_drm_connector_state(dev, connector);
2220 drm_modeset_unlock_all(dev);
2222 drm_kms_helper_hotplug_event(dev);
2223 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2225 if (aconnector->fake_enable)
2226 aconnector->fake_enable = false;
2228 amdgpu_dm_update_connector_after_detect(aconnector);
2231 drm_modeset_lock_all(dev);
2232 dm_restore_drm_connector_state(dev, connector);
2233 drm_modeset_unlock_all(dev);
2235 drm_kms_helper_hotplug_event(dev);
2238 #ifdef CONFIG_DRM_AMD_DC_HDCP
2239 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2240 if (adev->dm.hdcp_workqueue)
2241 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2244 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2245 (dc_link->type == dc_connection_mst_branch))
2246 dm_handle_hpd_rx_irq(aconnector);
2248 if (dc_link->type != dc_connection_mst_branch) {
2249 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2250 mutex_unlock(&aconnector->hpd_lock);
2254 static void register_hpd_handlers(struct amdgpu_device *adev)
2256 struct drm_device *dev = adev->ddev;
2257 struct drm_connector *connector;
2258 struct amdgpu_dm_connector *aconnector;
2259 const struct dc_link *dc_link;
2260 struct dc_interrupt_params int_params = {0};
2262 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2263 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2265 list_for_each_entry(connector,
2266 &dev->mode_config.connector_list, head) {
2268 aconnector = to_amdgpu_dm_connector(connector);
2269 dc_link = aconnector->dc_link;
2271 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2272 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2273 int_params.irq_source = dc_link->irq_source_hpd;
2275 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2277 (void *) aconnector);
2280 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2282 /* Also register for DP short pulse (hpd_rx). */
2283 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2284 int_params.irq_source = dc_link->irq_source_hpd_rx;
2286 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2288 (void *) aconnector);
2293 /* Register IRQ sources and initialize IRQ callbacks */
2294 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2296 struct dc *dc = adev->dm.dc;
2297 struct common_irq_params *c_irq_params;
2298 struct dc_interrupt_params int_params = {0};
2301 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2303 if (adev->asic_type >= CHIP_VEGA10)
2304 client_id = SOC15_IH_CLIENTID_DCE;
2306 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2307 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2310 * Actions of amdgpu_irq_add_id():
2311 * 1. Register a set() function with base driver.
2312 * Base driver will call set() function to enable/disable an
2313 * interrupt in DC hardware.
2314 * 2. Register amdgpu_dm_irq_handler().
2315 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2316 * coming from DC hardware.
2317 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2318 * for acknowledging and handling. */
2320 /* Use VBLANK interrupt */
2321 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2322 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2324 DRM_ERROR("Failed to add crtc irq id!\n");
2328 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2329 int_params.irq_source =
2330 dc_interrupt_to_irq_source(dc, i, 0);
2332 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2334 c_irq_params->adev = adev;
2335 c_irq_params->irq_src = int_params.irq_source;
2337 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2338 dm_crtc_high_irq, c_irq_params);
2341 /* Use VUPDATE interrupt */
2342 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2343 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2345 DRM_ERROR("Failed to add vupdate irq id!\n");
2349 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2350 int_params.irq_source =
2351 dc_interrupt_to_irq_source(dc, i, 0);
2353 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2355 c_irq_params->adev = adev;
2356 c_irq_params->irq_src = int_params.irq_source;
2358 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2359 dm_vupdate_high_irq, c_irq_params);
2362 /* Use GRPH_PFLIP interrupt */
2363 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2364 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2365 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2367 DRM_ERROR("Failed to add page flip irq id!\n");
2371 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2372 int_params.irq_source =
2373 dc_interrupt_to_irq_source(dc, i, 0);
2375 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2377 c_irq_params->adev = adev;
2378 c_irq_params->irq_src = int_params.irq_source;
2380 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2381 dm_pflip_high_irq, c_irq_params);
2386 r = amdgpu_irq_add_id(adev, client_id,
2387 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2389 DRM_ERROR("Failed to add hpd irq id!\n");
2393 register_hpd_handlers(adev);
2398 #if defined(CONFIG_DRM_AMD_DC_DCN)
2399 /* Register IRQ sources and initialize IRQ callbacks */
2400 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2402 struct dc *dc = adev->dm.dc;
2403 struct common_irq_params *c_irq_params;
2404 struct dc_interrupt_params int_params = {0};
2408 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2409 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2412 * Actions of amdgpu_irq_add_id():
2413 * 1. Register a set() function with base driver.
2414 * Base driver will call set() function to enable/disable an
2415 * interrupt in DC hardware.
2416 * 2. Register amdgpu_dm_irq_handler().
2417 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2418 * coming from DC hardware.
2419 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2420 * for acknowledging and handling.
2423 /* Use VSTARTUP interrupt */
2424 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2425 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2427 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2430 DRM_ERROR("Failed to add crtc irq id!\n");
2434 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2435 int_params.irq_source =
2436 dc_interrupt_to_irq_source(dc, i, 0);
2438 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2440 c_irq_params->adev = adev;
2441 c_irq_params->irq_src = int_params.irq_source;
2443 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2444 dm_dcn_crtc_high_irq, c_irq_params);
2447 /* Use GRPH_PFLIP interrupt */
2448 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2449 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2451 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2453 DRM_ERROR("Failed to add page flip irq id!\n");
2457 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2458 int_params.irq_source =
2459 dc_interrupt_to_irq_source(dc, i, 0);
2461 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2463 c_irq_params->adev = adev;
2464 c_irq_params->irq_src = int_params.irq_source;
2466 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2467 dm_pflip_high_irq, c_irq_params);
2472 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2475 DRM_ERROR("Failed to add hpd irq id!\n");
2479 register_hpd_handlers(adev);
2486 * Acquires the lock for the atomic state object and returns
2487 * the new atomic state.
2489 * This should only be called during atomic check.
2491 static int dm_atomic_get_state(struct drm_atomic_state *state,
2492 struct dm_atomic_state **dm_state)
2494 struct drm_device *dev = state->dev;
2495 struct amdgpu_device *adev = dev->dev_private;
2496 struct amdgpu_display_manager *dm = &adev->dm;
2497 struct drm_private_state *priv_state;
2502 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2503 if (IS_ERR(priv_state))
2504 return PTR_ERR(priv_state);
2506 *dm_state = to_dm_atomic_state(priv_state);
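/*
 * Minimal usage sketch (illustrative only, assuming the usual
 * atomic-check calling context; not a verbatim caller from this file):
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context (the dc_state) can now be read or modified
 */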
2511 struct dm_atomic_state *
2512 dm_atomic_get_new_state(struct drm_atomic_state *state)
2514 struct drm_device *dev = state->dev;
2515 struct amdgpu_device *adev = dev->dev_private;
2516 struct amdgpu_display_manager *dm = &adev->dm;
2517 struct drm_private_obj *obj;
2518 struct drm_private_state *new_obj_state;
2521 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2522 if (obj->funcs == dm->atomic_obj.funcs)
2523 return to_dm_atomic_state(new_obj_state);
2529 struct dm_atomic_state *
2530 dm_atomic_get_old_state(struct drm_atomic_state *state)
2532 struct drm_device *dev = state->dev;
2533 struct amdgpu_device *adev = dev->dev_private;
2534 struct amdgpu_display_manager *dm = &adev->dm;
2535 struct drm_private_obj *obj;
2536 struct drm_private_state *old_obj_state;
2539 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2540 if (obj->funcs == dm->atomic_obj.funcs)
2541 return to_dm_atomic_state(old_obj_state);
2547 static struct drm_private_state *
2548 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2550 struct dm_atomic_state *old_state, *new_state;
2552 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2553 if (!new_state)
2554 return NULL;
2556 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2558 old_state = to_dm_atomic_state(obj->state);
2560 if (old_state && old_state->context)
2561 new_state->context = dc_copy_state(old_state->context);
2563 if (!new_state->context) {
2564 kfree(new_state);
2565 return NULL;
2566 }
2568 return &new_state->base;
2571 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2572 struct drm_private_state *state)
2574 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2576 if (dm_state && dm_state->context)
2577 dc_release_state(dm_state->context);
2582 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2583 .atomic_duplicate_state = dm_atomic_duplicate_state,
2584 .atomic_destroy_state = dm_atomic_destroy_state,
2587 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2589 struct dm_atomic_state *state;
2592 adev->mode_info.mode_config_initialized = true;
2594 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2595 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2597 adev->ddev->mode_config.max_width = 16384;
2598 adev->ddev->mode_config.max_height = 16384;
2600 adev->ddev->mode_config.preferred_depth = 24;
2601 adev->ddev->mode_config.prefer_shadow = 1;
2602 /* indicates support for immediate flip */
2603 adev->ddev->mode_config.async_page_flip = true;
2605 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2607 state = kzalloc(sizeof(*state), GFP_KERNEL);
2608 if (!state)
2609 return -ENOMEM;
2611 state->context = dc_create_state(adev->dm.dc);
2612 if (!state->context) {
2613 kfree(state);
2614 return -ENOMEM;
2615 }
2617 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2619 drm_atomic_private_obj_init(adev->ddev,
2620 &adev->dm.atomic_obj,
2622 &dm_atomic_state_funcs);
2624 r = amdgpu_display_modeset_create_props(adev);
2628 r = amdgpu_dm_audio_init(adev);
2635 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2636 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2637 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2640 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2642 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2644 #if defined(CONFIG_ACPI)
2645 struct amdgpu_dm_backlight_caps caps;
2647 if (dm->backlight_caps.caps_valid)
2650 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2651 if (caps.caps_valid) {
2652 dm->backlight_caps.caps_valid = true;
2653 if (caps.aux_support)
2655 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2656 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2658 dm->backlight_caps.min_input_signal =
2659 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2660 dm->backlight_caps.max_input_signal =
2661 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2664 if (dm->backlight_caps.aux_support)
2667 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2668 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2672 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2679 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2680 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2685 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2686 const uint32_t user_brightness)
2688 u32 min, max, conversion_pace;
2689 u32 brightness = user_brightness;
2694 if (!caps->aux_support) {
2695 max = caps->max_input_signal;
2696 min = caps->min_input_signal;
2698 * The brightness input is in the range 0-255.
2699 * It needs to be rescaled to be between the
2700 * requested min and max input signal.
2701 * It also needs to be scaled up by 0x101 to
2702 * match the DC interface which has a range of 0 to 0xffff.
2705 conversion_pace = 0x101;
2707 brightness = user_brightness
2708 * conversion_pace
2709 * (max - min)
2710 / AMDGPU_MAX_BL_LEVEL
2711 + min * conversion_pace;
2714 * We are doing a linear interpolation here, which is OK but
2715 * does not provide the optimal result. We probably want
2716 * something close to the Perceptual Quantizer (PQ) curve.
2718 max = caps->aux_max_input_signal;
2719 min = caps->aux_min_input_signal;
2721 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2722 + user_brightness * max;
2723 // Multiply the value by 1000 since we use millinits
2724 brightness *= 1000;
2725 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
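/*
 * Worked numbers for the conversion above (illustrative, using the
 * default caps min = 12, max = 255): for the non-AUX path,
 * user_brightness = 255 gives 255 * 0x101 * (255 - 12) / 255
 * + 12 * 0x101 = 0x101 * 255 = 0xffff, the top of DC's 16-bit range.
 * For the AUX path, user_brightness = 255 gives 255 * max, which after
 * the millinits conversion becomes max * 1000, i.e. max nits.
 */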
2732 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2734 struct amdgpu_display_manager *dm = bl_get_data(bd);
2735 struct amdgpu_dm_backlight_caps caps;
2736 struct dc_link *link = NULL;
2740 amdgpu_dm_update_backlight_caps(dm);
2741 caps = dm->backlight_caps;
2743 link = (struct dc_link *)dm->backlight_link;
2745 brightness = convert_brightness(&caps, bd->props.brightness);
2746 // Change brightness based on AUX property
2747 if (caps.aux_support)
2748 return set_backlight_via_aux(link, brightness);
2750 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2755 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2757 struct amdgpu_display_manager *dm = bl_get_data(bd);
2758 int ret = dc_link_get_backlight_level(dm->backlight_link);
2760 if (ret == DC_ERROR_UNEXPECTED)
2761 return bd->props.brightness;
2765 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2766 .options = BL_CORE_SUSPENDRESUME,
2767 .get_brightness = amdgpu_dm_backlight_get_brightness,
2768 .update_status = amdgpu_dm_backlight_update_status,
2772 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2775 struct backlight_properties props = { 0 };
2777 amdgpu_dm_update_backlight_caps(dm);
2779 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2780 props.brightness = AMDGPU_MAX_BL_LEVEL;
2781 props.type = BACKLIGHT_RAW;
2783 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2784 dm->adev->ddev->primary->index);
2786 dm->backlight_dev = backlight_device_register(bl_name,
2787 dm->adev->ddev->dev,
2789 &amdgpu_dm_backlight_ops,
2792 if (IS_ERR(dm->backlight_dev))
2793 DRM_ERROR("DM: Backlight registration failed!\n");
2794 else
2795 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2800 static int initialize_plane(struct amdgpu_display_manager *dm,
2801 struct amdgpu_mode_info *mode_info, int plane_id,
2802 enum drm_plane_type plane_type,
2803 const struct dc_plane_cap *plane_cap)
2805 struct drm_plane *plane;
2806 unsigned long possible_crtcs;
2809 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2811 DRM_ERROR("KMS: Failed to allocate plane\n");
2814 plane->type = plane_type;
2817 * HACK: IGT tests expect that the primary plane for a CRTC
2818 * can only have one possible CRTC. Only expose support for
2819 * any CRTC if they're not going to be used as a primary plane
2820 * for a CRTC - like overlay or underlay planes.
2822 possible_crtcs = 1 << plane_id;
2823 if (plane_id >= dm->dc->caps.max_streams)
2824 possible_crtcs = 0xff;
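/*
 * Illustrative example of the mask above: with max_streams = 4, primary
 * plane 0 gets possible_crtcs = 0x1 (CRTC 0 only) and plane 3 gets 0x8,
 * while an overlay plane created with plane_id >= 4 gets 0xff and may be
 * placed on any CRTC.
 */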
2826 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2829 DRM_ERROR("KMS: Failed to initialize plane\n");
2835 mode_info->planes[plane_id] = plane;
2841 static void register_backlight_device(struct amdgpu_display_manager *dm,
2842 struct dc_link *link)
2844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2845 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2847 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2848 link->type != dc_connection_none) {
2850 * Even if registration fails, we should continue with
2851 * DM initialization, because not having backlight control
2852 * is better than a black screen.
2854 amdgpu_dm_register_backlight_device(dm);
2856 if (dm->backlight_dev)
2857 dm->backlight_link = link;
2864 * In this architecture, the association
2865 * connector -> encoder -> crtc
2866 * is not really required. The crtc and connector will hold the
2867 * display_index as an abstraction to use with the DAL component.
2869 * Returns 0 on success
2871 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2873 struct amdgpu_display_manager *dm = &adev->dm;
2875 struct amdgpu_dm_connector *aconnector = NULL;
2876 struct amdgpu_encoder *aencoder = NULL;
2877 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2879 int32_t primary_planes;
2880 enum dc_connection_type new_connection_type = dc_connection_none;
2881 const struct dc_plane_cap *plane;
2883 link_cnt = dm->dc->caps.max_links;
2884 if (amdgpu_dm_mode_config_init(dm->adev)) {
2885 DRM_ERROR("DM: Failed to initialize mode config\n");
2889 /* There is one primary plane per CRTC */
2890 primary_planes = dm->dc->caps.max_streams;
2891 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2894 * Initialize primary planes, implicit planes for legacy IOCTLs.
2895 * Order is reversed to match iteration order in atomic check.
2897 for (i = (primary_planes - 1); i >= 0; i--) {
2898 plane = &dm->dc->caps.planes[i];
2900 if (initialize_plane(dm, mode_info, i,
2901 DRM_PLANE_TYPE_PRIMARY, plane)) {
2902 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2908 * Initialize overlay planes, index starting after primary planes.
2909 * These planes have a higher DRM index than the primary planes since
2910 * they should be considered as having a higher z-order.
2911 * Order is reversed to match iteration order in atomic check.
2913 * Only support DCN for now, and only expose one so we don't encourage
2914 * userspace to use up all the pipes.
2916 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2917 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2919 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2922 if (!plane->blends_with_above || !plane->blends_with_below)
2925 if (!plane->pixel_format_support.argb8888)
2928 if (initialize_plane(dm, NULL, primary_planes + i,
2929 DRM_PLANE_TYPE_OVERLAY, plane)) {
2930 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2934 /* Only create one overlay plane. */
2938 for (i = 0; i < dm->dc->caps.max_streams; i++)
2939 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2940 DRM_ERROR("KMS: Failed to initialize crtc\n");
2944 dm->display_indexes_num = dm->dc->caps.max_streams;
2946 /* loops over all connectors on the board */
2947 for (i = 0; i < link_cnt; i++) {
2948 struct dc_link *link = NULL;
2950 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2952 "KMS: Cannot support more than %d display indexes\n",
2953 AMDGPU_DM_MAX_DISPLAY_INDEX);
2957 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2961 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2965 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2966 DRM_ERROR("KMS: Failed to initialize encoder\n");
2970 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2971 DRM_ERROR("KMS: Failed to initialize connector\n");
2975 link = dc_get_link_at_index(dm->dc, i);
2977 if (!dc_link_detect_sink(link, &new_connection_type))
2978 DRM_ERROR("KMS: Failed to detect connector\n");
2980 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2981 emulated_link_detect(link);
2982 amdgpu_dm_update_connector_after_detect(aconnector);
2984 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2985 amdgpu_dm_update_connector_after_detect(aconnector);
2986 register_backlight_device(dm, link);
2987 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2988 amdgpu_dm_set_psr_caps(link);
2994 /* Software is initialized. Now we can register interrupt handlers. */
2995 switch (adev->asic_type) {
3005 case CHIP_POLARIS11:
3006 case CHIP_POLARIS10:
3007 case CHIP_POLARIS12:
3012 if (dce110_register_irq_handlers(dm->adev)) {
3013 DRM_ERROR("DM: Failed to initialize IRQ\n");
3017 #if defined(CONFIG_DRM_AMD_DC_DCN)
3023 if (dcn10_register_irq_handlers(dm->adev)) {
3024 DRM_ERROR("DM: Failed to initialize IRQ\n");
3030 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3034 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3035 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3037 /* No userspace support. */
3038 dm->dc->debug.disable_tri_buf = true;
3048 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3050 drm_mode_config_cleanup(dm->ddev);
3051 drm_atomic_private_obj_fini(&dm->atomic_obj);
3055 /******************************************************************************
3056 * amdgpu_display_funcs functions
3057 *****************************************************************************/
3060 * dm_bandwidth_update - program display watermarks
3062 * @adev: amdgpu_device pointer
3064 * Calculate and program the display watermarks and line buffer allocation.
3066 static void dm_bandwidth_update(struct amdgpu_device *adev)
3068 /* TODO: implement later */
3071 static const struct amdgpu_display_funcs dm_display_funcs = {
3072 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3073 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3074 .backlight_set_level = NULL, /* never called for DC */
3075 .backlight_get_level = NULL, /* never called for DC */
3076 .hpd_sense = NULL,/* called unconditionally */
3077 .hpd_set_polarity = NULL, /* called unconditionally */
3078 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3079 .page_flip_get_scanoutpos =
3080 dm_crtc_get_scanoutpos,/* called unconditionally */
3081 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3082 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3085 #if defined(CONFIG_DEBUG_KERNEL_DC)
3087 static ssize_t s3_debug_store(struct device *device,
3088 struct device_attribute *attr,
3094 struct drm_device *drm_dev = dev_get_drvdata(device);
3095 struct amdgpu_device *adev = drm_dev->dev_private;
3097 ret = kstrtoint(buf, 0, &s3_state);
3102 drm_kms_helper_hotplug_event(adev->ddev);
3107 return ret == 0 ? count : 0;
3110 DEVICE_ATTR_WO(s3_debug);
3114 static int dm_early_init(void *handle)
3116 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3118 switch (adev->asic_type) {
3121 adev->mode_info.num_crtc = 6;
3122 adev->mode_info.num_hpd = 6;
3123 adev->mode_info.num_dig = 6;
3126 adev->mode_info.num_crtc = 4;
3127 adev->mode_info.num_hpd = 6;
3128 adev->mode_info.num_dig = 7;
3132 adev->mode_info.num_crtc = 2;
3133 adev->mode_info.num_hpd = 6;
3134 adev->mode_info.num_dig = 6;
3138 adev->mode_info.num_crtc = 6;
3139 adev->mode_info.num_hpd = 6;
3140 adev->mode_info.num_dig = 7;
3143 adev->mode_info.num_crtc = 3;
3144 adev->mode_info.num_hpd = 6;
3145 adev->mode_info.num_dig = 9;
3148 adev->mode_info.num_crtc = 2;
3149 adev->mode_info.num_hpd = 6;
3150 adev->mode_info.num_dig = 9;
3152 case CHIP_POLARIS11:
3153 case CHIP_POLARIS12:
3154 adev->mode_info.num_crtc = 5;
3155 adev->mode_info.num_hpd = 5;
3156 adev->mode_info.num_dig = 5;
3158 case CHIP_POLARIS10:
3160 adev->mode_info.num_crtc = 6;
3161 adev->mode_info.num_hpd = 6;
3162 adev->mode_info.num_dig = 6;
3167 adev->mode_info.num_crtc = 6;
3168 adev->mode_info.num_hpd = 6;
3169 adev->mode_info.num_dig = 6;
3171 #if defined(CONFIG_DRM_AMD_DC_DCN)
3173 adev->mode_info.num_crtc = 4;
3174 adev->mode_info.num_hpd = 4;
3175 adev->mode_info.num_dig = 4;
3180 adev->mode_info.num_crtc = 6;
3181 adev->mode_info.num_hpd = 6;
3182 adev->mode_info.num_dig = 6;
3185 adev->mode_info.num_crtc = 5;
3186 adev->mode_info.num_hpd = 5;
3187 adev->mode_info.num_dig = 5;
3190 adev->mode_info.num_crtc = 4;
3191 adev->mode_info.num_hpd = 4;
3192 adev->mode_info.num_dig = 4;
3195 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3199 amdgpu_dm_set_irq_funcs(adev);
3201 if (adev->mode_info.funcs == NULL)
3202 adev->mode_info.funcs = &dm_display_funcs;
3205 * Note: Do NOT change adev->audio_endpt_rreg and
3206 * adev->audio_endpt_wreg because they are initialised in
3207 * amdgpu_device_init()
3209 #if defined(CONFIG_DEBUG_KERNEL_DC)
3212 &dev_attr_s3_debug);
3218 static bool modeset_required(struct drm_crtc_state *crtc_state,
3219 struct dc_stream_state *new_stream,
3220 struct dc_stream_state *old_stream)
3222 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3225 if (!crtc_state->enable)
3228 return crtc_state->active;
3231 static bool modereset_required(struct drm_crtc_state *crtc_state)
3233 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3236 return !crtc_state->enable || !crtc_state->active;
3239 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3241 drm_encoder_cleanup(encoder);
3245 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3246 .destroy = amdgpu_dm_encoder_destroy,
3250 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3251 struct dc_scaling_info *scaling_info)
3253 int scale_w, scale_h;
3255 memset(scaling_info, 0, sizeof(*scaling_info));
3257 /* Source is fixed 16.16 but we ignore mantissa for now... */
3258 scaling_info->src_rect.x = state->src_x >> 16;
3259 scaling_info->src_rect.y = state->src_y >> 16;
3261 scaling_info->src_rect.width = state->src_w >> 16;
3262 if (scaling_info->src_rect.width == 0)
3265 scaling_info->src_rect.height = state->src_h >> 16;
3266 if (scaling_info->src_rect.height == 0)
3269 scaling_info->dst_rect.x = state->crtc_x;
3270 scaling_info->dst_rect.y = state->crtc_y;
3272 if (state->crtc_w == 0)
3275 scaling_info->dst_rect.width = state->crtc_w;
3277 if (state->crtc_h == 0)
3280 scaling_info->dst_rect.height = state->crtc_h;
3282 /* DRM doesn't specify clipping on destination output. */
3283 scaling_info->clip_rect = scaling_info->dst_rect;
3285 /* TODO: Validate scaling per-format with DC plane caps */
3286 scale_w = scaling_info->dst_rect.width * 1000 /
3287 scaling_info->src_rect.width;
3289 if (scale_w < 250 || scale_w > 16000)
3292 scale_h = scaling_info->dst_rect.height * 1000 /
3293 scaling_info->src_rect.height;
3295 if (scale_h < 250 || scale_h > 16000)
3299 * The "scaling_quality" can be ignored for now; quality = 0 lets DC
3300 * assume reasonable defaults based on the format.
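/*
 * Worked example of the scale-factor check above (illustrative): a
 * 1920x1080 source scaled to a 3840x2160 destination gives
 * scale_w = 3840 * 1000 / 1920 = 2000 and scale_h = 2000, i.e. 2.0x,
 * well inside the supported 0.25x (250) to 16x (16000) window.
 */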
3306 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3307 uint64_t *tiling_flags)
3309 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3310 int r = amdgpu_bo_reserve(rbo, false);
3313 /* Don't show error message when returning -ERESTARTSYS */
3314 if (r != -ERESTARTSYS)
3315 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3320 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3322 amdgpu_bo_unreserve(rbo);
3327 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3329 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3331 return offset ? (address + offset * 256) : 0;
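/*
 * Illustrative example: DCC_OFFSET_256B is stored in units of 256 bytes,
 * so an offset field of 0x40 places the DCC metadata at
 * address + 0x40 * 256 = address + 0x4000, while an offset of 0 means
 * the surface carries no DCC metadata and 0 is returned.
 */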
3335 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3336 const struct amdgpu_framebuffer *afb,
3337 const enum surface_pixel_format format,
3338 const enum dc_rotation_angle rotation,
3339 const struct plane_size *plane_size,
3340 const union dc_tiling_info *tiling_info,
3341 const uint64_t info,
3342 struct dc_plane_dcc_param *dcc,
3343 struct dc_plane_address *address)
3345 struct dc *dc = adev->dm.dc;
3346 struct dc_dcc_surface_param input;
3347 struct dc_surface_dcc_cap output;
3348 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3349 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3350 uint64_t dcc_address;
3352 memset(&input, 0, sizeof(input));
3353 memset(&output, 0, sizeof(output));
3358 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3361 if (!dc->cap_funcs.get_dcc_compression_cap)
3364 input.format = format;
3365 input.surface_size.width = plane_size->surface_size.width;
3366 input.surface_size.height = plane_size->surface_size.height;
3367 input.swizzle_mode = tiling_info->gfx9.swizzle;
3369 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3370 input.scan = SCAN_DIRECTION_HORIZONTAL;
3371 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3372 input.scan = SCAN_DIRECTION_VERTICAL;
3374 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3377 if (!output.capable)
3380 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3385 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3386 dcc->independent_64b_blks = i64b;
3388 dcc_address = get_dcc_address(afb->address, info);
3389 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3390 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3396 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3397 const struct amdgpu_framebuffer *afb,
3398 const enum surface_pixel_format format,
3399 const enum dc_rotation_angle rotation,
3400 const uint64_t tiling_flags,
3401 union dc_tiling_info *tiling_info,
3402 struct plane_size *plane_size,
3403 struct dc_plane_dcc_param *dcc,
3404 struct dc_plane_address *address)
3406 const struct drm_framebuffer *fb = &afb->base;
3409 memset(tiling_info, 0, sizeof(*tiling_info));
3410 memset(plane_size, 0, sizeof(*plane_size));
3411 memset(dcc, 0, sizeof(*dcc));
3412 memset(address, 0, sizeof(*address));
3414 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3415 plane_size->surface_size.x = 0;
3416 plane_size->surface_size.y = 0;
3417 plane_size->surface_size.width = fb->width;
3418 plane_size->surface_size.height = fb->height;
3419 plane_size->surface_pitch =
3420 fb->pitches[0] / fb->format->cpp[0];
3422 address->type = PLN_ADDR_TYPE_GRAPHICS;
3423 address->grph.addr.low_part = lower_32_bits(afb->address);
3424 address->grph.addr.high_part = upper_32_bits(afb->address);
3425 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3426 uint64_t chroma_addr = afb->address + fb->offsets[1];
3428 plane_size->surface_size.x = 0;
3429 plane_size->surface_size.y = 0;
3430 plane_size->surface_size.width = fb->width;
3431 plane_size->surface_size.height = fb->height;
3432 plane_size->surface_pitch =
3433 fb->pitches[0] / fb->format->cpp[0];
3435 plane_size->chroma_size.x = 0;
3436 plane_size->chroma_size.y = 0;
3437 /* TODO: set these based on surface format */
3438 plane_size->chroma_size.width = fb->width / 2;
3439 plane_size->chroma_size.height = fb->height / 2;
3441 plane_size->chroma_pitch =
3442 fb->pitches[1] / fb->format->cpp[1];
3444 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3445 address->video_progressive.luma_addr.low_part =
3446 lower_32_bits(afb->address);
3447 address->video_progressive.luma_addr.high_part =
3448 upper_32_bits(afb->address);
3449 address->video_progressive.chroma_addr.low_part =
3450 lower_32_bits(chroma_addr);
3451 address->video_progressive.chroma_addr.high_part =
3452 upper_32_bits(chroma_addr);
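/*
 * Worked example for the semi-planar path above (illustrative): a
 * 1920x1080 NV12 framebuffer has a full-size luma plane and a
 * half-resolution interleaved CbCr plane, so chroma_size is 960x540.
 * With pitches[0] = 1920, cpp[0] = 1 and pitches[1] = 1920, cpp[1] = 2,
 * surface_pitch = 1920 and chroma_pitch = 960 pixels.
 */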
3455 /* Fill GFX8 params */
3456 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3457 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3459 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3460 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3461 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3462 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3463 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3465 /* XXX fix me for VI */
3466 tiling_info->gfx8.num_banks = num_banks;
3467 tiling_info->gfx8.array_mode =
3468 DC_ARRAY_2D_TILED_THIN1;
3469 tiling_info->gfx8.tile_split = tile_split;
3470 tiling_info->gfx8.bank_width = bankw;
3471 tiling_info->gfx8.bank_height = bankh;
3472 tiling_info->gfx8.tile_aspect = mtaspect;
3473 tiling_info->gfx8.tile_mode =
3474 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3475 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3476 == DC_ARRAY_1D_TILED_THIN1) {
3477 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3480 tiling_info->gfx8.pipe_config =
3481 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3483 if (adev->asic_type == CHIP_VEGA10 ||
3484 adev->asic_type == CHIP_VEGA12 ||
3485 adev->asic_type == CHIP_VEGA20 ||
3486 adev->asic_type == CHIP_NAVI10 ||
3487 adev->asic_type == CHIP_NAVI14 ||
3488 adev->asic_type == CHIP_NAVI12 ||
3489 adev->asic_type == CHIP_RENOIR ||
3490 adev->asic_type == CHIP_RAVEN) {
3491 /* Fill GFX9 params */
3492 tiling_info->gfx9.num_pipes =
3493 adev->gfx.config.gb_addr_config_fields.num_pipes;
3494 tiling_info->gfx9.num_banks =
3495 adev->gfx.config.gb_addr_config_fields.num_banks;
3496 tiling_info->gfx9.pipe_interleave =
3497 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3498 tiling_info->gfx9.num_shader_engines =
3499 adev->gfx.config.gb_addr_config_fields.num_se;
3500 tiling_info->gfx9.max_compressed_frags =
3501 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3502 tiling_info->gfx9.num_rb_per_se =
3503 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3504 tiling_info->gfx9.swizzle =
3505 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3506 tiling_info->gfx9.shaderEnable = 1;
3508 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3509 plane_size, tiling_info,
3510 tiling_flags, dcc, address);
3519 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3520 bool *per_pixel_alpha, bool *global_alpha,
3521 int *global_alpha_value)
3523 *per_pixel_alpha = false;
3524 *global_alpha = false;
3525 *global_alpha_value = 0xff;
3527 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3530 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3531 static const uint32_t alpha_formats[] = {
3532 DRM_FORMAT_ARGB8888,
3533 DRM_FORMAT_RGBA8888,
3534 DRM_FORMAT_ABGR8888,
3535 };
3536 uint32_t format = plane_state->fb->format->format;
3539 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3540 if (format == alpha_formats[i]) {
3541 *per_pixel_alpha = true;
3547 if (plane_state->alpha < 0xffff) {
3548 *global_alpha = true;
3549 *global_alpha_value = plane_state->alpha >> 8;
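/*
 * Illustrative example: DRM stores plane alpha as a 16-bit value, so a
 * 50% alpha of 0x8000 is below 0xffff, enables global alpha, and maps
 * to a DC global_alpha_value of 0x8000 >> 8 = 0x80.
 */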
3554 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3555 const enum surface_pixel_format format,
3556 enum dc_color_space *color_space)
3560 *color_space = COLOR_SPACE_SRGB;
3562 /* DRM color properties only affect non-RGB formats. */
3563 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3566 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3568 switch (plane_state->color_encoding) {
3569 case DRM_COLOR_YCBCR_BT601:
3571 *color_space = COLOR_SPACE_YCBCR601;
3573 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3576 case DRM_COLOR_YCBCR_BT709:
3578 *color_space = COLOR_SPACE_YCBCR709;
3580 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3583 case DRM_COLOR_YCBCR_BT2020:
3585 *color_space = COLOR_SPACE_2020_YCBCR;
3598 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3599 const struct drm_plane_state *plane_state,
3600 const uint64_t tiling_flags,
3601 struct dc_plane_info *plane_info,
3602 struct dc_plane_address *address)
3604 const struct drm_framebuffer *fb = plane_state->fb;
3605 const struct amdgpu_framebuffer *afb =
3606 to_amdgpu_framebuffer(plane_state->fb);
3607 struct drm_format_name_buf format_name;
3610 memset(plane_info, 0, sizeof(*plane_info));
3612 switch (fb->format->format) {
3614 plane_info->format =
3615 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3617 case DRM_FORMAT_RGB565:
3618 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3620 case DRM_FORMAT_XRGB8888:
3621 case DRM_FORMAT_ARGB8888:
3622 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3624 case DRM_FORMAT_XRGB2101010:
3625 case DRM_FORMAT_ARGB2101010:
3626 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3628 case DRM_FORMAT_XBGR2101010:
3629 case DRM_FORMAT_ABGR2101010:
3630 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3632 case DRM_FORMAT_XBGR8888:
3633 case DRM_FORMAT_ABGR8888:
3634 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3636 case DRM_FORMAT_NV21:
3637 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3639 case DRM_FORMAT_NV12:
3640 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3642 case DRM_FORMAT_P010:
3643 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3647 "Unsupported screen format %s\n",
3648 drm_get_format_name(fb->format->format, &format_name));
3652 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3653 case DRM_MODE_ROTATE_0:
3654 plane_info->rotation = ROTATION_ANGLE_0;
3656 case DRM_MODE_ROTATE_90:
3657 plane_info->rotation = ROTATION_ANGLE_90;
3659 case DRM_MODE_ROTATE_180:
3660 plane_info->rotation = ROTATION_ANGLE_180;
3662 case DRM_MODE_ROTATE_270:
3663 plane_info->rotation = ROTATION_ANGLE_270;
3666 plane_info->rotation = ROTATION_ANGLE_0;
3670 plane_info->visible = true;
3671 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3673 plane_info->layer_index = 0;
3675 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3676 &plane_info->color_space);
3680 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3681 plane_info->rotation, tiling_flags,
3682 &plane_info->tiling_info,
3683 &plane_info->plane_size,
3684 &plane_info->dcc, address);
3688 fill_blending_from_plane_state(
3689 plane_state, &plane_info->per_pixel_alpha,
3690 &plane_info->global_alpha, &plane_info->global_alpha_value);
3695 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3696 struct dc_plane_state *dc_plane_state,
3697 struct drm_plane_state *plane_state,
3698 struct drm_crtc_state *crtc_state)
3700 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3701 const struct amdgpu_framebuffer *amdgpu_fb =
3702 to_amdgpu_framebuffer(plane_state->fb);
3703 struct dc_scaling_info scaling_info;
3704 struct dc_plane_info plane_info;
3705 uint64_t tiling_flags;
3708 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3712 dc_plane_state->src_rect = scaling_info.src_rect;
3713 dc_plane_state->dst_rect = scaling_info.dst_rect;
3714 dc_plane_state->clip_rect = scaling_info.clip_rect;
3715 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3717 ret = get_fb_info(amdgpu_fb, &tiling_flags);
3721 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3723 &dc_plane_state->address);
3727 dc_plane_state->format = plane_info.format;
3728 dc_plane_state->color_space = plane_info.color_space;
3730 dc_plane_state->plane_size = plane_info.plane_size;
3731 dc_plane_state->rotation = plane_info.rotation;
3732 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3733 dc_plane_state->stereo_format = plane_info.stereo_format;
3734 dc_plane_state->tiling_info = plane_info.tiling_info;
3735 dc_plane_state->visible = plane_info.visible;
3736 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3737 dc_plane_state->global_alpha = plane_info.global_alpha;
3738 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3739 dc_plane_state->dcc = plane_info.dcc;
3740 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3743 * Always set input transfer function, since plane state is refreshed often.
3746 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3753 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3754 const struct dm_connector_state *dm_state,
3755 struct dc_stream_state *stream)
3757 enum amdgpu_rmx_type rmx_type;
3759 struct rect src = { 0 }; /* viewport in composition space*/
3760 struct rect dst = { 0 }; /* stream addressable area */
3762 /* no mode. nothing to be done */
3766 /* Full screen scaling by default */
3767 src.width = mode->hdisplay;
3768 src.height = mode->vdisplay;
3769 dst.width = stream->timing.h_addressable;
3770 dst.height = stream->timing.v_addressable;
3773 rmx_type = dm_state->scaling;
3774 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3775 if (src.width * dst.height <
3776 src.height * dst.width) {
3777 /* height needs less upscaling/more downscaling */
3778 dst.width = src.width *
3779 dst.height / src.height;
3781 /* width needs less upscaling/more downscaling */
3782 dst.height = src.height *
3783 dst.width / src.width;
3785 } else if (rmx_type == RMX_CENTER) {
3789 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3790 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3792 if (dm_state->underscan_enable) {
3793 dst.x += dm_state->underscan_hborder / 2;
3794 dst.y += dm_state->underscan_vborder / 2;
3795 dst.width -= dm_state->underscan_hborder;
3796 dst.height -= dm_state->underscan_vborder;
3803 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3804 dst.x, dst.y, dst.width, dst.height);
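/*
 * Worked example of the RMX_ASPECT math above (illustrative): fitting a
 * 1280x1024 (5:4) source into a 1920x1080 stream gives
 * src.width * dst.height = 1382400 < src.height * dst.width = 1966080,
 * so the width is recomputed as dst.width = 1280 * 1080 / 1024 = 1350,
 * and the centering step yields dst.x = (1920 - 1350) / 2 = 285.
 */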
3808 static enum dc_color_depth
3809 convert_color_depth_from_display_info(const struct drm_connector *connector,
3810 const struct drm_connector_state *state,
3818 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3819 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3821 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3823 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3826 bpc = (uint8_t)connector->display_info.bpc;
3827 /* Assume 8 bpc by default if no bpc is specified. */
3828 bpc = bpc ? bpc : 8;
3832 state = connector->state;
3836 * Cap display bpc based on the user requested value.
3838 * The value for state->max_bpc may not be correctly updated
3839 * depending on when the connector gets added to the state
3840 * or if this was called outside of atomic check, so it
3841 * can't be used directly.
3843 bpc = min(bpc, state->max_requested_bpc);
3845 /* Round down to the nearest even number. */
3846 bpc = bpc - (bpc & 1);
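/*
 * Illustrative example: a panel advertising 12 bpc with a user limit of
 * max_requested_bpc = 10 is capped to 10, and a (hypothetical) odd
 * value of 11 would be rounded down to 10 by clearing the low bit.
 */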
3852 * Temporary workaround: DRM doesn't parse color depth for
3853 * EDID revisions before 1.4.
3854 * TODO: Fix EDID parsing.
3856 return COLOR_DEPTH_888;
3858 return COLOR_DEPTH_666;
3860 return COLOR_DEPTH_888;
3862 return COLOR_DEPTH_101010;
3864 return COLOR_DEPTH_121212;
3866 return COLOR_DEPTH_141414;
3868 return COLOR_DEPTH_161616;
3870 return COLOR_DEPTH_UNDEFINED;
3874 static enum dc_aspect_ratio
3875 get_aspect_ratio(const struct drm_display_mode *mode_in)
3877 /* 1-1 mapping, since both enums follow the HDMI spec. */
3878 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3881 static enum dc_color_space
3882 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3884 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3886 switch (dc_crtc_timing->pixel_encoding) {
3887 case PIXEL_ENCODING_YCBCR422:
3888 case PIXEL_ENCODING_YCBCR444:
3889 case PIXEL_ENCODING_YCBCR420:
3892 * 27030 kHz is the separation point between HDTV and SDTV
3893 * according to the HDMI spec; we use YCbCr709 and YCbCr601 respectively.
3896 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3897 if (dc_crtc_timing->flags.Y_ONLY)
3899 COLOR_SPACE_YCBCR709_LIMITED;
3901 color_space = COLOR_SPACE_YCBCR709;
3903 if (dc_crtc_timing->flags.Y_ONLY)
3905 COLOR_SPACE_YCBCR601_LIMITED;
3907 color_space = COLOR_SPACE_YCBCR601;
3912 case PIXEL_ENCODING_RGB:
3913 color_space = COLOR_SPACE_SRGB;
3924 static bool adjust_colour_depth_from_display_info(
3925 struct dc_crtc_timing *timing_out,
3926 const struct drm_display_info *info)
3928 enum dc_color_depth depth = timing_out->display_color_depth;
3931 normalized_clk = timing_out->pix_clk_100hz / 10;
3932 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3933 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3934 normalized_clk /= 2;
3935 /* Adjust the pixel clock per the HDMI spec based on colour depth */
3937 case COLOR_DEPTH_888:
3939 case COLOR_DEPTH_101010:
3940 normalized_clk = (normalized_clk * 30) / 24;
3942 case COLOR_DEPTH_121212:
3943 normalized_clk = (normalized_clk * 36) / 24;
3945 case COLOR_DEPTH_161616:
3946 normalized_clk = (normalized_clk * 48) / 24;
3949 /* The above depths are the only ones valid for HDMI. */
3952 if (normalized_clk <= info->max_tmds_clock) {
3953 timing_out->display_color_depth = depth;
3956 } while (--depth > COLOR_DEPTH_666);
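/*
 * Worked example of the clock adjustment above (illustrative): 4K@60
 * has pix_clk_100hz = 5940000, so normalized_clk starts at 594000 kHz;
 * YCbCr 4:2:0 halves it to 297000. At 10 bpc the check then uses
 * 297000 * 30 / 24 = 371250 kHz, which fits a 600000 kHz (HDMI 2.0)
 * max_tmds_clock but not a 340000 kHz one, in which case the loop
 * retries at the next lower depth.
 */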
3960 static void fill_stream_properties_from_drm_display_mode(
3961 struct dc_stream_state *stream,
3962 const struct drm_display_mode *mode_in,
3963 const struct drm_connector *connector,
3964 const struct drm_connector_state *connector_state,
3965 const struct dc_stream_state *old_stream)
3967 struct dc_crtc_timing *timing_out = &stream->timing;
3968 const struct drm_display_info *info = &connector->display_info;
3969 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3970 struct hdmi_vendor_infoframe hv_frame;
3971 struct hdmi_avi_infoframe avi_frame;
3973 memset(&hv_frame, 0, sizeof(hv_frame));
3974 memset(&avi_frame, 0, sizeof(avi_frame));
3976 timing_out->h_border_left = 0;
3977 timing_out->h_border_right = 0;
3978 timing_out->v_border_top = 0;
3979 timing_out->v_border_bottom = 0;
3980 /* TODO: un-hardcode */
3981 if (drm_mode_is_420_only(info, mode_in)
3982 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3983 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3984 else if (drm_mode_is_420_also(info, mode_in)
3985 && aconnector->force_yuv420_output)
3986 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3987 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3988 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3989 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3991 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3993 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3994 timing_out->display_color_depth = convert_color_depth_from_display_info(
3995 connector, connector_state,
3996 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3997 timing_out->scan_type = SCANNING_TYPE_NODATA;
3998 timing_out->hdmi_vic = 0;
4000 if (old_stream) {
4001 timing_out->vic = old_stream->timing.vic;
4002 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4003 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4004 } else {
4005 timing_out->vic = drm_match_cea_mode(mode_in);
4006 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4007 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4008 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4009 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4012 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4013 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4014 timing_out->vic = avi_frame.video_code;
4015 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4016 timing_out->hdmi_vic = hv_frame.vic;
4019 timing_out->h_addressable = mode_in->crtc_hdisplay;
4020 timing_out->h_total = mode_in->crtc_htotal;
4021 timing_out->h_sync_width =
4022 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4023 timing_out->h_front_porch =
4024 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4025 timing_out->v_total = mode_in->crtc_vtotal;
4026 timing_out->v_addressable = mode_in->crtc_vdisplay;
4027 timing_out->v_front_porch =
4028 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4029 timing_out->v_sync_width =
4030 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4031 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4032 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4034 stream->output_color_space = get_output_color_space(timing_out);
4036 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4037 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4038 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4039 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4040 drm_mode_is_420_also(info, mode_in) &&
4041 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4042 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4043 adjust_colour_depth_from_display_info(timing_out, info);
4048 static void fill_audio_info(struct audio_info *audio_info,
4049 const struct drm_connector *drm_connector,
4050 const struct dc_sink *dc_sink)
4053 int cea_revision = 0;
4054 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4056 audio_info->manufacture_id = edid_caps->manufacturer_id;
4057 audio_info->product_id = edid_caps->product_id;
4059 cea_revision = drm_connector->display_info.cea_rev;
4061 strscpy(audio_info->display_name,
4062 edid_caps->display_name,
4063 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4065 if (cea_revision >= 3) {
4066 audio_info->mode_count = edid_caps->audio_mode_count;
4068 for (i = 0; i < audio_info->mode_count; ++i) {
4069 audio_info->modes[i].format_code =
4070 (enum audio_format_code)
4071 (edid_caps->audio_modes[i].format_code);
4072 audio_info->modes[i].channel_count =
4073 edid_caps->audio_modes[i].channel_count;
4074 audio_info->modes[i].sample_rates.all =
4075 edid_caps->audio_modes[i].sample_rate;
4076 audio_info->modes[i].sample_size =
4077 edid_caps->audio_modes[i].sample_size;
4081 audio_info->flags.all = edid_caps->speaker_flags;
4083 /* TODO: We only check progressive mode; interlaced mode should be checked too */
4084 if (drm_connector->latency_present[0]) {
4085 audio_info->video_latency = drm_connector->video_latency[0];
4086 audio_info->audio_latency = drm_connector->audio_latency[0];
4089 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4094 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4095 struct drm_display_mode *dst_mode)
4097 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4098 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4099 dst_mode->crtc_clock = src_mode->crtc_clock;
4100 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4101 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4102 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4103 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4104 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4105 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4106 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4107 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4108 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4109 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4110 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4114 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4115 const struct drm_display_mode *native_mode,
4118 if (scale_enabled) {
4119 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4120 } else if (native_mode->clock == drm_mode->clock &&
4121 native_mode->htotal == drm_mode->htotal &&
4122 native_mode->vtotal == drm_mode->vtotal) {
4123 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4125 /* no scaling and not an amdgpu-inserted mode; no need to patch */
4129 static struct dc_sink *
4130 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4132 struct dc_sink_init_data sink_init_data = { 0 };
4133 struct dc_sink *sink = NULL;
4134 sink_init_data.link = aconnector->dc_link;
4135 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4137 sink = dc_sink_create(&sink_init_data);
4138 if (!sink) {
4139 DRM_ERROR("Failed to create sink!\n");
4140 return NULL;
4141 }
4142 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4147 static void set_multisync_trigger_params(
4148 struct dc_stream_state *stream)
4150 if (stream->triggered_crtc_reset.enabled) {
4151 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4152 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4156 static void set_master_stream(struct dc_stream_state *stream_set[],
4159 int j, highest_rfr = 0, master_stream = 0;
4161 for (j = 0; j < stream_count; j++) {
4162 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4163 int refresh_rate = 0;
4165 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4166 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4167 if (refresh_rate > highest_rfr) {
4168 highest_rfr = refresh_rate;
4173 for (j = 0; j < stream_count; j++) {
4174 if (stream_set[j])
4175 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4179 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4183 if (context->stream_count < 2)
4185 for (i = 0; i < context->stream_count ; i++) {
4186 if (!context->streams[i])
4189 * TODO: Add a function to read AMD VSDB bits and set the
4190 * crtc_sync_master.multi_sync_enabled flag.
4191 * For now it's set to false.
4193 set_multisync_trigger_params(context->streams[i]);
4195 set_master_stream(context->streams, context->stream_count);
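/*
 * Worked example of the refresh-rate math in set_master_stream()
 * (illustrative): 1080p60 has pix_clk_100hz = 1485000 and
 * h_total * v_total = 2200 * 1125 = 2475000, so
 * refresh_rate = 1485000 * 100 / 2475000 = 60; the stream with the
 * highest such rate becomes the master for the triggered CRTC reset.
 */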
4198 static struct dc_stream_state *
4199 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4200 const struct drm_display_mode *drm_mode,
4201 const struct dm_connector_state *dm_state,
4202 const struct dc_stream_state *old_stream)
4204 struct drm_display_mode *preferred_mode = NULL;
4205 struct drm_connector *drm_connector;
4206 const struct drm_connector_state *con_state =
4207 dm_state ? &dm_state->base : NULL;
4208 struct dc_stream_state *stream = NULL;
4209 struct drm_display_mode mode = *drm_mode;
4210 bool native_mode_found = false;
4211 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4213 int preferred_refresh = 0;
4214 #if defined(CONFIG_DRM_AMD_DC_DCN)
4215 struct dsc_dec_dpcd_caps dsc_caps;
4217 uint32_t link_bandwidth_kbps;
4219 struct dc_sink *sink = NULL;
4220 if (aconnector == NULL) {
4221 DRM_ERROR("aconnector is NULL!\n");
4222 return stream;
4223 }
4225 drm_connector = &aconnector->base;
4227 if (!aconnector->dc_sink) {
4228 sink = create_fake_sink(aconnector);
4232 sink = aconnector->dc_sink;
4233 dc_sink_retain(sink);
4236 stream = dc_create_stream_for_sink(sink);
4238 if (stream == NULL) {
4239 DRM_ERROR("Failed to create stream for sink!\n");
4243 stream->dm_stream_context = aconnector;
4245 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4246 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4248 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4249 /* Search for preferred mode */
4250 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4251 native_mode_found = true;
4255 if (!native_mode_found)
4256 preferred_mode = list_first_entry_or_null(
4257 &aconnector->base.modes,
4258 struct drm_display_mode,
4261 mode_refresh = drm_mode_vrefresh(&mode);
4263 if (preferred_mode == NULL) {
4265 * This may not be an error: this case arises when there are no
4266 * usermode calls to reset and set the mode upon hotplug. In this
4267 * case, we call set mode ourselves to restore the previous mode,
4268 * and the mode list may not be filled in yet.
4270 DRM_DEBUG_DRIVER("No preferred mode found\n");
4272 decide_crtc_timing_for_drm_display_mode(
4273 &mode, preferred_mode,
4274 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4275 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4279 drm_mode_set_crtcinfo(&mode, 0);
4282 * If scaling is enabled and the refresh rate didn't change,
4283 * we copy the VIC and polarities from the old timings.
4285 if (!scale || mode_refresh != preferred_refresh)
4286 fill_stream_properties_from_drm_display_mode(stream,
4287 &mode, &aconnector->base, con_state, NULL);
4289 fill_stream_properties_from_drm_display_mode(stream,
4290 &mode, &aconnector->base, con_state, old_stream);
4292 stream->timing.flags.DSC = 0;
4294 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4295 #if defined(CONFIG_DRM_AMD_DC_DCN)
4296 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4297 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4298 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4301 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4302 dc_link_get_link_cap(aconnector->dc_link));
4304 #if defined(CONFIG_DRM_AMD_DC_DCN)
4305 if (dsc_caps.is_dsc_supported)
4306 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4308 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4309 link_bandwidth_kbps,
4311 &stream->timing.dsc_cfg))
4312 stream->timing.flags.DSC = 1;
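/*
 * Editor's note: the net effect of the block above is that DSC is turned
 * on only when the sink's DPCD advertises DSC support and
 * dc_dsc_compute_config() finds a configuration that fits within the
 * computed link bandwidth; otherwise timing.flags.DSC stays 0 and the
 * stream is driven uncompressed.
 */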
4316 update_stream_scaling_settings(&mode, dm_state, stream);
4319 &stream->audio_info,
4323 update_stream_signal(stream, sink);
4325 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4326 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4327 if (stream->link->psr_feature_enabled) {
4328 struct dc *core_dc = stream->link->ctx->dc;
4330 if (dc_is_dmcu_initialized(core_dc)) {
4331 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4333 stream->psr_version = dmcu->dmcu_version.psr_version;
4336 // Decide whether the stream supports VSC SDP colorimetry
4337 // before building the VSC info packet.
4339 stream->use_vsc_sdp_for_colorimetry = false;
4340 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4341 stream->use_vsc_sdp_for_colorimetry =
4342 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4344 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4345 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4346 stream->use_vsc_sdp_for_colorimetry = true;
4349 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4353 dc_sink_release(sink);
4358 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4360 drm_crtc_cleanup(crtc);
4364 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4365 struct drm_crtc_state *state)
4367 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4369 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4371 dc_stream_release(cur->stream);
4374 __drm_atomic_helper_crtc_destroy_state(state);
4380 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4382 struct dm_crtc_state *state;
4385 dm_crtc_destroy_state(crtc, crtc->state);
4387 state = kzalloc(sizeof(*state), GFP_KERNEL);
4388 if (WARN_ON(!state))
4391 crtc->state = &state->base;
4392 crtc->state->crtc = crtc;
4396 static struct drm_crtc_state *
4397 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4399 struct dm_crtc_state *state, *cur;
4401 cur = to_dm_crtc_state(crtc->state);
4403 if (WARN_ON(!crtc->state))
4406 state = kzalloc(sizeof(*state), GFP_KERNEL);
4410 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4413 state->stream = cur->stream;
4414 dc_stream_retain(state->stream);
4417 state->active_planes = cur->active_planes;
4418 state->interrupts_enabled = cur->interrupts_enabled;
4419 state->vrr_params = cur->vrr_params;
4420 state->vrr_infopacket = cur->vrr_infopacket;
4421 state->abm_level = cur->abm_level;
4422 state->vrr_supported = cur->vrr_supported;
4423 state->freesync_config = cur->freesync_config;
4424 state->crc_src = cur->crc_src;
4425 state->cm_has_degamma = cur->cm_has_degamma;
4426 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4428 /* TODO: Duplicate dc_stream once the stream object is flattened */
4430 return &state->base;
4433 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4435 enum dc_irq_source irq_source;
4436 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4437 struct amdgpu_device *adev = crtc->dev->dev_private;
4440 /* Do not set vupdate for DCN hardware */
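/*
 * Editor's note: AMDGPU_FAMILY_AI (Vega) is the last pre-DCN family, so
 * the check below bails out for anything newer; only DCE parts program
 * the vupdate interrupt from here.
 */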
4441 if (adev->family > AMDGPU_FAMILY_AI)
4444 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4446 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4448 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4449 acrtc->crtc_id, enable ? "en" : "dis", rc);
4453 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4455 enum dc_irq_source irq_source;
4456 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4457 struct amdgpu_device *adev = crtc->dev->dev_private;
4458 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4462 /* vblank irq on -> Only need vupdate irq in vrr mode */
4463 if (amdgpu_dm_vrr_active(acrtc_state))
4464 rc = dm_set_vupdate_irq(crtc, true);
4466 /* vblank irq off -> vupdate irq off */
4467 rc = dm_set_vupdate_irq(crtc, false);
4473 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4474 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4477 static int dm_enable_vblank(struct drm_crtc *crtc)
4479 return dm_set_vblank(crtc, true);
4482 static void dm_disable_vblank(struct drm_crtc *crtc)
4484 dm_set_vblank(crtc, false);
4487 /* Only the options currently available to the driver are implemented */
4488 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4489 .reset = dm_crtc_reset_state,
4490 .destroy = amdgpu_dm_crtc_destroy,
4491 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4492 .set_config = drm_atomic_helper_set_config,
4493 .page_flip = drm_atomic_helper_page_flip,
4494 .atomic_duplicate_state = dm_crtc_duplicate_state,
4495 .atomic_destroy_state = dm_crtc_destroy_state,
4496 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4497 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4498 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4499 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4500 .enable_vblank = dm_enable_vblank,
4501 .disable_vblank = dm_disable_vblank,
4502 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4505 static enum drm_connector_status
4506 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4509 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4513 * 1. This interface is NOT called in the context of an HPD irq.
4514 * 2. This interface *is* called in the context of a user-mode ioctl,
4515 * which makes it a bad place for *any* MST-related activity.
4518 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4519 !aconnector->fake_enable)
4520 connected = (aconnector->dc_sink != NULL);
4522 connected = (aconnector->base.force == DRM_FORCE_ON);
4524 return (connected ? connector_status_connected :
4525 connector_status_disconnected);
4528 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4529 struct drm_connector_state *connector_state,
4530 struct drm_property *property,
4533 struct drm_device *dev = connector->dev;
4534 struct amdgpu_device *adev = dev->dev_private;
4535 struct dm_connector_state *dm_old_state =
4536 to_dm_connector_state(connector->state);
4537 struct dm_connector_state *dm_new_state =
4538 to_dm_connector_state(connector_state);
4542 if (property == dev->mode_config.scaling_mode_property) {
4543 enum amdgpu_rmx_type rmx_type;
4546 case DRM_MODE_SCALE_CENTER:
4547 rmx_type = RMX_CENTER;
4549 case DRM_MODE_SCALE_ASPECT:
4550 rmx_type = RMX_ASPECT;
4552 case DRM_MODE_SCALE_FULLSCREEN:
4553 rmx_type = RMX_FULL;
4555 case DRM_MODE_SCALE_NONE:
4561 if (dm_old_state->scaling == rmx_type)
4564 dm_new_state->scaling = rmx_type;
4566 } else if (property == adev->mode_info.underscan_hborder_property) {
4567 dm_new_state->underscan_hborder = val;
4569 } else if (property == adev->mode_info.underscan_vborder_property) {
4570 dm_new_state->underscan_vborder = val;
4572 } else if (property == adev->mode_info.underscan_property) {
4573 dm_new_state->underscan_enable = val;
4575 } else if (property == adev->mode_info.abm_level_property) {
4576 dm_new_state->abm_level = val;
4583 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4584 const struct drm_connector_state *state,
4585 struct drm_property *property,
4588 struct drm_device *dev = connector->dev;
4589 struct amdgpu_device *adev = dev->dev_private;
4590 struct dm_connector_state *dm_state =
4591 to_dm_connector_state(state);
4594 if (property == dev->mode_config.scaling_mode_property) {
4595 switch (dm_state->scaling) {
4597 *val = DRM_MODE_SCALE_CENTER;
4600 *val = DRM_MODE_SCALE_ASPECT;
4603 *val = DRM_MODE_SCALE_FULLSCREEN;
4607 *val = DRM_MODE_SCALE_NONE;
4611 } else if (property == adev->mode_info.underscan_hborder_property) {
4612 *val = dm_state->underscan_hborder;
4614 } else if (property == adev->mode_info.underscan_vborder_property) {
4615 *val = dm_state->underscan_vborder;
4617 } else if (property == adev->mode_info.underscan_property) {
4618 *val = dm_state->underscan_enable;
4620 } else if (property == adev->mode_info.abm_level_property) {
4621 *val = dm_state->abm_level;
4628 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4630 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4632 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4635 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4637 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4638 const struct dc_link *link = aconnector->dc_link;
4639 struct amdgpu_device *adev = connector->dev->dev_private;
4640 struct amdgpu_display_manager *dm = &adev->dm;
4642 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4643 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4645 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4646 link->type != dc_connection_none &&
4647 dm->backlight_dev) {
4648 backlight_device_unregister(dm->backlight_dev);
4649 dm->backlight_dev = NULL;
4653 if (aconnector->dc_em_sink)
4654 dc_sink_release(aconnector->dc_em_sink);
4655 aconnector->dc_em_sink = NULL;
4656 if (aconnector->dc_sink)
4657 dc_sink_release(aconnector->dc_sink);
4658 aconnector->dc_sink = NULL;
4660 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4661 drm_connector_unregister(connector);
4662 drm_connector_cleanup(connector);
4663 if (aconnector->i2c) {
4664 i2c_del_adapter(&aconnector->i2c->base);
4665 kfree(aconnector->i2c);
4667 kfree(aconnector->dm_dp_aux.aux.name);
4672 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4674 struct dm_connector_state *state =
4675 to_dm_connector_state(connector->state);
4677 if (connector->state)
4678 __drm_atomic_helper_connector_destroy_state(connector->state);
4682 state = kzalloc(sizeof(*state), GFP_KERNEL);
4685 state->scaling = RMX_OFF;
4686 state->underscan_enable = false;
4687 state->underscan_hborder = 0;
4688 state->underscan_vborder = 0;
4689 state->base.max_requested_bpc = 8;
4690 state->vcpi_slots = 0;
4692 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4693 state->abm_level = amdgpu_dm_abm_level;
4695 __drm_atomic_helper_connector_reset(connector, &state->base);
4699 struct drm_connector_state *
4700 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4702 struct dm_connector_state *state =
4703 to_dm_connector_state(connector->state);
4705 struct dm_connector_state *new_state =
4706 kmemdup(state, sizeof(*state), GFP_KERNEL);
4711 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4713 new_state->freesync_capable = state->freesync_capable;
4714 new_state->abm_level = state->abm_level;
4715 new_state->scaling = state->scaling;
4716 new_state->underscan_enable = state->underscan_enable;
4717 new_state->underscan_hborder = state->underscan_hborder;
4718 new_state->underscan_vborder = state->underscan_vborder;
4719 new_state->vcpi_slots = state->vcpi_slots;
4720 new_state->pbn = state->pbn;
4721 return &new_state->base;
4725 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4727 struct amdgpu_dm_connector *amdgpu_dm_connector =
4728 to_amdgpu_dm_connector(connector);
4731 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4732 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4733 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4734 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4739 #if defined(CONFIG_DEBUG_FS)
4740 connector_debugfs_init(amdgpu_dm_connector);
4746 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4747 .reset = amdgpu_dm_connector_funcs_reset,
4748 .detect = amdgpu_dm_connector_detect,
4749 .fill_modes = drm_helper_probe_single_connector_modes,
4750 .destroy = amdgpu_dm_connector_destroy,
4751 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4752 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4753 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4754 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4755 .late_register = amdgpu_dm_connector_late_register,
4756 .early_unregister = amdgpu_dm_connector_unregister
4759 static int get_modes(struct drm_connector *connector)
4761 return amdgpu_dm_connector_get_modes(connector);
4764 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4766 struct dc_sink_init_data init_params = {
4767 .link = aconnector->dc_link,
4768 .sink_signal = SIGNAL_TYPE_VIRTUAL
4772 if (!aconnector->base.edid_blob_ptr) {
4773 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4774 aconnector->base.name);
4776 aconnector->base.force = DRM_FORCE_OFF;
4777 aconnector->base.override_edid = false;
4781 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4783 aconnector->edid = edid;
4785 aconnector->dc_em_sink = dc_link_add_remote_sink(
4786 aconnector->dc_link,
4788 (edid->extensions + 1) * EDID_LENGTH,
4791 if (aconnector->base.force == DRM_FORCE_ON) {
4792 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4793 aconnector->dc_link->local_sink :
4794 aconnector->dc_em_sink;
4795 dc_sink_retain(aconnector->dc_sink);
4799 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4801 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4804 * In case of a headless boot with force-on for a DP managed connector,
4805 * these settings have to be != 0 to get an initial modeset.
4807 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4808 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4809 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4813 aconnector->base.override_edid = true;
4814 create_eml_sink(aconnector);
4817 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4818 struct drm_display_mode *mode)
4820 int result = MODE_ERROR;
4821 struct dc_sink *dc_sink;
4822 struct amdgpu_device *adev = connector->dev->dev_private;
4823 /* TODO: Unhardcode stream count */
4824 struct dc_stream_state *stream;
4825 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4826 enum dc_status dc_result = DC_OK;
4828 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4829 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4833 * Only run this the first time mode_valid is called, to initialize
4836 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4837 !aconnector->dc_em_sink)
4838 handle_edid_mgmt(aconnector);
4840 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4842 if (dc_sink == NULL) {
4843 DRM_ERROR("dc_sink is NULL!\n");
4847 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4848 if (stream == NULL) {
4849 DRM_ERROR("Failed to create stream for sink!\n");
4853 dc_result = dc_validate_stream(adev->dm.dc, stream);
4855 if (dc_result == DC_OK)
4858 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4864 dc_stream_release(stream);
4867 /* TODO: error handling */
4871 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4872 struct dc_info_packet *out)
4874 struct hdmi_drm_infoframe frame;
4875 unsigned char buf[30]; /* 26 + 4 */
4879 memset(out, 0, sizeof(*out));
4881 if (!state->hdr_output_metadata)
4884 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4888 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4892 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4896 /* Prepare the infopacket for DC. */
4897 switch (state->connector->connector_type) {
4898 case DRM_MODE_CONNECTOR_HDMIA:
4899 out->hb0 = 0x87; /* type */
4900 out->hb1 = 0x01; /* version */
4901 out->hb2 = 0x1A; /* length */
4902 out->sb[0] = buf[3]; /* checksum */
4906 case DRM_MODE_CONNECTOR_DisplayPort:
4907 case DRM_MODE_CONNECTOR_eDP:
4908 out->hb0 = 0x00; /* sdp id, zero */
4909 out->hb1 = 0x87; /* type */
4910 out->hb2 = 0x1D; /* payload len - 1 */
4911 out->hb3 = (0x13 << 2); /* sdp version */
4912 out->sb[0] = 0x01; /* version */
4913 out->sb[1] = 0x1A; /* length */
4921 memcpy(&out->sb[i], &buf[4], 26);
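/*
 * Editor's note on the layout assumed above: hdmi_drm_infoframe_pack_only()
 * emits a 4-byte header followed by the 26-byte static metadata payload,
 * i.e. buf[0] = type (0x87), buf[1] = version, buf[2] = length (26),
 * buf[3] = checksum, buf[4..29] = payload. The HDMI path reuses the packed
 * checksum, the DP SDP path rebuilds its own header, and both copy the
 * same 26 payload bytes from &buf[4].
 */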
4924 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4925 sizeof(out->sb), false);
4931 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4932 const struct drm_connector_state *new_state)
4934 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4935 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4937 if (old_blob != new_blob) {
4938 if (old_blob && new_blob &&
4939 old_blob->length == new_blob->length)
4940 return memcmp(old_blob->data, new_blob->data,
4950 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4951 struct drm_atomic_state *state)
4953 struct drm_connector_state *new_con_state =
4954 drm_atomic_get_new_connector_state(state, conn);
4955 struct drm_connector_state *old_con_state =
4956 drm_atomic_get_old_connector_state(state, conn);
4957 struct drm_crtc *crtc = new_con_state->crtc;
4958 struct drm_crtc_state *new_crtc_state;
4964 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4965 struct dc_info_packet hdr_infopacket;
4967 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4971 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4972 if (IS_ERR(new_crtc_state))
4973 return PTR_ERR(new_crtc_state);
4976 * DC considers the stream backends changed if the
4977 * static metadata changes. Forcing the modeset also
4978 * gives a simple way for userspace to switch from
4979 * 8bpc to 10bpc when setting the metadata to enter or exit HDR.
4982 * Changing the static metadata after it's been
4983 * set is permissible, however. So only force a
4984 * modeset if we're entering or exiting HDR.
4986 new_crtc_state->mode_changed =
4987 !old_con_state->hdr_output_metadata ||
4988 !new_con_state->hdr_output_metadata;
4994 static const struct drm_connector_helper_funcs
4995 amdgpu_dm_connector_helper_funcs = {
4997 * If a second, larger display is hotplugged in fbcon mode, its larger
4998 * resolution modes are filtered out by drm_mode_validate_size() and go
4999 * missing after the user starts lightdm. So we need to rebuild the mode
5000 * list in the get_modes callback, not just return the mode count.
5002 .get_modes = get_modes,
5003 .mode_valid = amdgpu_dm_connector_mode_valid,
5004 .atomic_check = amdgpu_dm_connector_atomic_check,
5007 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5011 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5013 struct drm_device *dev = new_crtc_state->crtc->dev;
5014 struct drm_plane *plane;
5016 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5017 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5024 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5026 struct drm_atomic_state *state = new_crtc_state->state;
5027 struct drm_plane *plane;
5030 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5031 struct drm_plane_state *new_plane_state;
5033 /* Cursor planes are "fake". */
5034 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5037 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5039 if (!new_plane_state) {
5041 * The plane is enabled on the CRTC and hasn't changed
5042 * state. This means that it previously passed
5043 * validation and is therefore enabled.
5049 /* We need a framebuffer to be considered enabled. */
5050 num_active += (new_plane_state->fb != NULL);
5057 * Sets whether interrupts should be enabled on a specific CRTC.
5058 * We require that the stream be enabled and that there exist active
5059 * DC planes on the stream.
5062 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5063 struct drm_crtc_state *new_crtc_state)
5065 struct dm_crtc_state *dm_new_crtc_state =
5066 to_dm_crtc_state(new_crtc_state);
5068 dm_new_crtc_state->active_planes = 0;
5069 dm_new_crtc_state->interrupts_enabled = false;
5071 if (!dm_new_crtc_state->stream)
5074 dm_new_crtc_state->active_planes =
5075 count_crtc_active_planes(new_crtc_state);
5077 dm_new_crtc_state->interrupts_enabled =
5078 dm_new_crtc_state->active_planes > 0;
5081 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5082 struct drm_crtc_state *state)
5084 struct amdgpu_device *adev = crtc->dev->dev_private;
5085 struct dc *dc = adev->dm.dc;
5086 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5090 * Update interrupt state for the CRTC. This needs to happen whenever
5091 * the CRTC has changed or whenever any of its planes have changed.
5092 * Atomic check satisfies both of these requirements since the CRTC
5093 * is added to the state by DRM during drm_atomic_helper_check_planes.
5095 dm_update_crtc_interrupt_state(crtc, state);
5097 if (unlikely(!dm_crtc_state->stream &&
5098 modeset_required(state, NULL, dm_crtc_state->stream))) {
5103 /* In some use cases, like reset, no stream is attached */
5104 if (!dm_crtc_state->stream)
5108 * We want at least one hardware plane enabled to use
5109 * the stream with a cursor enabled.
5111 if (state->enable && state->active &&
5112 does_crtc_have_active_cursor(state) &&
5113 dm_crtc_state->active_planes == 0)
5116 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5122 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5123 const struct drm_display_mode *mode,
5124 struct drm_display_mode *adjusted_mode)
5129 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5130 .disable = dm_crtc_helper_disable,
5131 .atomic_check = dm_crtc_helper_atomic_check,
5132 .mode_fixup = dm_crtc_helper_mode_fixup,
5133 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5136 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5141 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5143 switch (display_color_depth) {
5144 case COLOR_DEPTH_666:
5146 case COLOR_DEPTH_888:
5148 case COLOR_DEPTH_101010:
5150 case COLOR_DEPTH_121212:
5152 case COLOR_DEPTH_141414:
5154 case COLOR_DEPTH_161616:
5162 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5163 struct drm_crtc_state *crtc_state,
5164 struct drm_connector_state *conn_state)
5166 struct drm_atomic_state *state = crtc_state->state;
5167 struct drm_connector *connector = conn_state->connector;
5168 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5169 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5170 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5171 struct drm_dp_mst_topology_mgr *mst_mgr;
5172 struct drm_dp_mst_port *mst_port;
5173 enum dc_color_depth color_depth;
5175 bool is_y420 = false;
5177 if (!aconnector->port || !aconnector->dc_sink)
5180 mst_port = aconnector->port;
5181 mst_mgr = &aconnector->mst_port->mst_mgr;
5183 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5186 if (!state->duplicated) {
5187 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5188 aconnector->force_yuv420_output;
5189 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5191 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5192 clock = adjusted_mode->clock;
5193 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
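/*
 * Editor's note: a rough worked example of the PBN math, assuming the DRM
 * helper's definition of one PBN unit as 54/64 MB/s of payload bandwidth
 * plus a small (~0.6%) overhead margin. For 1080p60 at 8 bpc RGB:
 * clock = 148500 kHz and bpp = 8 * 3 = 24, so the payload rate is
 * 148500 * 24 / 8 = 445.5 MB/s, i.e. 445.5 / (54/64) = 528 units, or
 * roughly 531-532 PBN once the margin is applied.
 */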
5195 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5198 dm_new_connector_state->pbn,
5200 if (dm_new_connector_state->vcpi_slots < 0) {
5201 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5202 return dm_new_connector_state->vcpi_slots;
5207 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5208 .disable = dm_encoder_helper_disable,
5209 .atomic_check = dm_encoder_helper_atomic_check
5212 #if defined(CONFIG_DRM_AMD_DC_DCN)
5213 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5214 struct dc_state *dc_state)
5216 struct dc_stream_state *stream = NULL;
5217 struct drm_connector *connector;
5218 struct drm_connector_state *new_con_state, *old_con_state;
5219 struct amdgpu_dm_connector *aconnector;
5220 struct dm_connector_state *dm_conn_state;
5221 int i, j, clock, bpp;
5222 int vcpi, pbn_div, pbn = 0;
5224 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5226 aconnector = to_amdgpu_dm_connector(connector);
5228 if (!aconnector->port)
5231 if (!new_con_state || !new_con_state->crtc)
5234 dm_conn_state = to_dm_connector_state(new_con_state);
5236 for (j = 0; j < dc_state->stream_count; j++) {
5237 stream = dc_state->streams[j];
5241 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5250 if (stream->timing.flags.DSC != 1) {
5251 drm_dp_mst_atomic_enable_dsc(state,
5259 pbn_div = dm_mst_get_pbn_divider(stream->link);
5260 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5261 clock = stream->timing.pix_clk_100hz / 10;
5262 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
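/*
 * Editor's note: dsc_cfg.bits_per_pixel is expressed in units of 1/16 of
 * a bit per pixel, which is why drm_dp_calc_pbn_mode() is called with its
 * dsc argument set to true here; the helper then scales the compressed
 * bpp back down accordingly.
 */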
5263 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5270 dm_conn_state->pbn = pbn;
5271 dm_conn_state->vcpi_slots = vcpi;
5277 static void dm_drm_plane_reset(struct drm_plane *plane)
5279 struct dm_plane_state *amdgpu_state = NULL;
5282 plane->funcs->atomic_destroy_state(plane, plane->state);
5284 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5285 WARN_ON(amdgpu_state == NULL);
5288 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5291 static struct drm_plane_state *
5292 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5294 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5296 old_dm_plane_state = to_dm_plane_state(plane->state);
5297 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5298 if (!dm_plane_state)
5301 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5303 if (old_dm_plane_state->dc_state) {
5304 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5305 dc_plane_state_retain(dm_plane_state->dc_state);
5308 return &dm_plane_state->base;
5311 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5312 struct drm_plane_state *state)
5314 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5316 if (dm_plane_state->dc_state)
5317 dc_plane_state_release(dm_plane_state->dc_state);
5319 drm_atomic_helper_plane_destroy_state(plane, state);
5322 static const struct drm_plane_funcs dm_plane_funcs = {
5323 .update_plane = drm_atomic_helper_update_plane,
5324 .disable_plane = drm_atomic_helper_disable_plane,
5325 .destroy = drm_primary_helper_destroy,
5326 .reset = dm_drm_plane_reset,
5327 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5328 .atomic_destroy_state = dm_drm_plane_destroy_state,
5331 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5332 struct drm_plane_state *new_state)
5334 struct amdgpu_framebuffer *afb;
5335 struct drm_gem_object *obj;
5336 struct amdgpu_device *adev;
5337 struct amdgpu_bo *rbo;
5338 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5339 struct list_head list;
5340 struct ttm_validate_buffer tv;
5341 struct ww_acquire_ctx ticket;
5342 uint64_t tiling_flags;
5346 dm_plane_state_old = to_dm_plane_state(plane->state);
5347 dm_plane_state_new = to_dm_plane_state(new_state);
5349 if (!new_state->fb) {
5350 DRM_DEBUG_DRIVER("No FB bound\n");
5354 afb = to_amdgpu_framebuffer(new_state->fb);
5355 obj = new_state->fb->obj[0];
5356 rbo = gem_to_amdgpu_bo(obj);
5357 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5358 INIT_LIST_HEAD(&list);
5362 list_add(&tv.head, &list);
5364 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5366 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5370 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5371 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5373 domain = AMDGPU_GEM_DOMAIN_VRAM;
5375 r = amdgpu_bo_pin(rbo, domain);
5376 if (unlikely(r != 0)) {
5377 if (r != -ERESTARTSYS)
5378 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5379 ttm_eu_backoff_reservation(&ticket, &list);
5383 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5384 if (unlikely(r != 0)) {
5385 amdgpu_bo_unpin(rbo);
5386 ttm_eu_backoff_reservation(&ticket, &list);
5387 DRM_ERROR("%p bind failed\n", rbo);
5391 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5393 ttm_eu_backoff_reservation(&ticket, &list);
5395 afb->address = amdgpu_bo_gpu_offset(rbo);
5399 if (dm_plane_state_new->dc_state &&
5400 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5401 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5403 fill_plane_buffer_attributes(
5404 adev, afb, plane_state->format, plane_state->rotation,
5405 tiling_flags, &plane_state->tiling_info,
5406 &plane_state->plane_size, &plane_state->dcc,
5407 &plane_state->address);
5413 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5414 struct drm_plane_state *old_state)
5416 struct amdgpu_bo *rbo;
5422 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5423 r = amdgpu_bo_reserve(rbo, false);
5425 DRM_ERROR("failed to reserve rbo before unpin\n");
5429 amdgpu_bo_unpin(rbo);
5430 amdgpu_bo_unreserve(rbo);
5431 amdgpu_bo_unref(&rbo);
5434 static int dm_plane_atomic_check(struct drm_plane *plane,
5435 struct drm_plane_state *state)
5437 struct amdgpu_device *adev = plane->dev->dev_private;
5438 struct dc *dc = adev->dm.dc;
5439 struct dm_plane_state *dm_plane_state;
5440 struct dc_scaling_info scaling_info;
5443 dm_plane_state = to_dm_plane_state(state);
5445 if (!dm_plane_state->dc_state)
5448 ret = fill_dc_scaling_info(state, &scaling_info);
5452 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5458 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5459 struct drm_plane_state *new_plane_state)
5461 /* Only support async updates on cursor planes. */
5462 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5468 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5469 struct drm_plane_state *new_state)
5471 struct drm_plane_state *old_state =
5472 drm_atomic_get_old_plane_state(new_state->state, plane);
5474 swap(plane->state->fb, new_state->fb);
5476 plane->state->src_x = new_state->src_x;
5477 plane->state->src_y = new_state->src_y;
5478 plane->state->src_w = new_state->src_w;
5479 plane->state->src_h = new_state->src_h;
5480 plane->state->crtc_x = new_state->crtc_x;
5481 plane->state->crtc_y = new_state->crtc_y;
5482 plane->state->crtc_w = new_state->crtc_w;
5483 plane->state->crtc_h = new_state->crtc_h;
5485 handle_cursor_update(plane, old_state);
5488 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5489 .prepare_fb = dm_plane_helper_prepare_fb,
5490 .cleanup_fb = dm_plane_helper_cleanup_fb,
5491 .atomic_check = dm_plane_atomic_check,
5492 .atomic_async_check = dm_plane_atomic_async_check,
5493 .atomic_async_update = dm_plane_atomic_async_update
5497 * TODO: these are currently initialized to RGB formats only.
5498 * For future use cases we should either initialize them dynamically based on
5499 * plane capabilities, or initialize this array to all formats, so the internal
5500 * drm check will succeed, and let DC implement the proper check.
5502 static const uint32_t rgb_formats[] = {
5503 DRM_FORMAT_XRGB8888,
5504 DRM_FORMAT_ARGB8888,
5505 DRM_FORMAT_RGBA8888,
5506 DRM_FORMAT_XRGB2101010,
5507 DRM_FORMAT_XBGR2101010,
5508 DRM_FORMAT_ARGB2101010,
5509 DRM_FORMAT_ABGR2101010,
5510 DRM_FORMAT_XBGR8888,
5511 DRM_FORMAT_ABGR8888,
5515 static const uint32_t overlay_formats[] = {
5516 DRM_FORMAT_XRGB8888,
5517 DRM_FORMAT_ARGB8888,
5518 DRM_FORMAT_RGBA8888,
5519 DRM_FORMAT_XBGR8888,
5520 DRM_FORMAT_ABGR8888,
5524 static const u32 cursor_formats[] = {
5528 static int get_plane_formats(const struct drm_plane *plane,
5529 const struct dc_plane_cap *plane_cap,
5530 uint32_t *formats, int max_formats)
5532 int i, num_formats = 0;
5535 * TODO: Query support for each group of formats directly from
5536 * DC plane caps. This will require adding more formats to the caps list.
5540 switch (plane->type) {
5541 case DRM_PLANE_TYPE_PRIMARY:
5542 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5543 if (num_formats >= max_formats)
5546 formats[num_formats++] = rgb_formats[i];
5549 if (plane_cap && plane_cap->pixel_format_support.nv12)
5550 formats[num_formats++] = DRM_FORMAT_NV12;
5551 if (plane_cap && plane_cap->pixel_format_support.p010)
5552 formats[num_formats++] = DRM_FORMAT_P010;
5555 case DRM_PLANE_TYPE_OVERLAY:
5556 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5557 if (num_formats >= max_formats)
5560 formats[num_formats++] = overlay_formats[i];
5564 case DRM_PLANE_TYPE_CURSOR:
5565 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5566 if (num_formats >= max_formats)
5569 formats[num_formats++] = cursor_formats[i];
5577 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5578 struct drm_plane *plane,
5579 unsigned long possible_crtcs,
5580 const struct dc_plane_cap *plane_cap)
5582 uint32_t formats[32];
5586 num_formats = get_plane_formats(plane, plane_cap, formats,
5587 ARRAY_SIZE(formats));
5589 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5590 &dm_plane_funcs, formats, num_formats,
5591 NULL, plane->type, NULL);
5595 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5596 plane_cap && plane_cap->per_pixel_alpha) {
5597 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5598 BIT(DRM_MODE_BLEND_PREMULTI);
5600 drm_plane_create_alpha_property(plane);
5601 drm_plane_create_blend_mode_property(plane, blend_caps);
5604 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5606 (plane_cap->pixel_format_support.nv12 ||
5607 plane_cap->pixel_format_support.p010)) {
5608 /* This only affects YUV formats. */
5609 drm_plane_create_color_properties(
5611 BIT(DRM_COLOR_YCBCR_BT601) |
5612 BIT(DRM_COLOR_YCBCR_BT709) |
5613 BIT(DRM_COLOR_YCBCR_BT2020),
5614 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5615 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5616 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5619 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5621 /* Create (reset) the plane state */
5622 if (plane->funcs->reset)
5623 plane->funcs->reset(plane);
5628 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5629 struct drm_plane *plane,
5630 uint32_t crtc_index)
5632 struct amdgpu_crtc *acrtc = NULL;
5633 struct drm_plane *cursor_plane;
5637 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5641 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5642 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5644 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5648 res = drm_crtc_init_with_planes(
5653 &amdgpu_dm_crtc_funcs, NULL);
5658 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5660 /* Create (reset) the crtc state */
5661 if (acrtc->base.funcs->reset)
5662 acrtc->base.funcs->reset(&acrtc->base);
5664 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5665 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5667 acrtc->crtc_id = crtc_index;
5668 acrtc->base.enabled = false;
5669 acrtc->otg_inst = -1;
5671 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5672 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5673 true, MAX_COLOR_LUT_ENTRIES);
5674 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5680 kfree(cursor_plane);
5685 static int to_drm_connector_type(enum signal_type st)
5688 case SIGNAL_TYPE_HDMI_TYPE_A:
5689 return DRM_MODE_CONNECTOR_HDMIA;
5690 case SIGNAL_TYPE_EDP:
5691 return DRM_MODE_CONNECTOR_eDP;
5692 case SIGNAL_TYPE_LVDS:
5693 return DRM_MODE_CONNECTOR_LVDS;
5694 case SIGNAL_TYPE_RGB:
5695 return DRM_MODE_CONNECTOR_VGA;
5696 case SIGNAL_TYPE_DISPLAY_PORT:
5697 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5698 return DRM_MODE_CONNECTOR_DisplayPort;
5699 case SIGNAL_TYPE_DVI_DUAL_LINK:
5700 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5701 return DRM_MODE_CONNECTOR_DVID;
5702 case SIGNAL_TYPE_VIRTUAL:
5703 return DRM_MODE_CONNECTOR_VIRTUAL;
5706 return DRM_MODE_CONNECTOR_Unknown;
5710 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5712 struct drm_encoder *encoder;
5714 /* There is only one encoder per connector */
5715 drm_connector_for_each_possible_encoder(connector, encoder)
5721 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5723 struct drm_encoder *encoder;
5724 struct amdgpu_encoder *amdgpu_encoder;
5726 encoder = amdgpu_dm_connector_to_encoder(connector);
5728 if (encoder == NULL)
5731 amdgpu_encoder = to_amdgpu_encoder(encoder);
5733 amdgpu_encoder->native_mode.clock = 0;
5735 if (!list_empty(&connector->probed_modes)) {
5736 struct drm_display_mode *preferred_mode = NULL;
5738 list_for_each_entry(preferred_mode,
5739 &connector->probed_modes,
5741 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5742 amdgpu_encoder->native_mode = *preferred_mode;
5750 static struct drm_display_mode *
5751 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5753 int hdisplay, int vdisplay)
5755 struct drm_device *dev = encoder->dev;
5756 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5757 struct drm_display_mode *mode = NULL;
5758 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5760 mode = drm_mode_duplicate(dev, native_mode);
5765 mode->hdisplay = hdisplay;
5766 mode->vdisplay = vdisplay;
5767 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5768 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5774 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5775 struct drm_connector *connector)
5777 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5778 struct drm_display_mode *mode = NULL;
5779 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5780 struct amdgpu_dm_connector *amdgpu_dm_connector =
5781 to_amdgpu_dm_connector(connector);
5785 char name[DRM_DISPLAY_MODE_LEN];
5788 } common_modes[] = {
5789 { "640x480", 640, 480},
5790 { "800x600", 800, 600},
5791 { "1024x768", 1024, 768},
5792 { "1280x720", 1280, 720},
5793 { "1280x800", 1280, 800},
5794 {"1280x1024", 1280, 1024},
5795 { "1440x900", 1440, 900},
5796 {"1680x1050", 1680, 1050},
5797 {"1600x1200", 1600, 1200},
5798 {"1920x1080", 1920, 1080},
5799 {"1920x1200", 1920, 1200}
5802 n = ARRAY_SIZE(common_modes);
5804 for (i = 0; i < n; i++) {
5805 struct drm_display_mode *curmode = NULL;
5806 bool mode_existed = false;
5808 if (common_modes[i].w > native_mode->hdisplay ||
5809 common_modes[i].h > native_mode->vdisplay ||
5810 (common_modes[i].w == native_mode->hdisplay &&
5811 common_modes[i].h == native_mode->vdisplay))
5814 list_for_each_entry(curmode, &connector->probed_modes, head) {
5815 if (common_modes[i].w == curmode->hdisplay &&
5816 common_modes[i].h == curmode->vdisplay) {
5817 mode_existed = true;
5825 mode = amdgpu_dm_create_common_mode(encoder,
5826 common_modes[i].name, common_modes[i].w,
5828 drm_mode_probed_add(connector, mode);
5829 amdgpu_dm_connector->num_modes++;
5833 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5836 struct amdgpu_dm_connector *amdgpu_dm_connector =
5837 to_amdgpu_dm_connector(connector);
5840 /* empty probed_modes */
5841 INIT_LIST_HEAD(&connector->probed_modes);
5842 amdgpu_dm_connector->num_modes =
5843 drm_add_edid_modes(connector, edid);
5845 /* Sort the probed modes before calling
5846 * amdgpu_dm_get_native_mode(), since the EDID can have
5847 * more than one preferred mode. Modes that appear later
5848 * in the probed mode list could be of a higher, preferred
5849 * resolution: for example, 3840x2160 as the base EDID
5850 * preferred timing, with a 4096x2160 preferred resolution
5851 * in a DID extension block later.
5853 drm_mode_sort(&connector->probed_modes);
5854 amdgpu_dm_get_native_mode(connector);
5856 amdgpu_dm_connector->num_modes = 0;
5860 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5862 struct amdgpu_dm_connector *amdgpu_dm_connector =
5863 to_amdgpu_dm_connector(connector);
5864 struct drm_encoder *encoder;
5865 struct edid *edid = amdgpu_dm_connector->edid;
5867 encoder = amdgpu_dm_connector_to_encoder(connector);
5869 if (!edid || !drm_edid_is_valid(edid)) {
5870 amdgpu_dm_connector->num_modes =
5871 drm_add_modes_noedid(connector, 640, 480);
5873 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5874 amdgpu_dm_connector_add_common_modes(encoder, connector);
5876 amdgpu_dm_fbc_init(connector);
5878 return amdgpu_dm_connector->num_modes;
5881 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5882 struct amdgpu_dm_connector *aconnector,
5884 struct dc_link *link,
5887 struct amdgpu_device *adev = dm->ddev->dev_private;
5890 * Some of the properties below require access to state, like bpc.
5891 * Allocate some default initial connector state with our reset helper.
5893 if (aconnector->base.funcs->reset)
5894 aconnector->base.funcs->reset(&aconnector->base);
5896 aconnector->connector_id = link_index;
5897 aconnector->dc_link = link;
5898 aconnector->base.interlace_allowed = false;
5899 aconnector->base.doublescan_allowed = false;
5900 aconnector->base.stereo_allowed = false;
5901 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5902 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5903 aconnector->audio_inst = -1;
5904 mutex_init(&aconnector->hpd_lock);
5907 * Configure HPD hot-plug support: connector->polled defaults to 0,
5908 * which means HPD hot plug is not supported.
5910 switch (connector_type) {
5911 case DRM_MODE_CONNECTOR_HDMIA:
5912 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5913 aconnector->base.ycbcr_420_allowed =
5914 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5916 case DRM_MODE_CONNECTOR_DisplayPort:
5917 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5918 aconnector->base.ycbcr_420_allowed =
5919 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5921 case DRM_MODE_CONNECTOR_DVID:
5922 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5928 drm_object_attach_property(&aconnector->base.base,
5929 dm->ddev->mode_config.scaling_mode_property,
5930 DRM_MODE_SCALE_NONE);
5932 drm_object_attach_property(&aconnector->base.base,
5933 adev->mode_info.underscan_property,
5935 drm_object_attach_property(&aconnector->base.base,
5936 adev->mode_info.underscan_hborder_property,
5938 drm_object_attach_property(&aconnector->base.base,
5939 adev->mode_info.underscan_vborder_property,
5942 if (!aconnector->mst_port)
5943 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5945 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5946 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5947 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5949 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5950 dc_is_dmcu_initialized(adev->dm.dc)) {
5951 drm_object_attach_property(&aconnector->base.base,
5952 adev->mode_info.abm_level_property, 0);
5955 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5956 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5957 connector_type == DRM_MODE_CONNECTOR_eDP) {
5958 drm_object_attach_property(
5959 &aconnector->base.base,
5960 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5962 if (!aconnector->mst_port)
5963 drm_connector_attach_vrr_capable_property(&aconnector->base);
5965 #ifdef CONFIG_DRM_AMD_DC_HDCP
5966 if (adev->dm.hdcp_workqueue)
5967 drm_connector_attach_content_protection_property(&aconnector->base, true);
5972 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5973 struct i2c_msg *msgs, int num)
5975 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5976 struct ddc_service *ddc_service = i2c->ddc_service;
5977 struct i2c_command cmd;
5981 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5986 cmd.number_of_payloads = num;
5987 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5990 for (i = 0; i < num; i++) {
5991 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5992 cmd.payloads[i].address = msgs[i].addr;
5993 cmd.payloads[i].length = msgs[i].len;
5994 cmd.payloads[i].data = msgs[i].buf;
5998 ddc_service->ctx->dc,
5999 ddc_service->ddc_pin->hw_info.ddc_channel,
6003 kfree(cmd.payloads);
6007 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6009 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6012 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6013 .master_xfer = amdgpu_dm_i2c_xfer,
6014 .functionality = amdgpu_dm_i2c_func,
6017 static struct amdgpu_i2c_adapter *
6018 create_i2c(struct ddc_service *ddc_service,
6022 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6023 struct amdgpu_i2c_adapter *i2c;
6025 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6028 i2c->base.owner = THIS_MODULE;
6029 i2c->base.class = I2C_CLASS_DDC;
6030 i2c->base.dev.parent = &adev->pdev->dev;
6031 i2c->base.algo = &amdgpu_dm_i2c_algo;
6032 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6033 i2c_set_adapdata(&i2c->base, i2c);
6034 i2c->ddc_service = ddc_service;
6035 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6042 * Note: this function assumes that dc_link_detect() was called for the
6043 * dc_link which will be represented by this aconnector.
6045 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6046 struct amdgpu_dm_connector *aconnector,
6047 uint32_t link_index,
6048 struct amdgpu_encoder *aencoder)
6052 struct dc *dc = dm->dc;
6053 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6054 struct amdgpu_i2c_adapter *i2c;
6056 link->priv = aconnector;
6058 DRM_DEBUG_DRIVER("%s()\n", __func__);
6060 i2c = create_i2c(link->ddc, link->link_index, &res);
6062 DRM_ERROR("Failed to create i2c adapter data\n");
6066 aconnector->i2c = i2c;
6067 res = i2c_add_adapter(&i2c->base);
6070 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6074 connector_type = to_drm_connector_type(link->connector_signal);
6076 res = drm_connector_init_with_ddc(
6079 &amdgpu_dm_connector_funcs,
6084 DRM_ERROR("connector_init failed\n");
6085 aconnector->connector_id = -1;
6089 drm_connector_helper_add(
6091 &amdgpu_dm_connector_helper_funcs);
6093 amdgpu_dm_connector_init_helper(
6100 drm_connector_attach_encoder(
6101 &aconnector->base, &aencoder->base);
6103 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6104 || connector_type == DRM_MODE_CONNECTOR_eDP)
6105 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6110 aconnector->i2c = NULL;
6115 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6117 switch (adev->mode_info.num_crtc) {
6134 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6135 struct amdgpu_encoder *aencoder,
6136 uint32_t link_index)
6138 struct amdgpu_device *adev = dev->dev_private;
6140 int res = drm_encoder_init(dev,
6142 &amdgpu_dm_encoder_funcs,
6143 DRM_MODE_ENCODER_TMDS,
6146 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6149 aencoder->encoder_id = link_index;
6151 aencoder->encoder_id = -1;
6153 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6158 static void manage_dm_interrupts(struct amdgpu_device *adev,
6159 struct amdgpu_crtc *acrtc,
6163 * This is not a correct translation, but it works as long as the
6164 * VBLANK constant is the same as PFLIP's.
6167 amdgpu_display_crtc_idx_to_irq_type(
6172 drm_crtc_vblank_on(&acrtc->base);
6175 &adev->pageflip_irq,
6181 &adev->pageflip_irq,
6183 drm_crtc_vblank_off(&acrtc->base);
6188 is_scaling_state_different(const struct dm_connector_state *dm_state,
6189 const struct dm_connector_state *old_dm_state)
6191 if (dm_state->scaling != old_dm_state->scaling)
6193 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6194 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6196 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6197 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6199 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6200 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6205 #ifdef CONFIG_DRM_AMD_DC_HDCP
6206 static bool is_content_protection_different(struct drm_connector_state *state,
6207 const struct drm_connector_state *old_state,
6208 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6210 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6212 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6213 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6214 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6218 /* CP is being re-enabled, ignore this */
6219 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6220 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6221 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6225 /* Handle the S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6226 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6227 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6228 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6230 /* Check if something is connected or enabled; otherwise we would start
6231 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6233 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6234 aconnector->dc_sink != NULL)
6237 if (old_state->content_protection == state->content_protection)
6240 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6247 static void remove_stream(struct amdgpu_device *adev,
6248 struct amdgpu_crtc *acrtc,
6249 struct dc_stream_state *stream)
6251 /* this is the update mode case */
6253 acrtc->otg_inst = -1;
6254 acrtc->enabled = false;
6257 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6258 struct dc_cursor_position *position)
6260 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6262 int xorigin = 0, yorigin = 0;
6264 position->enable = false;
6268 if (!crtc || !plane->state->fb)
6271 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6272 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6273 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6275 plane->state->crtc_w,
6276 plane->state->crtc_h);
6280 x = plane->state->crtc_x;
6281 y = plane->state->crtc_y;
6283 if (x <= -amdgpu_crtc->max_cursor_width ||
6284 y <= -amdgpu_crtc->max_cursor_height)
6288 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6292 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6295 position->enable = true;
6296 position->translate_by_source = true;
6299 position->x_hotspot = xorigin;
6300 position->y_hotspot = yorigin;
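/*
 * Editor's note: a worked example of the clamping above. A 64x64 cursor
 * whose top-left corner sits at crtc_x = -10 gets xorigin =
 * min(10, max_cursor_width - 1) = 10, so the hardware cursor is placed
 * at x = 0 with its hotspot shifted by 10 pixels, visually cropping the
 * off-screen columns.
 */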
6305 static void handle_cursor_update(struct drm_plane *plane,
6306 struct drm_plane_state *old_plane_state)
6308 struct amdgpu_device *adev = plane->dev->dev_private;
6309 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6310 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6311 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6312 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6313 uint64_t address = afb ? afb->address : 0;
6314 struct dc_cursor_position position;
6315 struct dc_cursor_attributes attributes;
6318 if (!plane->state->fb && !old_plane_state->fb)
6321 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
6323 amdgpu_crtc->crtc_id,
6324 plane->state->crtc_w,
6325 plane->state->crtc_h);
6327 ret = get_cursor_position(plane, crtc, &position);
6331 if (!position.enable) {
6332 /* turn off cursor */
6333 if (crtc_state && crtc_state->stream) {
6334 mutex_lock(&adev->dm.dc_lock);
6335 dc_stream_set_cursor_position(crtc_state->stream,
6337 mutex_unlock(&adev->dm.dc_lock);
6342 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6343 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6345 memset(&attributes, 0, sizeof(attributes));
6346 attributes.address.high_part = upper_32_bits(address);
6347 attributes.address.low_part = lower_32_bits(address);
6348 attributes.width = plane->state->crtc_w;
6349 attributes.height = plane->state->crtc_h;
6350 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6351 attributes.rotation_angle = 0;
6352 attributes.attribute_flags.value = 0;
6354 attributes.pitch = attributes.width;
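/*
 * Editor's note: the cursor surface is a packed pre-multiplied ARGB8888
 * buffer and DC takes the cursor pitch in pixels rather than bytes, so a
 * packed surface simply has pitch == width. This assumption would need
 * revisiting if other cursor formats were ever wired up.
 */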
6356 if (crtc_state->stream) {
6357 mutex_lock(&adev->dm.dc_lock);
6358 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6360 DRM_ERROR("DC failed to set cursor attributes\n");
6362 if (!dc_stream_set_cursor_position(crtc_state->stream,
6364 DRM_ERROR("DC failed to set cursor position\n");
6365 mutex_unlock(&adev->dm.dc_lock);
6369 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6372 assert_spin_locked(&acrtc->base.dev->event_lock);
6373 WARN_ON(acrtc->event);
6375 acrtc->event = acrtc->base.state->event;
6377 /* Set the flip status */
6378 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6380 /* Mark this event as consumed */
6381 acrtc->base.state->event = NULL;
6383 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6387 static void update_freesync_state_on_stream(
6388 struct amdgpu_display_manager *dm,
6389 struct dm_crtc_state *new_crtc_state,
6390 struct dc_stream_state *new_stream,
6391 struct dc_plane_state *surface,
6392 u32 flip_timestamp_in_us)
6394 struct mod_vrr_params vrr_params;
6395 struct dc_info_packet vrr_infopacket = {0};
6396 struct amdgpu_device *adev = dm->adev;
6397 unsigned long flags;
6403 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6404 * For now it's sufficient to just guard against these conditions.
6407 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6410 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6411 vrr_params = new_crtc_state->vrr_params;
6414 mod_freesync_handle_preflip(
6415 dm->freesync_module,
6418 flip_timestamp_in_us,
6421 if (adev->family < AMDGPU_FAMILY_AI &&
6422 amdgpu_dm_vrr_active(new_crtc_state)) {
6423 mod_freesync_handle_v_update(dm->freesync_module,
6424 new_stream, &vrr_params);
6426 /* Need to call this before the frame ends. */
6427 dc_stream_adjust_vmin_vmax(dm->dc,
6428 new_crtc_state->stream,
6429 &vrr_params.adjust);
6433 mod_freesync_build_vrr_infopacket(
6434 dm->freesync_module,
6438 TRANSFER_FUNC_UNKNOWN,
6441 new_crtc_state->freesync_timing_changed |=
6442 (memcmp(&new_crtc_state->vrr_params.adjust,
6444 sizeof(vrr_params.adjust)) != 0);
6446 new_crtc_state->freesync_vrr_info_changed |=
6447 (memcmp(&new_crtc_state->vrr_infopacket,
6449 sizeof(vrr_infopacket)) != 0);
6451 new_crtc_state->vrr_params = vrr_params;
6452 new_crtc_state->vrr_infopacket = vrr_infopacket;
6454 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6455 new_stream->vrr_infopacket = vrr_infopacket;
6457 if (new_crtc_state->freesync_vrr_info_changed)
6458 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6459 new_crtc_state->base.crtc->base.id,
6460 (int)new_crtc_state->base.vrr_enabled,
6461 (int)vrr_params.state);
6463 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6466 static void pre_update_freesync_state_on_stream(
6467 struct amdgpu_display_manager *dm,
6468 struct dm_crtc_state *new_crtc_state)
6470 struct dc_stream_state *new_stream = new_crtc_state->stream;
6471 struct mod_vrr_params vrr_params;
6472 struct mod_freesync_config config = new_crtc_state->freesync_config;
6473 struct amdgpu_device *adev = dm->adev;
6474 unsigned long flags;
6480 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6481 * For now it's sufficient to just guard against these conditions.
6483 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6486 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6487 vrr_params = new_crtc_state->vrr_params;
6489 if (new_crtc_state->vrr_supported &&
6490 config.min_refresh_in_uhz &&
6491 config.max_refresh_in_uhz) {
6492 config.state = new_crtc_state->base.vrr_enabled ?
6493 VRR_STATE_ACTIVE_VARIABLE :
6496 config.state = VRR_STATE_UNSUPPORTED;
6499 mod_freesync_build_vrr_params(dm->freesync_module,
6501 &config, &vrr_params);
6503 new_crtc_state->freesync_timing_changed |=
6504 (memcmp(&new_crtc_state->vrr_params.adjust,
6506 sizeof(vrr_params.adjust)) != 0);
6508 new_crtc_state->vrr_params = vrr_params;
6509 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6512 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6513 struct dm_crtc_state *new_state)
6515 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6516 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
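/*
* The vblank reference and vupdate irq taken in the off->on branch below
* are dropped again in the on->off branch, so get/put stays balanced
* across repeated VRR transitions.
*/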
6518 if (!old_vrr_active && new_vrr_active) {
6519 /* Transition VRR inactive -> active:
* While VRR is active, we must not disable the vblank irq, as a
* re-enable after a disable would compute bogus vblank/pflip
* timestamps if the disable happened inside the display front porch.
*
* We also need the vupdate irq for the actual core vblank handling
* at the end of vblank.
*/
6527 dm_set_vupdate_irq(new_state->base.crtc, true);
6528 drm_crtc_vblank_get(new_state->base.crtc);
6529 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6530 __func__, new_state->base.crtc->base.id);
6531 } else if (old_vrr_active && !new_vrr_active) {
6532 /* Transition VRR active -> inactive:
6533 * Allow vblank irq disable again for fixed refresh rate.
6535 dm_set_vupdate_irq(new_state->base.crtc, false);
6536 drm_crtc_vblank_put(new_state->base.crtc);
6537 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6538 __func__, new_state->base.crtc->base.id);
6542 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6544 struct drm_plane *plane;
6545 struct drm_plane_state *old_plane_state, *new_plane_state;
6549 * TODO: Make this per-stream so we don't issue redundant updates for
6550 * commits with multiple streams.
6552 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6554 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6555 handle_cursor_update(plane, old_plane_state);
6558 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6559 struct dc_state *dc_state,
6560 struct drm_device *dev,
6561 struct amdgpu_display_manager *dm,
6562 struct drm_crtc *pcrtc,
6563 bool wait_for_vblank)
6566 uint64_t timestamp_ns;
6567 struct drm_plane *plane;
6568 struct drm_plane_state *old_plane_state, *new_plane_state;
6569 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6570 struct drm_crtc_state *new_pcrtc_state =
6571 drm_atomic_get_new_crtc_state(state, pcrtc);
6572 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6573 struct dm_crtc_state *dm_old_crtc_state =
6574 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6575 int planes_count = 0, vpos, hpos;
6577 unsigned long flags;
6578 struct amdgpu_bo *abo;
6579 uint64_t tiling_flags;
6580 uint32_t target_vblank, last_flip_vblank;
6581 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6582 bool pflip_present = false;
6584 struct dc_surface_update surface_updates[MAX_SURFACES];
6585 struct dc_plane_info plane_infos[MAX_SURFACES];
6586 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6587 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6588 struct dc_stream_update stream_update;
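} *bundle;

/*
* Note: the bundle embeds several MAX_SURFACES-sized arrays, which makes
* it too large for the stack; it is therefore heap-allocated below.
*/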
6591 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6594 dm_error("Failed to allocate update bundle\n");
/*
* Disable the cursor first if we're disabling all the planes.
* It'll remain on the screen after the planes are re-enabled
* if we don't.
*/
6603 if (acrtc_state->active_planes == 0)
6604 amdgpu_dm_commit_cursors(state);
6606 /* update planes when needed */
6607 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6608 struct drm_crtc *crtc = new_plane_state->crtc;
6609 struct drm_crtc_state *new_crtc_state;
6610 struct drm_framebuffer *fb = new_plane_state->fb;
6611 bool plane_needs_flip;
6612 struct dc_plane_state *dc_plane;
6613 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6615 /* Cursor plane is handled after stream updates */
6616 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6619 if (!fb || !crtc || pcrtc != crtc)
6622 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6623 if (!new_crtc_state->active)
6626 dc_plane = dm_new_plane_state->dc_state;
6628 bundle->surface_updates[planes_count].surface = dc_plane;
6629 if (new_pcrtc_state->color_mgmt_changed) {
6630 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6631 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6634 fill_dc_scaling_info(new_plane_state,
6635 &bundle->scaling_infos[planes_count]);
6637 bundle->surface_updates[planes_count].scaling_info =
6638 &bundle->scaling_infos[planes_count];
6640 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6642 pflip_present = pflip_present || plane_needs_flip;
6644 if (!plane_needs_flip) {
6649 abo = gem_to_amdgpu_bo(fb->obj[0]);
/*
* Wait for all fences on this FB. Do a limited wait to avoid
* deadlock during GPU reset, when this fence will not signal
* but we hold the reservation lock for the BO.
*/
6656 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6658 msecs_to_jiffies(5000));
6659 if (unlikely(r <= 0))
6660 DRM_ERROR("Waiting for fences timed out!");
/*
* TODO: This might fail and hence is better not used; wait
* explicitly on fences instead. In general this should be
* called for blocking commits, as per the framework helpers.
*/
6668 r = amdgpu_bo_reserve(abo, true);
6669 if (unlikely(r != 0))
6670 DRM_ERROR("failed to reserve buffer before flip\n");
6672 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6674 amdgpu_bo_unreserve(abo);
6676 fill_dc_plane_info_and_addr(
6677 dm->adev, new_plane_state, tiling_flags,
6678 &bundle->plane_infos[planes_count],
6679 &bundle->flip_addrs[planes_count].address);
6681 bundle->surface_updates[planes_count].plane_info =
6682 &bundle->plane_infos[planes_count];
/*
* Only allow immediate flips for fast updates that don't
* change the FB pitch, DCC state, rotation or mirroring.
*/
6688 bundle->flip_addrs[planes_count].flip_immediate =
6689 crtc->state->async_flip &&
6690 acrtc_state->update_type == UPDATE_TYPE_FAST;
6692 timestamp_ns = ktime_get_ns();
6693 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6694 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6695 bundle->surface_updates[planes_count].surface = dc_plane;
6697 if (!bundle->surface_updates[planes_count].surface) {
6698 DRM_ERROR("No surface for CRTC: id=%d\n",
6699 acrtc_attach->crtc_id);
6703 if (plane == pcrtc->primary)
6704 update_freesync_state_on_stream(
6707 acrtc_state->stream,
6709 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6711 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6713 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6714 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6720 if (pflip_present) {
6722 /* Use old throttling in non-vrr fixed refresh rate mode
6723 * to keep flip scheduling based on target vblank counts
6724 * working in a backwards compatible way, e.g., for
6725 * clients using the GLX_OML_sync_control extension or
6726 * DRI3/Present extension with defined target_msc.
6728 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6731 /* For variable refresh rate mode only:
6732 * Get vblank of last completed flip to avoid > 1 vrr
6733 * flips per video frame by use of throttling, but allow
6734 * flip programming anywhere in the possibly large
6735 * variable vrr vblank interval for fine-grained flip
6736 * timing control and more opportunity to avoid stutter
6737 * on late submission of flips.
6739 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6740 last_flip_vblank = acrtc_attach->last_flip_vblank;
6741 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
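/*
* wait_for_vblank is used as 0 or 1 here: the target is either the
* vblank of the last completed flip (no throttling) or the one after
* it (at most one flip per refresh cycle).
*/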
6744 target_vblank = last_flip_vblank + wait_for_vblank;
6747 * Wait until we're out of the vertical blank period before the one
6748 * targeted by the flip
6750 while ((acrtc_attach->enabled &&
6751 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6752 0, &vpos, &hpos, NULL,
6753 NULL, &pcrtc->hwmode)
6754 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6755 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6756 (int)(target_vblank -
6757 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6758 usleep_range(1000, 1100);
6761 if (acrtc_attach->base.state->event) {
6762 drm_crtc_vblank_get(pcrtc);
6764 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6766 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6767 prepare_flip_isr(acrtc_attach);
6769 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6772 if (acrtc_state->stream) {
6773 if (acrtc_state->freesync_vrr_info_changed)
6774 bundle->stream_update.vrr_infopacket =
6775 &acrtc_state->stream->vrr_infopacket;
6779 /* Update the planes if changed or disable if we don't have any. */
6780 if ((planes_count || acrtc_state->active_planes == 0) &&
6781 acrtc_state->stream) {
6782 bundle->stream_update.stream = acrtc_state->stream;
6783 if (new_pcrtc_state->mode_changed) {
6784 bundle->stream_update.src = acrtc_state->stream->src;
6785 bundle->stream_update.dst = acrtc_state->stream->dst;
6788 if (new_pcrtc_state->color_mgmt_changed) {
6790 * TODO: This isn't fully correct since we've actually
6791 * already modified the stream in place.
6793 bundle->stream_update.gamut_remap =
6794 &acrtc_state->stream->gamut_remap_matrix;
6795 bundle->stream_update.output_csc_transform =
6796 &acrtc_state->stream->csc_color_matrix;
6797 bundle->stream_update.out_transfer_func =
6798 acrtc_state->stream->out_transfer_func;
6801 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6802 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6803 bundle->stream_update.abm_level = &acrtc_state->abm_level;
6806 * If FreeSync state on the stream has changed then we need to
6807 * re-adjust the min/max bounds now that DC doesn't handle this
6808 * as part of commit.
6810 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6811 amdgpu_dm_vrr_active(acrtc_state)) {
6812 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6813 dc_stream_adjust_vmin_vmax(
6814 dm->dc, acrtc_state->stream,
6815 &acrtc_state->vrr_params.adjust);
6816 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6818 mutex_lock(&dm->dc_lock);
6819 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6820 acrtc_state->stream->link->psr_allow_active)
6821 amdgpu_dm_psr_disable(acrtc_state->stream);
6823 dc_commit_updates_for_stream(dm->dc,
6824 bundle->surface_updates,
6826 acrtc_state->stream,
6827 &bundle->stream_update,
6830 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6831 acrtc_state->stream->psr_version &&
6832 !acrtc_state->stream->link->psr_feature_enabled)
6833 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6834 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6835 acrtc_state->stream->link->psr_feature_enabled &&
6836 !acrtc_state->stream->link->psr_allow_active) {
6837 amdgpu_dm_psr_enable(acrtc_state->stream);
6840 mutex_unlock(&dm->dc_lock);
6844 * Update cursor state *after* programming all the planes.
6845 * This avoids redundant programming in the case where we're going
6846 * to be disabling a single plane - those pipes are being disabled.
6848 if (acrtc_state->active_planes)
6849 amdgpu_dm_commit_cursors(state);
6855 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6856 struct drm_atomic_state *state)
6858 struct amdgpu_device *adev = dev->dev_private;
6859 struct amdgpu_dm_connector *aconnector;
6860 struct drm_connector *connector;
6861 struct drm_connector_state *old_con_state, *new_con_state;
6862 struct drm_crtc_state *new_crtc_state;
6863 struct dm_crtc_state *new_dm_crtc_state;
6864 const struct dc_stream_status *status;
6867 /* Notify device removals. */
6868 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6869 if (old_con_state->crtc != new_con_state->crtc) {
6870 /* CRTC changes require notification. */
6874 if (!new_con_state->crtc)
6877 new_crtc_state = drm_atomic_get_new_crtc_state(
6878 state, new_con_state->crtc);
6880 if (!new_crtc_state)
6883 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6887 aconnector = to_amdgpu_dm_connector(connector);
6889 mutex_lock(&adev->dm.audio_lock);
6890 inst = aconnector->audio_inst;
6891 aconnector->audio_inst = -1;
6892 mutex_unlock(&adev->dm.audio_lock);
6894 amdgpu_dm_audio_eld_notify(adev, inst);
6897 /* Notify audio device additions. */
6898 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6899 if (!new_con_state->crtc)
6902 new_crtc_state = drm_atomic_get_new_crtc_state(
6903 state, new_con_state->crtc);
6905 if (!new_crtc_state)
6908 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6911 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6912 if (!new_dm_crtc_state->stream)
6915 status = dc_stream_get_status(new_dm_crtc_state->stream);
6919 aconnector = to_amdgpu_dm_connector(connector);
6921 mutex_lock(&adev->dm.audio_lock);
6922 inst = status->audio_inst;
6923 aconnector->audio_inst = inst;
6924 mutex_unlock(&adev->dm.audio_lock);
6926 amdgpu_dm_audio_eld_notify(adev, inst);
* Enable interrupts on CRTCs that are newly active, have undergone
* a modeset, or have active planes again.
6934 * Done in two passes, based on the for_modeset flag:
6935 * Pass 1: For CRTCs going through modeset
6936 * Pass 2: For CRTCs going from 0 to n active planes
6938 * Interrupts can only be enabled after the planes are programmed,
6939 * so this requires a two-pass approach since we don't want to
6940 * just defer the interrupts until after commit planes every time.
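*
* In commit tail, pass 1 (for_modeset = true) runs before the per-CRTC
* plane programming and pass 2 (for_modeset = false) runs after it.
*/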
6942 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6943 struct drm_atomic_state *state,
6946 struct amdgpu_device *adev = dev->dev_private;
6947 struct drm_crtc *crtc;
6948 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6950 #ifdef CONFIG_DEBUG_FS
6951 enum amdgpu_dm_pipe_crc_source source;
6954 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6955 new_crtc_state, i) {
6956 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6957 struct dm_crtc_state *dm_new_crtc_state =
6958 to_dm_crtc_state(new_crtc_state);
6959 struct dm_crtc_state *dm_old_crtc_state =
6960 to_dm_crtc_state(old_crtc_state);
6961 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6964 run_pass = (for_modeset && modeset) ||
6965 (!for_modeset && !modeset &&
6966 !dm_old_crtc_state->interrupts_enabled);
6971 if (!dm_new_crtc_state->interrupts_enabled)
6974 manage_dm_interrupts(adev, acrtc, true);
6976 #ifdef CONFIG_DEBUG_FS
/* The stream has changed, so CRC capture needs to be re-enabled. */
6978 source = dm_new_crtc_state->crc_src;
6979 if (amdgpu_dm_is_valid_crc_source(source)) {
6980 amdgpu_dm_crtc_configure_crc_source(
6981 crtc, dm_new_crtc_state,
6982 dm_new_crtc_state->crc_src);
6989 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6990 * @crtc_state: the DRM CRTC state
6991 * @stream_state: the DC stream state.
* Copy the mirrored transient state flags from DRM to DC. It is used to bring
6994 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6996 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6997 struct dc_stream_state *stream_state)
6999 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7002 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7003 struct drm_atomic_state *state,
7006 struct drm_crtc *crtc;
7007 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7008 struct amdgpu_device *adev = dev->dev_private;
/*
* We evade vblank and pflip interrupts on CRTCs that are undergoing
* a modeset, are being disabled, or have no active planes.
*
* It's done in atomic commit rather than commit tail for now since
* some of these interrupt handlers access the current CRTC state and
* potentially the stream pointer itself.
*
* Since the atomic state is swapped within atomic commit and not within
* commit tail, this would lead to the new state (which hasn't been
* committed yet) being accessed from within the handlers.
* TODO: Fix this so we can do this in commit tail and not have to
* block in commit.
*/
7026 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7027 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7028 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7029 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7031 if (dm_old_crtc_state->interrupts_enabled &&
7032 (!dm_new_crtc_state->interrupts_enabled ||
7033 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7034 manage_dm_interrupts(adev, acrtc, false);
7037 * Add check here for SoC's that support hardware cursor plane, to
7038 * unset legacy_cursor_update
7041 return drm_atomic_helper_commit(dev, state, nonblock);
/* TODO: Handle EINTR, re-enable IRQ */
7047 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7048 * @state: The atomic state to commit
7050 * This will tell DC to commit the constructed DC state from atomic_check,
* programming the hardware. Any failure here implies a hardware failure, since
* atomic check should have filtered out anything non-kosher.
*/
7054 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7056 struct drm_device *dev = state->dev;
7057 struct amdgpu_device *adev = dev->dev_private;
7058 struct amdgpu_display_manager *dm = &adev->dm;
7059 struct dm_atomic_state *dm_state;
7060 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7062 struct drm_crtc *crtc;
7063 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7064 unsigned long flags;
7065 bool wait_for_vblank = true;
7066 struct drm_connector *connector;
7067 struct drm_connector_state *old_con_state, *new_con_state;
7068 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7069 int crtc_disable_count = 0;
7071 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7073 dm_state = dm_atomic_get_new_state(state);
7074 if (dm_state && dm_state->context) {
7075 dc_state = dm_state->context;
7077 /* No state changes, retain current state. */
7078 dc_state_temp = dc_create_state(dm->dc);
7079 ASSERT(dc_state_temp);
7080 dc_state = dc_state_temp;
7081 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7084 /* update changed items */
7085 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7086 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7088 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7089 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7092 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7094 "connectors_changed:%d\n",
7096 new_crtc_state->enable,
7097 new_crtc_state->active,
7098 new_crtc_state->planes_changed,
7099 new_crtc_state->mode_changed,
7100 new_crtc_state->active_changed,
7101 new_crtc_state->connectors_changed);
7103 /* Copy all transient state flags into dc state */
7104 if (dm_new_crtc_state->stream) {
7105 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7106 dm_new_crtc_state->stream);
7109 /* handles headless hotplug case, updating new_state and
7110 * aconnector as needed
7113 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7115 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7117 if (!dm_new_crtc_state->stream) {
/*
* This can happen because of issues with userspace notification
* delivery: userspace tries to set a mode on a display that is in
* fact disconnected, so dc_sink is NULL on the aconnector, and we
* expect a mode reset to come soon.
*
* It can also happen when an unplug occurs during the resume
* sequence.
*
* In either case we want to pretend we still have a sink, to keep
* the pipe running so that hw state stays consistent with sw state.
*/
7133 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7134 __func__, acrtc->base.base.id);
7138 if (dm_old_crtc_state->stream)
7139 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7141 pm_runtime_get_noresume(dev->dev);
7143 acrtc->enabled = true;
7144 acrtc->hw_mode = new_crtc_state->mode;
7145 crtc->hwmode = new_crtc_state->mode;
7146 } else if (modereset_required(new_crtc_state)) {
7147 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7148 /* i.e. reset mode */
7149 if (dm_old_crtc_state->stream) {
7150 if (dm_old_crtc_state->stream->link->psr_allow_active)
7151 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7153 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7156 } /* for_each_crtc_in_state() */
7159 dm_enable_per_frame_crtc_master_sync(dc_state);
7160 mutex_lock(&dm->dc_lock);
7161 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7162 mutex_unlock(&dm->dc_lock);
7165 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7166 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7168 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7170 if (dm_new_crtc_state->stream != NULL) {
7171 const struct dc_stream_status *status =
7172 dc_stream_get_status(dm_new_crtc_state->stream);
7175 status = dc_stream_get_status_from_state(dc_state,
7176 dm_new_crtc_state->stream);
7179 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7181 acrtc->otg_inst = status->primary_otg_inst;
7184 #ifdef CONFIG_DRM_AMD_DC_HDCP
7185 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7186 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7187 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7188 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7190 new_crtc_state = NULL;
7193 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7195 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7197 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7198 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7199 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7200 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7204 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7205 hdcp_update_display(
7206 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7207 new_con_state->hdcp_content_type,
7208 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7213 /* Handle connector state changes */
7214 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7215 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7216 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7217 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7218 struct dc_surface_update dummy_updates[MAX_SURFACES];
7219 struct dc_stream_update stream_update;
7220 struct dc_info_packet hdr_packet;
7221 struct dc_stream_status *status = NULL;
7222 bool abm_changed, hdr_changed, scaling_changed;
7224 memset(&dummy_updates, 0, sizeof(dummy_updates));
7225 memset(&stream_update, 0, sizeof(stream_update));
7228 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7229 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7232 /* Skip any modesets/resets */
7233 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7236 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7237 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7239 scaling_changed = is_scaling_state_different(dm_new_con_state,
7242 abm_changed = dm_new_crtc_state->abm_level !=
7243 dm_old_crtc_state->abm_level;
7246 is_hdr_metadata_different(old_con_state, new_con_state);
7248 if (!scaling_changed && !abm_changed && !hdr_changed)
7251 stream_update.stream = dm_new_crtc_state->stream;
7252 if (scaling_changed) {
7253 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7254 dm_new_con_state, dm_new_crtc_state->stream);
7256 stream_update.src = dm_new_crtc_state->stream->src;
7257 stream_update.dst = dm_new_crtc_state->stream->dst;
7261 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7263 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7267 fill_hdr_info_packet(new_con_state, &hdr_packet);
7268 stream_update.hdr_static_metadata = &hdr_packet;
7271 status = dc_stream_get_status(dm_new_crtc_state->stream);
7273 WARN_ON(!status->plane_count);
7276 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7277 * Here we create an empty update on each plane.
7278 * To fix this, DC should permit updating only stream properties.
7280 for (j = 0; j < status->plane_count; j++)
7281 dummy_updates[j].surface = status->plane_states[0];
7284 mutex_lock(&dm->dc_lock);
7285 dc_commit_updates_for_stream(dm->dc,
7287 status->plane_count,
7288 dm_new_crtc_state->stream,
7291 mutex_unlock(&dm->dc_lock);
7294 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7295 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7296 new_crtc_state, i) {
7297 if (old_crtc_state->active && !new_crtc_state->active)
7298 crtc_disable_count++;
7300 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7301 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7303 /* Update freesync active state. */
7304 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7306 /* Handle vrr on->off / off->on transitions */
7307 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7311 /* Enable interrupts for CRTCs going through a modeset. */
7312 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7314 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7315 if (new_crtc_state->async_flip)
7316 wait_for_vblank = false;
/* Update planes when needed, per CRTC */
7319 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7320 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7322 if (dm_new_crtc_state->stream)
7323 amdgpu_dm_commit_planes(state, dc_state, dev,
7324 dm, crtc, wait_for_vblank);
7327 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7328 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7330 /* Update audio instances for each connector. */
7331 amdgpu_dm_commit_audio(dev, state);
/*
* Send a vblank event for every CRTC event not handled in the flip
* path, and mark each event consumed for
* drm_atomic_helper_commit_hw_done().
*/
7337 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7338 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7340 if (new_crtc_state->event)
7341 drm_send_event_locked(dev, &new_crtc_state->event->base);
7343 new_crtc_state->event = NULL;
7345 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7347 /* Signal HW programming completion */
7348 drm_atomic_helper_commit_hw_done(state);
7350 if (wait_for_vblank)
7351 drm_atomic_helper_wait_for_flip_done(dev, state);
7353 drm_atomic_helper_cleanup_planes(dev, state);
7356 * Finally, drop a runtime PM reference for each newly disabled CRTC,
* so we can put the GPU into runtime suspend if we're not driving any
* displays anymore.
*/
7360 for (i = 0; i < crtc_disable_count; i++)
7361 pm_runtime_put_autosuspend(dev->dev);
7362 pm_runtime_mark_last_busy(dev->dev);
7365 dc_release_state(dc_state_temp);
7369 static int dm_force_atomic_commit(struct drm_connector *connector)
7372 struct drm_device *ddev = connector->dev;
7373 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7374 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7375 struct drm_plane *plane = disconnected_acrtc->base.primary;
7376 struct drm_connector_state *conn_state;
7377 struct drm_crtc_state *crtc_state;
7378 struct drm_plane_state *plane_state;
7383 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7385 /* Construct an atomic state to restore previous display setting */
7388 * Attach connectors to drm_atomic_state
7390 conn_state = drm_atomic_get_connector_state(state, connector);
7392 ret = PTR_ERR_OR_ZERO(conn_state);
/* Attach CRTC to drm_atomic_state */
7397 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7399 ret = PTR_ERR_OR_ZERO(crtc_state);
7403 /* force a restore */
7404 crtc_state->mode_changed = true;
7406 /* Attach plane to drm_atomic_state */
7407 plane_state = drm_atomic_get_plane_state(state, plane);
7409 ret = PTR_ERR_OR_ZERO(plane_state);
7414 /* Call commit internally with the state we just constructed */
7415 ret = drm_atomic_commit(state);
7420 DRM_ERROR("Restoring old state failed with %i\n", ret);
7421 drm_atomic_state_put(state);
7427 * This function handles all cases when set mode does not come upon hotplug.
7428 * This includes when a display is unplugged then plugged back into the
* same port and when running without usermode desktop manager support.
*/
7431 void dm_restore_drm_connector_state(struct drm_device *dev,
7432 struct drm_connector *connector)
7434 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7435 struct amdgpu_crtc *disconnected_acrtc;
7436 struct dm_crtc_state *acrtc_state;
7438 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7441 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7442 if (!disconnected_acrtc)
7445 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7446 if (!acrtc_state->stream)
7450 * If the previous sink is not released and different from the current,
* we deduce we are in a state where we cannot rely on a usermode call
* to turn on the display, so we do it here.
*/
7454 if (acrtc_state->stream->sink != aconnector->dc_sink)
7455 dm_force_atomic_commit(&aconnector->base);
7459 * Grabs all modesetting locks to serialize against any blocking commits,
* and waits for completion of all non-blocking commits.
*/
7462 static int do_aquire_global_lock(struct drm_device *dev,
7463 struct drm_atomic_state *state)
7465 struct drm_crtc *crtc;
7466 struct drm_crtc_commit *commit;
/*
* Adding all modeset locks to acquire_ctx ensures that when the
* framework releases it, the extra locks we take here are released
* too.
*/
7474 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7478 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7479 spin_lock(&crtc->commit_lock);
7480 commit = list_first_entry_or_null(&crtc->commit_list,
7481 struct drm_crtc_commit, commit_entry);
7483 drm_crtc_commit_get(commit);
7484 spin_unlock(&crtc->commit_lock);
/*
* Make sure all pending HW programming has completed and all
* pending page flips are done.
*/
7493 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7496 ret = wait_for_completion_interruptible_timeout(
7497 &commit->flip_done, 10*HZ);
7500 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7501 "timed out\n", crtc->base.id, crtc->name);
7503 drm_crtc_commit_put(commit);
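/*
* wait_for_completion_interruptible_timeout() returns a negative errno
* on interruption, 0 on timeout and a positive count on completion, so
* only the error case is propagated to the caller.
*/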
7506 return ret < 0 ? ret : 0;
7509 static void get_freesync_config_for_crtc(
7510 struct dm_crtc_state *new_crtc_state,
7511 struct dm_connector_state *new_con_state)
7513 struct mod_freesync_config config = {0};
7514 struct amdgpu_dm_connector *aconnector =
7515 to_amdgpu_dm_connector(new_con_state->base.connector);
7516 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7517 int vrefresh = drm_mode_vrefresh(mode);
7519 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7520 vrefresh >= aconnector->min_vfreq &&
7521 vrefresh <= aconnector->max_vfreq;
7523 if (new_crtc_state->vrr_supported) {
7524 new_crtc_state->stream->ignore_msa_timing_param = true;
7525 config.state = new_crtc_state->base.vrr_enabled ?
7526 VRR_STATE_ACTIVE_VARIABLE :
7528 config.min_refresh_in_uhz =
7529 aconnector->min_vfreq * 1000000;
7530 config.max_refresh_in_uhz =
7531 aconnector->max_vfreq * 1000000;
7532 config.vsif_supported = true;
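/*
* The freesync module expects refresh bounds in micro-Hz (uHz), while
* the connector stores Hz, hence the 1000000 scaling above.
*/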
7536 new_crtc_state->freesync_config = config;
7539 static void reset_freesync_config_for_crtc(
7540 struct dm_crtc_state *new_crtc_state)
7542 new_crtc_state->vrr_supported = false;
7544 memset(&new_crtc_state->vrr_params, 0,
7545 sizeof(new_crtc_state->vrr_params));
7546 memset(&new_crtc_state->vrr_infopacket, 0,
7547 sizeof(new_crtc_state->vrr_infopacket));
7550 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7551 struct drm_atomic_state *state,
7552 struct drm_crtc *crtc,
7553 struct drm_crtc_state *old_crtc_state,
7554 struct drm_crtc_state *new_crtc_state,
7556 bool *lock_and_validation_needed)
7558 struct dm_atomic_state *dm_state = NULL;
7559 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7560 struct dc_stream_state *new_stream;
7564 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7565 * update changed items
7567 struct amdgpu_crtc *acrtc = NULL;
7568 struct amdgpu_dm_connector *aconnector = NULL;
7569 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7570 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7574 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7575 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7576 acrtc = to_amdgpu_crtc(crtc);
7577 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7579 /* TODO This hack should go away */
7580 if (aconnector && enable) {
7581 /* Make sure fake sink is created in plug-in scenario */
7582 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7584 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7587 if (IS_ERR(drm_new_conn_state)) {
7588 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7592 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7593 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7595 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7598 new_stream = create_stream_for_sink(aconnector,
7599 &new_crtc_state->mode,
7601 dm_old_crtc_state->stream);
/*
* We can have no stream on ACTION_SET if a display
* was disconnected during S3; in this case it is not an
* error, since the OS will be updated after detection and
* will do the right thing on the next atomic commit.
*/
7611 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7612 __func__, acrtc->base.base.id);
7617 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7619 ret = fill_hdr_info_packet(drm_new_conn_state,
7620 &new_stream->hdr_static_metadata);
7625 * If we already removed the old stream from the context
7626 * (and set the new stream to NULL) then we can't reuse
7627 * the old stream even if the stream and scaling are unchanged.
* We'll hit the BUG_ON below and get a black screen.
7630 * TODO: Refactor this function to allow this check to work
7631 * in all conditions.
7633 if (dm_new_crtc_state->stream &&
7634 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7635 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7636 new_crtc_state->mode_changed = false;
7637 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7638 new_crtc_state->mode_changed);
7642 /* mode_changed flag may get updated above, need to check again */
7643 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7647 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7649 "connectors_changed:%d\n",
7651 new_crtc_state->enable,
7652 new_crtc_state->active,
7653 new_crtc_state->planes_changed,
7654 new_crtc_state->mode_changed,
7655 new_crtc_state->active_changed,
7656 new_crtc_state->connectors_changed);
7658 /* Remove stream for any changed/disabled CRTC */
7661 if (!dm_old_crtc_state->stream)
7664 ret = dm_atomic_get_state(state, &dm_state);
7668 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7671 /* i.e. reset mode */
7672 if (dc_remove_stream_from_ctx(
7675 dm_old_crtc_state->stream) != DC_OK) {
7680 dc_stream_release(dm_old_crtc_state->stream);
7681 dm_new_crtc_state->stream = NULL;
7683 reset_freesync_config_for_crtc(dm_new_crtc_state);
7685 *lock_and_validation_needed = true;
} else { /* Add stream for any updated/enabled CRTC */
/*
* Quick fix to prevent a NULL pointer dereference on new_stream when
* added MST connectors are not found in the existing crtc_state in
* chained mode.
* TODO: need to dig out the root cause of this.
*/
7693 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7696 if (modereset_required(new_crtc_state))
7699 if (modeset_required(new_crtc_state, new_stream,
7700 dm_old_crtc_state->stream)) {
7702 WARN_ON(dm_new_crtc_state->stream);
7704 ret = dm_atomic_get_state(state, &dm_state);
7708 dm_new_crtc_state->stream = new_stream;
7710 dc_stream_retain(new_stream);
7712 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7715 if (dc_add_stream_to_ctx(
7718 dm_new_crtc_state->stream) != DC_OK) {
7723 *lock_and_validation_needed = true;
7728 /* Release extra reference */
7730 dc_stream_release(new_stream);
7733 * We want to do dc stream updates that do not require a
7734 * full modeset below.
7736 if (!(enable && aconnector && new_crtc_state->enable &&
7737 new_crtc_state->active))
/*
* Given the above conditions, the dc state cannot be NULL because:
* 1. We're in the process of enabling CRTCs (the stream has just been
*    added to the dc context, or is already on it),
* 2. it has a valid connector attached, and
* 3. it is currently active and enabled.
* => The dc stream state currently exists.
*/
7747 BUG_ON(dm_new_crtc_state->stream == NULL);
7749 /* Scaling or underscan settings */
7750 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7751 update_stream_scaling_settings(
7752 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7755 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7758 * Color management settings. We also update color properties
7759 * when a modeset is needed, to ensure it gets reprogrammed.
7761 if (dm_new_crtc_state->base.color_mgmt_changed ||
7762 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7763 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7768 /* Update Freesync settings. */
7769 get_freesync_config_for_crtc(dm_new_crtc_state,
7776 dc_stream_release(new_stream);
7780 static bool should_reset_plane(struct drm_atomic_state *state,
7781 struct drm_plane *plane,
7782 struct drm_plane_state *old_plane_state,
7783 struct drm_plane_state *new_plane_state)
7785 struct drm_plane *other;
7786 struct drm_plane_state *old_other_state, *new_other_state;
7787 struct drm_crtc_state *new_crtc_state;
/*
* TODO: Remove this hack once the checks below are sufficient to
* determine when we need to reset all the planes on a CRTC.
*/
7795 if (state->allow_modeset)
7798 /* Exit early if we know that we're adding or removing the plane. */
7799 if (old_plane_state->crtc != new_plane_state->crtc)
7802 /* old crtc == new_crtc == NULL, plane not in context. */
7803 if (!new_plane_state->crtc)
7807 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7809 if (!new_crtc_state)
7812 /* CRTC Degamma changes currently require us to recreate planes. */
7813 if (new_crtc_state->color_mgmt_changed)
7816 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7820 * If there are any new primary or overlay planes being added or
7821 * removed then the z-order can potentially change. To ensure
7822 * correct z-order and pipe acquisition the current DC architecture
7823 * requires us to remove and recreate all existing planes.
7825 * TODO: Come up with a more elegant solution for this.
7827 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7828 if (other->type == DRM_PLANE_TYPE_CURSOR)
7831 if (old_other_state->crtc != new_plane_state->crtc &&
7832 new_other_state->crtc != new_plane_state->crtc)
7835 if (old_other_state->crtc != new_other_state->crtc)
7838 /* TODO: Remove this once we can handle fast format changes. */
7839 if (old_other_state->fb && new_other_state->fb &&
7840 old_other_state->fb->format != new_other_state->fb->format)
7847 static int dm_update_plane_state(struct dc *dc,
7848 struct drm_atomic_state *state,
7849 struct drm_plane *plane,
7850 struct drm_plane_state *old_plane_state,
7851 struct drm_plane_state *new_plane_state,
7853 bool *lock_and_validation_needed)
7856 struct dm_atomic_state *dm_state = NULL;
7857 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7858 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7859 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7860 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7865 new_plane_crtc = new_plane_state->crtc;
7866 old_plane_crtc = old_plane_state->crtc;
7867 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7868 dm_old_plane_state = to_dm_plane_state(old_plane_state);
/* TODO: Implement atomic check for the cursor plane */
7871 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7874 needs_reset = should_reset_plane(state, plane, old_plane_state,
7877 /* Remove any changed/removed planes */
7882 if (!old_plane_crtc)
7885 old_crtc_state = drm_atomic_get_old_crtc_state(
7886 state, old_plane_crtc);
7887 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7889 if (!dm_old_crtc_state->stream)
7892 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7893 plane->base.id, old_plane_crtc->base.id);
7895 ret = dm_atomic_get_state(state, &dm_state);
7899 if (!dc_remove_plane_from_context(
7901 dm_old_crtc_state->stream,
7902 dm_old_plane_state->dc_state,
7903 dm_state->context)) {
7910 dc_plane_state_release(dm_old_plane_state->dc_state);
7911 dm_new_plane_state->dc_state = NULL;
7913 *lock_and_validation_needed = true;
7915 } else { /* Add new planes */
7916 struct dc_plane_state *dc_new_plane_state;
7918 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7921 if (!new_plane_crtc)
7924 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7925 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7927 if (!dm_new_crtc_state->stream)
7933 WARN_ON(dm_new_plane_state->dc_state);
7935 dc_new_plane_state = dc_create_plane_state(dc);
7936 if (!dc_new_plane_state)
7939 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7940 plane->base.id, new_plane_crtc->base.id);
7942 ret = fill_dc_plane_attributes(
7943 new_plane_crtc->dev->dev_private,
7948 dc_plane_state_release(dc_new_plane_state);
7952 ret = dm_atomic_get_state(state, &dm_state);
7954 dc_plane_state_release(dc_new_plane_state);
7959 * Any atomic check errors that occur after this will
7960 * not need a release. The plane state will be attached
7961 * to the stream, and therefore part of the atomic
* state. It'll be released when the atomic state is cleaned up.
*/
7965 if (!dc_add_plane_to_context(
7967 dm_new_crtc_state->stream,
7969 dm_state->context)) {
7971 dc_plane_state_release(dc_new_plane_state);
7975 dm_new_plane_state->dc_state = dc_new_plane_state;
7977 /* Tell DC to do a full surface update every time there
7978 * is a plane change. Inefficient, but works for now.
7980 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7982 *lock_and_validation_needed = true;
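/*
* Determine how invasive the planned hardware programming is: build
* dc_surface_update / dc_stream_update bundles for each affected stream
* and ask DC for the required update type (FAST/MED/FULL). Anything
* above MED is promoted to FULL.
*/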
7990 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7991 struct drm_atomic_state *state,
7992 enum surface_update_type *out_type)
7994 struct dc *dc = dm->dc;
7995 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7996 int i, j, num_plane, ret = 0;
7997 struct drm_plane_state *old_plane_state, *new_plane_state;
7998 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7999 struct drm_crtc *new_plane_crtc;
8000 struct drm_plane *plane;
8002 struct drm_crtc *crtc;
8003 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8004 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8005 struct dc_stream_status *status = NULL;
8006 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8007 struct surface_info_bundle {
8008 struct dc_surface_update surface_updates[MAX_SURFACES];
8009 struct dc_plane_info plane_infos[MAX_SURFACES];
8010 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8011 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8012 struct dc_stream_update stream_update;
8015 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8018 DRM_ERROR("Failed to allocate update bundle\n");
/* Set type to FULL to avoid crashing in DC */
8020 update_type = UPDATE_TYPE_FULL;
8024 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8026 memset(bundle, 0, sizeof(struct surface_info_bundle));
8028 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8029 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8032 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8033 update_type = UPDATE_TYPE_FULL;
8037 if (!new_dm_crtc_state->stream)
8040 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8041 const struct amdgpu_framebuffer *amdgpu_fb =
8042 to_amdgpu_framebuffer(new_plane_state->fb);
8043 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8044 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8045 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8046 uint64_t tiling_flags;
8048 new_plane_crtc = new_plane_state->crtc;
8049 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8050 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8052 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8055 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8056 update_type = UPDATE_TYPE_FULL;
8060 if (crtc != new_plane_crtc)
8063 bundle->surface_updates[num_plane].surface =
8064 new_dm_plane_state->dc_state;
8066 if (new_crtc_state->mode_changed) {
8067 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8068 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8071 if (new_crtc_state->color_mgmt_changed) {
8072 bundle->surface_updates[num_plane].gamma =
8073 new_dm_plane_state->dc_state->gamma_correction;
8074 bundle->surface_updates[num_plane].in_transfer_func =
8075 new_dm_plane_state->dc_state->in_transfer_func;
8076 bundle->stream_update.gamut_remap =
8077 &new_dm_crtc_state->stream->gamut_remap_matrix;
8078 bundle->stream_update.output_csc_transform =
8079 &new_dm_crtc_state->stream->csc_color_matrix;
8080 bundle->stream_update.out_transfer_func =
8081 new_dm_crtc_state->stream->out_transfer_func;
8084 ret = fill_dc_scaling_info(new_plane_state,
8089 bundle->surface_updates[num_plane].scaling_info = scaling_info;
8092 ret = get_fb_info(amdgpu_fb, &tiling_flags);
8096 ret = fill_dc_plane_info_and_addr(
8097 dm->adev, new_plane_state, tiling_flags,
8099 &flip_addr->address);
8103 bundle->surface_updates[num_plane].plane_info = plane_info;
8104 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8113 ret = dm_atomic_get_state(state, &dm_state);
8117 old_dm_state = dm_atomic_get_old_state(state);
8118 if (!old_dm_state) {
8123 status = dc_stream_get_status_from_state(old_dm_state->context,
8124 new_dm_crtc_state->stream);
8125 bundle->stream_update.stream = new_dm_crtc_state->stream;
8127 * TODO: DC modifies the surface during this call so we need
8128 * to lock here - find a way to do this without locking.
8130 mutex_lock(&dm->dc_lock);
8131 update_type = dc_check_update_surfaces_for_stream(
8132 dc, bundle->surface_updates, num_plane,
8133 &bundle->stream_update, status);
8134 mutex_unlock(&dm->dc_lock);
8136 if (update_type > UPDATE_TYPE_MED) {
8137 update_type = UPDATE_TYPE_FULL;
8145 *out_type = update_type;
8149 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8151 struct drm_connector *connector;
8152 struct drm_connector_state *conn_state;
8153 struct amdgpu_dm_connector *aconnector = NULL;
8155 for_each_new_connector_in_state(state, connector, conn_state, i) {
8156 if (conn_state->crtc != crtc)
8159 aconnector = to_amdgpu_dm_connector(connector);
8160 if (!aconnector->port || !aconnector->mst_port)
8169 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8173 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8174 * @dev: The DRM device
8175 * @state: The atomic state to commit
8177 * Validate that the given atomic state is programmable by DC into hardware.
8178 * This involves constructing a &struct dc_state reflecting the new hardware
8179 * state we wish to commit, then querying DC to see if it is programmable. It's
8180 * important not to modify the existing DC state. Otherwise, atomic_check
8181 * may unexpectedly commit hardware changes.
8183 * When validating the DC state, it's important that the right locks are
* acquired. For the full-update case, which removes/adds/updates streams on
* one CRTC while flipping on another CRTC, acquiring the global lock
* guarantees that any such full-update commit will wait for completion of
* any outstanding flip using DRM's synchronization events. See
8188 * dm_determine_update_type_for_commit()
* Note that DM adds the affected connectors for all CRTCs in state, even when
8191 * might not seem necessary. This is because DC stream creation requires the
8192 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8193 * be possible but non-trivial - a possible TODO item.
* Return: 0 on success, or a negative error code if validation failed.
*/
8197 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8198 struct drm_atomic_state *state)
8200 struct amdgpu_device *adev = dev->dev_private;
8201 struct dm_atomic_state *dm_state = NULL;
8202 struct dc *dc = adev->dm.dc;
8203 struct drm_connector *connector;
8204 struct drm_connector_state *old_con_state, *new_con_state;
8205 struct drm_crtc *crtc;
8206 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8207 struct drm_plane *plane;
8208 struct drm_plane_state *old_plane_state, *new_plane_state;
8209 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8210 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
/*
* This bool will be set to true for any modeset/reset
* or plane update that implies a non-fast surface update.
*/
8218 bool lock_and_validation_needed = false;
8220 ret = drm_atomic_helper_check_modeset(dev, state);
8224 if (adev->asic_type >= CHIP_NAVI10) {
8225 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8226 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8227 ret = add_affected_mst_dsc_crtcs(state, crtc);
8234 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8235 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8236 !new_crtc_state->color_mgmt_changed &&
8237 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8240 if (!new_crtc_state->enable)
8243 ret = drm_atomic_add_affected_connectors(state, crtc);
8247 ret = drm_atomic_add_affected_planes(state, crtc);
8253 * Add all primary and overlay planes on the CRTC to the state
8254 * whenever a plane is enabled to maintain correct z-ordering
8255 * and to enable fast surface updates.
8257 drm_for_each_crtc(crtc, dev) {
8258 bool modified = false;
8260 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8261 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8264 if (new_plane_state->crtc == crtc ||
8265 old_plane_state->crtc == crtc) {
8274 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8275 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8279 drm_atomic_get_plane_state(state, plane);
8281 if (IS_ERR(new_plane_state)) {
8282 ret = PTR_ERR(new_plane_state);
/* Remove existing planes if they are modified */
8289 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8290 ret = dm_update_plane_state(dc, state, plane,
8294 &lock_and_validation_needed);
8299 /* Disable all crtcs which require disable */
8300 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8301 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8305 &lock_and_validation_needed);
8310 /* Enable all crtcs which require enable */
8311 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8312 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8316 &lock_and_validation_needed);
8321 /* Add new/modified planes */
8322 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8323 ret = dm_update_plane_state(dc, state, plane,
8327 &lock_and_validation_needed);
8332 /* Run this here since we want to validate the streams we created */
8333 ret = drm_atomic_helper_check_planes(dev, state);
8337 if (state->legacy_cursor_update) {
/*
* This is a fast cursor update coming from the plane update
* helper; check if it can be done asynchronously for better
* performance.
*/
8343 state->async_update =
8344 !drm_atomic_helper_async_check(dev, state);
8347 * Skip the remaining global validation if this is an async
8348 * update. Cursor updates can be done without affecting
8349 * state or bandwidth calcs and this avoids the performance
8350 * penalty of locking the private state object and
8351 * allocating a new dc_state.
8353 if (state->async_update)
/* Check scaling and underscan changes */
/*
* TODO: Scaling-change validation was removed due to the inability to
* commit a new stream into the context without causing a full reset.
* Need to decide how to handle this.
*/
8362 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8363 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8364 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8365 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8367 /* Skip any modesets/resets */
8368 if (!acrtc || drm_atomic_crtc_needs_modeset(
8369 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
/* Skip anything that is not a scaling or underscan change */
8373 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8376 overall_update_type = UPDATE_TYPE_FULL;
8377 lock_and_validation_needed = true;
8380 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8384 if (overall_update_type < update_type)
8385 overall_update_type = update_type;
/*
* lock_and_validation_needed was an old way to determine if we need to
* set the global lock. Leaving it in to check if we broke any corner
* cases:
* lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
* lock_and_validation_needed false = UPDATE_TYPE_FAST
*/
8393 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif
		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			/* Take a reference so both states share one context. */
			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}
	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
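/*
 * A sink can only do variable refresh over DP/eDP if it is able to ignore
 * the MSA video timing parameters; the DP_MSA_TIMING_PAR_IGNORED bit in the
 * DP_DOWN_STREAM_PORT_COUNT DPCD register advertises exactly that.
 */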
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data)))
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;

	return capable;
}
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		return;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}
	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-zero, restrict FreeSync to DP and eDP sinks.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;

			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			/* EDID stores this field in 10 MHz units. */
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {
			/* A narrower range leaves no useful room for VRR. */
			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
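/*
 * Query the sink's PSR capability. The byte at the DP_PSR_SUPPORT DPCD
 * address holds the PSR version the panel implements; zero means PSR is
 * not supported.
 */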
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	/* PSR is only defined for eDP panels with an active connection. */
	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;

	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] != 0;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Number of static frames before generating the interrupt to enter
	 * PSR; init to a fail safe of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");
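	/*
	 * The nominal refresh rate is the pixel clock divided by the number
	 * of pixels per frame: pix_clk / (v_total * h_total). pix_clk_100hz
	 * is in units of 100 Hz, hence the extra factor of 100.
	 */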
	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
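	/*
	 * Example: at 60 Hz, frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames (~33 ms).
	 */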
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}
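/*
 * Illustrative sketch only (not called anywhere in this file): how the PSR
 * helpers above are typically sequenced for an eDP stream. The function
 * name and the idea of running all four steps back to back are assumptions
 * for illustration; in the driver, capability detection, link setup and the
 * enable/disable toggles happen at different points in the commit flow.
 */
static void __maybe_unused amdgpu_dm_psr_example_sequence(
		struct dc_stream_state *stream)
{
	/* 1. Read the sink's PSR capability once the link is detected. */
	amdgpu_dm_set_psr_caps(stream->link);

	/* 2. Program the PSR configuration for this stream on the link. */
	if (!amdgpu_dm_link_setup_psr(stream))
		return;

	/* 3. Allow PSR entry; the DMCU waits for the static-frame count. */
	amdgpu_dm_psr_enable(stream);

	/* 4. Block PSR again before new rendering touches the surface. */
	amdgpu_dm_psr_disable(stream);
}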