/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"

#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
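
/*
 * Note (added for clarity): the DMCUB firmware image wraps its
 * instruction-constant section in a PSP header and footer of the sizes
 * above; dm_dmub_sw_init() and dm_dmub_hw_init() below subtract
 * PSP_HEADER_BYTES and PSP_FOOTER_BYTES to recover the raw inst_const size.
 */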

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);
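
		/*
		 * For example (illustrative values): h_position = 0x50 and
		 * v_position = 0x20 pack into *position == 0x00500020 below.
		 */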
		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
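
/*
 * Clarifying note: VRR counts as "active" in both the variable-rate state
 * and the fixed-rate state below; the pageflip and vblank handlers use this
 * to decide whether vblank handling must be deferred past the front-porch.
 */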
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only now that it is done after the front-porch. This will
		 * also deliver page-flip completion events that have been
		 * queued to us if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		/* ASICs in this group need no DMCU firmware. */
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Record the version before logging it in the PSP path below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret;

	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
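
	/*
	 * The loop above builds a linear 16-entry LUT: 0xFFFF * i / 15 gives
	 * 0x0000, 0x1111, 0x2222, ..., 0xFFFF (worked example, i = 1:
	 * 65535 * 1 / 15 = 4369 = 0x1111).
	 */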

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
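	/* (Check: 0xFFFF = 65535, and 65535 * 0.01 = 655.35 ~= 655 = 0x28F.) */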
	params.min_abm_backlight = 0x28F;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (adev->in_gpu_reset) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (adev->in_gpu_reset) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = conn_base->dev->dev_private;
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From Euclid's division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
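	/*
	 * Worked example (illustrative, not in the original source): for
	 * max_cll = 62, the split below gives q = 1 and r = 30, so
	 * max = (1 << 1) * pre_computed_values[30] = 2 * 96 = 192 nits,
	 * matching 50*2**(62/32) ~= 191.6 from the formula above.
	 */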
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
			 aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}
2216 static void handle_hpd_irq(void *param)
2218 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2219 struct drm_connector *connector = &aconnector->base;
2220 struct drm_device *dev = connector->dev;
2221 enum dc_connection_type new_connection_type = dc_connection_none;
2222 #ifdef CONFIG_DRM_AMD_DC_HDCP
2223 struct amdgpu_device *adev = dev->dev_private;
2227 * In case of failure or MST there is no need to update the connector status
2228 * or notify the OS, since (in the MST case) MST does this in its own context.
2230 mutex_lock(&aconnector->hpd_lock);
2232 #ifdef CONFIG_DRM_AMD_DC_HDCP
2233 if (adev->dm.hdcp_workqueue)
2234 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2236 if (aconnector->fake_enable)
2237 aconnector->fake_enable = false;
2239 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2240 DRM_ERROR("KMS: Failed to detect connector\n");
2242 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2243 emulated_link_detect(aconnector->dc_link);
2246 drm_modeset_lock_all(dev);
2247 dm_restore_drm_connector_state(dev, connector);
2248 drm_modeset_unlock_all(dev);
2250 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2251 drm_kms_helper_hotplug_event(dev);
2253 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2254 amdgpu_dm_update_connector_after_detect(aconnector);
2257 drm_modeset_lock_all(dev);
2258 dm_restore_drm_connector_state(dev, connector);
2259 drm_modeset_unlock_all(dev);
2261 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2262 drm_kms_helper_hotplug_event(dev);
2264 mutex_unlock(&aconnector->hpd_lock);
2268 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2270 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2272 bool new_irq_handled = false;
2274 int dpcd_bytes_to_read;
2276 const int max_process_count = 30;
2277 int process_count = 0;
2279 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2281 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2282 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2283 /* DPCD 0x200 - 0x201 for downstream IRQ */
2284 dpcd_addr = DP_SINK_COUNT;
2286 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2287 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2288 dpcd_addr = DP_SINK_COUNT_ESI;
2291 dret = drm_dp_dpcd_read(
2292 &aconnector->dm_dp_aux.aux,
2295 dpcd_bytes_to_read);
2297 while (dret == dpcd_bytes_to_read &&
2298 process_count < max_process_count) {
2304 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2305 /* handle HPD short pulse irq */
2306 if (aconnector->mst_mgr.mst_state)
2308 &aconnector->mst_mgr,
2312 if (new_irq_handled) {
2313 /* ACK at DPCD to notify downstream */
2314 const int ack_dpcd_bytes_to_write =
2315 dpcd_bytes_to_read - 1;
2317 for (retry = 0; retry < 3; retry++) {
2320 wret = drm_dp_dpcd_write(
2321 &aconnector->dm_dp_aux.aux,
2324 ack_dpcd_bytes_to_write);
2325 if (wret == ack_dpcd_bytes_to_write)
2329 /* check if there is a new irq to be handled */
2330 dret = drm_dp_dpcd_read(
2331 &aconnector->dm_dp_aux.aux,
2334 dpcd_bytes_to_read);
2336 new_irq_handled = false;
2342 if (process_count == max_process_count)
2343 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2346 static void handle_hpd_rx_irq(void *param)
2348 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2349 struct drm_connector *connector = &aconnector->base;
2350 struct drm_device *dev = connector->dev;
2351 struct dc_link *dc_link = aconnector->dc_link;
2352 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2353 enum dc_connection_type new_connection_type = dc_connection_none;
2354 #ifdef CONFIG_DRM_AMD_DC_HDCP
2355 union hpd_irq_data hpd_irq_data;
2356 struct amdgpu_device *adev = dev->dev_private;
2358 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2362 * TODO: Temporarily add a mutex so the HPD interrupt doesn't hit a GPIO
2363 * conflict; once the i2c helper is implemented, this mutex should be retired.
2366 if (dc_link->type != dc_connection_mst_branch)
2367 mutex_lock(&aconnector->hpd_lock);
2370 #ifdef CONFIG_DRM_AMD_DC_HDCP
2371 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2373 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2375 !is_mst_root_connector) {
2376 /* Downstream Port status changed. */
2377 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2378 DRM_ERROR("KMS: Failed to detect connector\n");
2380 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2381 emulated_link_detect(dc_link);
2383 if (aconnector->fake_enable)
2384 aconnector->fake_enable = false;
2386 amdgpu_dm_update_connector_after_detect(aconnector);
2389 drm_modeset_lock_all(dev);
2390 dm_restore_drm_connector_state(dev, connector);
2391 drm_modeset_unlock_all(dev);
2393 drm_kms_helper_hotplug_event(dev);
2394 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2396 if (aconnector->fake_enable)
2397 aconnector->fake_enable = false;
2399 amdgpu_dm_update_connector_after_detect(aconnector);
2402 drm_modeset_lock_all(dev);
2403 dm_restore_drm_connector_state(dev, connector);
2404 drm_modeset_unlock_all(dev);
2406 drm_kms_helper_hotplug_event(dev);
2409 #ifdef CONFIG_DRM_AMD_DC_HDCP
2410 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2411 if (adev->dm.hdcp_workqueue)
2412 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2415 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2416 (dc_link->type == dc_connection_mst_branch))
2417 dm_handle_hpd_rx_irq(aconnector);
2419 if (dc_link->type != dc_connection_mst_branch) {
2420 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2421 mutex_unlock(&aconnector->hpd_lock);
2425 static void register_hpd_handlers(struct amdgpu_device *adev)
2427 struct drm_device *dev = adev->ddev;
2428 struct drm_connector *connector;
2429 struct amdgpu_dm_connector *aconnector;
2430 const struct dc_link *dc_link;
2431 struct dc_interrupt_params int_params = {0};
2433 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2434 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2436 list_for_each_entry(connector,
2437 &dev->mode_config.connector_list, head) {
2439 aconnector = to_amdgpu_dm_connector(connector);
2440 dc_link = aconnector->dc_link;
2442 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2443 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2444 int_params.irq_source = dc_link->irq_source_hpd;
2446 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2448 (void *) aconnector);
2451 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2453 /* Also register for DP short pulse (hpd_rx). */
2454 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2455 int_params.irq_source = dc_link->irq_source_hpd_rx;
2457 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2459 (void *) aconnector);
2464 /* Register IRQ sources and initialize IRQ callbacks */
2465 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2467 struct dc *dc = adev->dm.dc;
2468 struct common_irq_params *c_irq_params;
2469 struct dc_interrupt_params int_params = {0};
2472 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2474 if (adev->asic_type >= CHIP_VEGA10)
2475 client_id = SOC15_IH_CLIENTID_DCE;
2477 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2478 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2481 * Actions of amdgpu_irq_add_id():
2482 * 1. Register a set() function with base driver.
2483 * Base driver will call set() function to enable/disable an
2484 * interrupt in DC hardware.
2485 * 2. Register amdgpu_dm_irq_handler().
2486 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2487 * coming from DC hardware.
2488 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2489 * for acknowledging and handling. */
2491 /* Use VBLANK interrupt */
2492 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2493 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2495 DRM_ERROR("Failed to add crtc irq id!\n");
2499 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2500 int_params.irq_source =
2501 dc_interrupt_to_irq_source(dc, i, 0);
2503 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2505 c_irq_params->adev = adev;
2506 c_irq_params->irq_src = int_params.irq_source;
2508 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2509 dm_crtc_high_irq, c_irq_params);
2512 /* Use VUPDATE interrupt */
2513 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2514 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2516 DRM_ERROR("Failed to add vupdate irq id!\n");
2520 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2521 int_params.irq_source =
2522 dc_interrupt_to_irq_source(dc, i, 0);
2524 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2526 c_irq_params->adev = adev;
2527 c_irq_params->irq_src = int_params.irq_source;
2529 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2530 dm_vupdate_high_irq, c_irq_params);
2533 /* Use GRPH_PFLIP interrupt */
2534 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2535 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2536 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2538 DRM_ERROR("Failed to add page flip irq id!\n");
2542 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2543 int_params.irq_source =
2544 dc_interrupt_to_irq_source(dc, i, 0);
2546 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2548 c_irq_params->adev = adev;
2549 c_irq_params->irq_src = int_params.irq_source;
2551 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2552 dm_pflip_high_irq, c_irq_params);
2557 r = amdgpu_irq_add_id(adev, client_id,
2558 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2560 DRM_ERROR("Failed to add hpd irq id!\n");
2564 register_hpd_handlers(adev);
2569 #if defined(CONFIG_DRM_AMD_DC_DCN)
2570 /* Register IRQ sources and initialize IRQ callbacks */
2571 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2573 struct dc *dc = adev->dm.dc;
2574 struct common_irq_params *c_irq_params;
2575 struct dc_interrupt_params int_params = {0};
2579 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2580 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2583 * Actions of amdgpu_irq_add_id():
2584 * 1. Register a set() function with base driver.
2585 * Base driver will call set() function to enable/disable an
2586 * interrupt in DC hardware.
2587 * 2. Register amdgpu_dm_irq_handler().
2588 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2589 * coming from DC hardware.
2590 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2591 * for acknowledging and handling.
2594 /* Use VSTARTUP interrupt */
2595 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2596 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2598 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2601 DRM_ERROR("Failed to add crtc irq id!\n");
2605 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2606 int_params.irq_source =
2607 dc_interrupt_to_irq_source(dc, i, 0);
2609 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2611 c_irq_params->adev = adev;
2612 c_irq_params->irq_src = int_params.irq_source;
2614 amdgpu_dm_irq_register_interrupt(
2615 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2618 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2619 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2620 * to trigger at the end of each vblank, regardless of the state of the lock,
2621 * matching DCE behaviour.
2623 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2624 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2626 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2629 DRM_ERROR("Failed to add vupdate irq id!\n");
2633 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2634 int_params.irq_source =
2635 dc_interrupt_to_irq_source(dc, i, 0);
2637 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2639 c_irq_params->adev = adev;
2640 c_irq_params->irq_src = int_params.irq_source;
2642 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2643 dm_vupdate_high_irq, c_irq_params);
2646 /* Use GRPH_PFLIP interrupt */
2647 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2648 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2650 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2652 DRM_ERROR("Failed to add page flip irq id!\n");
2656 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2657 int_params.irq_source =
2658 dc_interrupt_to_irq_source(dc, i, 0);
2660 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2662 c_irq_params->adev = adev;
2663 c_irq_params->irq_src = int_params.irq_source;
2665 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2666 dm_pflip_high_irq, c_irq_params);
2671 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2674 DRM_ERROR("Failed to add hpd irq id!\n");
2678 register_hpd_handlers(adev);
2685 * Acquires the lock for the atomic state object and returns
2686 * the new atomic state.
2688 * This should only be called during atomic check.
2690 static int dm_atomic_get_state(struct drm_atomic_state *state,
2691 struct dm_atomic_state **dm_state)
2693 struct drm_device *dev = state->dev;
2694 struct amdgpu_device *adev = dev->dev_private;
2695 struct amdgpu_display_manager *dm = &adev->dm;
2696 struct drm_private_state *priv_state;
2701 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2702 if (IS_ERR(priv_state))
2703 return PTR_ERR(priv_state);
2705 *dm_state = to_dm_atomic_state(priv_state);
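/*
 * Minimal usage sketch (hypothetical caller; only valid during atomic
 * check, where the DRM private-object locking rules are satisfied):
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be inspected/updated for this commit
 */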
2710 struct dm_atomic_state *
2711 dm_atomic_get_new_state(struct drm_atomic_state *state)
2713 struct drm_device *dev = state->dev;
2714 struct amdgpu_device *adev = dev->dev_private;
2715 struct amdgpu_display_manager *dm = &adev->dm;
2716 struct drm_private_obj *obj;
2717 struct drm_private_state *new_obj_state;
2720 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2721 if (obj->funcs == dm->atomic_obj.funcs)
2722 return to_dm_atomic_state(new_obj_state);
2728 struct dm_atomic_state *
2729 dm_atomic_get_old_state(struct drm_atomic_state *state)
2731 struct drm_device *dev = state->dev;
2732 struct amdgpu_device *adev = dev->dev_private;
2733 struct amdgpu_display_manager *dm = &adev->dm;
2734 struct drm_private_obj *obj;
2735 struct drm_private_state *old_obj_state;
2738 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2739 if (obj->funcs == dm->atomic_obj.funcs)
2740 return to_dm_atomic_state(old_obj_state);
2746 static struct drm_private_state *
2747 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2749 struct dm_atomic_state *old_state, *new_state;
2751 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2755 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2757 old_state = to_dm_atomic_state(obj->state);
2759 if (old_state && old_state->context)
2760 new_state->context = dc_copy_state(old_state->context);
2762 if (!new_state->context) {
2767 return &new_state->base;
2770 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2771 struct drm_private_state *state)
2773 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2775 if (dm_state && dm_state->context)
2776 dc_release_state(dm_state->context);
2781 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2782 .atomic_duplicate_state = dm_atomic_duplicate_state,
2783 .atomic_destroy_state = dm_atomic_destroy_state,
2786 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2788 struct dm_atomic_state *state;
2791 adev->mode_info.mode_config_initialized = true;
2793 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2794 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2796 adev->ddev->mode_config.max_width = 16384;
2797 adev->ddev->mode_config.max_height = 16384;
2799 adev->ddev->mode_config.preferred_depth = 24;
2800 adev->ddev->mode_config.prefer_shadow = 1;
2801 /* indicates support for immediate flip */
2802 adev->ddev->mode_config.async_page_flip = true;
2804 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2806 state = kzalloc(sizeof(*state), GFP_KERNEL);
2810 state->context = dc_create_state(adev->dm.dc);
2811 if (!state->context) {
2816 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2818 drm_atomic_private_obj_init(adev->ddev,
2819 &adev->dm.atomic_obj,
2821 &dm_atomic_state_funcs);
2823 r = amdgpu_display_modeset_create_props(adev);
2827 r = amdgpu_dm_audio_init(adev);
2834 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2835 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2836 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2838 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2839 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2841 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2843 #if defined(CONFIG_ACPI)
2844 struct amdgpu_dm_backlight_caps caps;
2846 if (dm->backlight_caps.caps_valid)
2849 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2850 if (caps.caps_valid) {
2851 dm->backlight_caps.caps_valid = true;
2852 if (caps.aux_support)
2854 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2855 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2857 dm->backlight_caps.min_input_signal =
2858 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2859 dm->backlight_caps.max_input_signal =
2860 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2863 if (dm->backlight_caps.aux_support)
2866 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2867 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2871 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2878 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2879 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2884 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2885 const uint32_t user_brightness)
2887 u32 min, max, conversion_pace;
2888 u32 brightness = user_brightness;
2893 if (!caps->aux_support) {
2894 max = caps->max_input_signal;
2895 min = caps->min_input_signal;
2897 * The brightness input is in the range 0-255.
2898 * It needs to be rescaled to lie between the
2899 * requested min and max input signal.
2900 * It also needs to be scaled up by 0x101 to
2901 * match the DC interface, which has a range of
2902 * 0 to 0xffff.
2904 conversion_pace = 0x101;
2905 brightness =
2906 user_brightness
2907 * conversion_pace
2908 * (max - min)
2909 / AMDGPU_MAX_BL_LEVEL
2910 + min * conversion_pace;
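/*
 * Worked example (a sketch using the driver defaults min = 12, max = 255):
 * user_brightness = 255 gives 255 * 0x101 * (255 - 12) / 255
 * + 12 * 0x101 = 62451 + 3084 = 65535 = 0xffff, i.e. full scale on the
 * 16-bit DC interface.
 */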
2913 * We are doing a linear interpolation here, which is OK but
2914 * does not provide the optimal result. We probably want
2915 * something close to the Perceptual Quantizer (PQ) curve.
2917 max = caps->aux_max_input_signal;
2918 min = caps->aux_min_input_signal;
2920 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2921 + user_brightness * max;
2922 // Multiply the value by 1000 since we use millinits
2923 brightness *= 1000;
2924 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
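/*
 * Worked example for the AUX path (hypothetical caps, aux_min = 10 and
 * aux_max = 228 nits): user_brightness = 128 gives
 * (255 - 128) * 10 + 128 * 228 = 30454; scaled to millinits and divided
 * by 255 this is DIV_ROUND_CLOSEST(30454000, 255) = 119427, i.e. ~119 nits.
 */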
2931 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2933 struct amdgpu_display_manager *dm = bl_get_data(bd);
2934 struct amdgpu_dm_backlight_caps caps;
2935 struct dc_link *link = NULL;
2939 amdgpu_dm_update_backlight_caps(dm);
2940 caps = dm->backlight_caps;
2942 link = (struct dc_link *)dm->backlight_link;
2944 brightness = convert_brightness(&caps, bd->props.brightness);
2945 // Change brightness based on AUX property
2946 if (caps.aux_support)
2947 return set_backlight_via_aux(link, brightness);
2949 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2954 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2956 struct amdgpu_display_manager *dm = bl_get_data(bd);
2957 int ret = dc_link_get_backlight_level(dm->backlight_link);
2959 if (ret == DC_ERROR_UNEXPECTED)
2960 return bd->props.brightness;
2964 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2965 .options = BL_CORE_SUSPENDRESUME,
2966 .get_brightness = amdgpu_dm_backlight_get_brightness,
2967 .update_status = amdgpu_dm_backlight_update_status,
2971 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2974 struct backlight_properties props = { 0 };
2976 amdgpu_dm_update_backlight_caps(dm);
2978 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2979 props.brightness = AMDGPU_MAX_BL_LEVEL;
2980 props.type = BACKLIGHT_RAW;
2982 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2983 dm->adev->ddev->primary->index);
2985 dm->backlight_dev = backlight_device_register(bl_name,
2986 dm->adev->ddev->dev,
2988 &amdgpu_dm_backlight_ops,
2991 if (IS_ERR(dm->backlight_dev))
2992 DRM_ERROR("DM: Backlight registration failed!\n");
2994 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2999 static int initialize_plane(struct amdgpu_display_manager *dm,
3000 struct amdgpu_mode_info *mode_info, int plane_id,
3001 enum drm_plane_type plane_type,
3002 const struct dc_plane_cap *plane_cap)
3004 struct drm_plane *plane;
3005 unsigned long possible_crtcs;
3008 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3010 DRM_ERROR("KMS: Failed to allocate plane\n");
3013 plane->type = plane_type;
3016 * HACK: IGT tests expect that the primary plane for a CRTC
3017 * can only have one possible CRTC. Only expose support for
3018 * any CRTC on planes that are not going to be used as a
3019 * primary plane for a CRTC - i.e. overlay or underlay planes.
3021 possible_crtcs = 1 << plane_id;
3022 if (plane_id >= dm->dc->caps.max_streams)
3023 possible_crtcs = 0xff;
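/*
 * Example (sketch, assuming max_streams = 4): plane_id = 2 is a primary
 * plane and gets possible_crtcs = 1 << 2 = 0x4 (CRTC 2 only), while
 * plane_id = 5 is an overlay plane and gets 0xff (any CRTC).
 */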
3025 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3028 DRM_ERROR("KMS: Failed to initialize plane\n");
3034 mode_info->planes[plane_id] = plane;
3040 static void register_backlight_device(struct amdgpu_display_manager *dm,
3041 struct dc_link *link)
3043 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3044 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3046 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3047 link->type != dc_connection_none) {
3049 * Even if registration fails, we should continue with
3050 * DM initialization, because not having a backlight control
3051 * is better than a black screen.
3053 amdgpu_dm_register_backlight_device(dm);
3055 if (dm->backlight_dev)
3056 dm->backlight_link = link;
3063 * In this architecture, the association
3064 * connector -> encoder -> crtc
3065 * is not really required. The crtc and connector will hold the
3066 * display_index as an abstraction to use with the DAL component.
3068 * Returns 0 on success
3070 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3072 struct amdgpu_display_manager *dm = &adev->dm;
3074 struct amdgpu_dm_connector *aconnector = NULL;
3075 struct amdgpu_encoder *aencoder = NULL;
3076 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3078 int32_t primary_planes;
3079 enum dc_connection_type new_connection_type = dc_connection_none;
3080 const struct dc_plane_cap *plane;
3082 link_cnt = dm->dc->caps.max_links;
3083 if (amdgpu_dm_mode_config_init(dm->adev)) {
3084 DRM_ERROR("DM: Failed to initialize mode config\n");
3088 /* There is one primary plane per CRTC */
3089 primary_planes = dm->dc->caps.max_streams;
3090 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3093 * Initialize primary planes, implicit planes for legacy IOCTLS.
3094 * Order is reversed to match iteration order in atomic check.
3096 for (i = (primary_planes - 1); i >= 0; i--) {
3097 plane = &dm->dc->caps.planes[i];
3099 if (initialize_plane(dm, mode_info, i,
3100 DRM_PLANE_TYPE_PRIMARY, plane)) {
3101 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3107 * Initialize overlay planes, index starting after primary planes.
3108 * These planes have a higher DRM index than the primary planes since
3109 * they should be considered as having a higher z-order.
3110 * Order is reversed to match iteration order in atomic check.
3112 * Only support DCN for now, and only expose one so we don't encourage
3113 * userspace to use up all the pipes.
3115 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3116 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3118 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3121 if (!plane->blends_with_above || !plane->blends_with_below)
3124 if (!plane->pixel_format_support.argb8888)
3127 if (initialize_plane(dm, NULL, primary_planes + i,
3128 DRM_PLANE_TYPE_OVERLAY, plane)) {
3129 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3133 /* Only create one overlay plane. */
3137 for (i = 0; i < dm->dc->caps.max_streams; i++)
3138 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3139 DRM_ERROR("KMS: Failed to initialize crtc\n");
3143 dm->display_indexes_num = dm->dc->caps.max_streams;
3145 /* loops over all connectors on the board */
3146 for (i = 0; i < link_cnt; i++) {
3147 struct dc_link *link = NULL;
3149 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3151 "KMS: Cannot support more than %d display indexes\n",
3152 AMDGPU_DM_MAX_DISPLAY_INDEX);
3156 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3160 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3164 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3165 DRM_ERROR("KMS: Failed to initialize encoder\n");
3169 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3170 DRM_ERROR("KMS: Failed to initialize connector\n");
3174 link = dc_get_link_at_index(dm->dc, i);
3176 if (!dc_link_detect_sink(link, &new_connection_type))
3177 DRM_ERROR("KMS: Failed to detect connector\n");
3179 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3180 emulated_link_detect(link);
3181 amdgpu_dm_update_connector_after_detect(aconnector);
3183 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3184 amdgpu_dm_update_connector_after_detect(aconnector);
3185 register_backlight_device(dm, link);
3186 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3187 amdgpu_dm_set_psr_caps(link);
3193 /* Software is initialized. Now we can register interrupt handlers. */
3194 switch (adev->asic_type) {
3204 case CHIP_POLARIS11:
3205 case CHIP_POLARIS10:
3206 case CHIP_POLARIS12:
3211 if (dce110_register_irq_handlers(dm->adev)) {
3212 DRM_ERROR("DM: Failed to initialize IRQ\n");
3216 #if defined(CONFIG_DRM_AMD_DC_DCN)
3222 if (dcn10_register_irq_handlers(dm->adev)) {
3223 DRM_ERROR("DM: Failed to initialize IRQ\n");
3229 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3233 /* No userspace support. */
3234 dm->dc->debug.disable_tri_buf = true;
3244 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3246 drm_mode_config_cleanup(dm->ddev);
3247 drm_atomic_private_obj_fini(&dm->atomic_obj);
3251 /******************************************************************************
3252 * amdgpu_display_funcs functions
3253 *****************************************************************************/
3256 * dm_bandwidth_update - program display watermarks
3258 * @adev: amdgpu_device pointer
3260 * Calculate and program the display watermarks and line buffer allocation.
3262 static void dm_bandwidth_update(struct amdgpu_device *adev)
3264 /* TODO: implement later */
3267 static const struct amdgpu_display_funcs dm_display_funcs = {
3268 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3269 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3270 .backlight_set_level = NULL, /* never called for DC */
3271 .backlight_get_level = NULL, /* never called for DC */
3272 .hpd_sense = NULL,/* called unconditionally */
3273 .hpd_set_polarity = NULL, /* called unconditionally */
3274 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3275 .page_flip_get_scanoutpos =
3276 dm_crtc_get_scanoutpos,/* called unconditionally */
3277 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3278 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3281 #if defined(CONFIG_DEBUG_KERNEL_DC)
3283 static ssize_t s3_debug_store(struct device *device,
3284 struct device_attribute *attr,
3290 struct drm_device *drm_dev = dev_get_drvdata(device);
3291 struct amdgpu_device *adev = drm_dev->dev_private;
3293 ret = kstrtoint(buf, 0, &s3_state);
3298 drm_kms_helper_hotplug_event(adev->ddev);
3303 return ret == 0 ? count : 0;
3306 DEVICE_ATTR_WO(s3_debug);
3310 static int dm_early_init(void *handle)
3312 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3314 switch (adev->asic_type) {
3317 adev->mode_info.num_crtc = 6;
3318 adev->mode_info.num_hpd = 6;
3319 adev->mode_info.num_dig = 6;
3322 adev->mode_info.num_crtc = 4;
3323 adev->mode_info.num_hpd = 6;
3324 adev->mode_info.num_dig = 7;
3328 adev->mode_info.num_crtc = 2;
3329 adev->mode_info.num_hpd = 6;
3330 adev->mode_info.num_dig = 6;
3334 adev->mode_info.num_crtc = 6;
3335 adev->mode_info.num_hpd = 6;
3336 adev->mode_info.num_dig = 7;
3339 adev->mode_info.num_crtc = 3;
3340 adev->mode_info.num_hpd = 6;
3341 adev->mode_info.num_dig = 9;
3344 adev->mode_info.num_crtc = 2;
3345 adev->mode_info.num_hpd = 6;
3346 adev->mode_info.num_dig = 9;
3348 case CHIP_POLARIS11:
3349 case CHIP_POLARIS12:
3350 adev->mode_info.num_crtc = 5;
3351 adev->mode_info.num_hpd = 5;
3352 adev->mode_info.num_dig = 5;
3354 case CHIP_POLARIS10:
3356 adev->mode_info.num_crtc = 6;
3357 adev->mode_info.num_hpd = 6;
3358 adev->mode_info.num_dig = 6;
3363 adev->mode_info.num_crtc = 6;
3364 adev->mode_info.num_hpd = 6;
3365 adev->mode_info.num_dig = 6;
3367 #if defined(CONFIG_DRM_AMD_DC_DCN)
3369 adev->mode_info.num_crtc = 4;
3370 adev->mode_info.num_hpd = 4;
3371 adev->mode_info.num_dig = 4;
3376 adev->mode_info.num_crtc = 6;
3377 adev->mode_info.num_hpd = 6;
3378 adev->mode_info.num_dig = 6;
3381 adev->mode_info.num_crtc = 5;
3382 adev->mode_info.num_hpd = 5;
3383 adev->mode_info.num_dig = 5;
3386 adev->mode_info.num_crtc = 4;
3387 adev->mode_info.num_hpd = 4;
3388 adev->mode_info.num_dig = 4;
3391 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3395 amdgpu_dm_set_irq_funcs(adev);
3397 if (adev->mode_info.funcs == NULL)
3398 adev->mode_info.funcs = &dm_display_funcs;
3401 * Note: Do NOT change adev->audio_endpt_rreg and
3402 * adev->audio_endpt_wreg because they are initialised in
3403 * amdgpu_device_init()
3405 #if defined(CONFIG_DEBUG_KERNEL_DC)
3408 &dev_attr_s3_debug);
3414 static bool modeset_required(struct drm_crtc_state *crtc_state,
3415 struct dc_stream_state *new_stream,
3416 struct dc_stream_state *old_stream)
3418 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3421 if (!crtc_state->enable)
3424 return crtc_state->active;
3427 static bool modereset_required(struct drm_crtc_state *crtc_state)
3429 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3432 return !crtc_state->enable || !crtc_state->active;
3435 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3437 drm_encoder_cleanup(encoder);
3441 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3442 .destroy = amdgpu_dm_encoder_destroy,
3446 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3447 struct dc_scaling_info *scaling_info)
3449 int scale_w, scale_h;
3451 memset(scaling_info, 0, sizeof(*scaling_info));
3453 /* Source is fixed 16.16 but we ignore mantissa for now... */
3454 scaling_info->src_rect.x = state->src_x >> 16;
3455 scaling_info->src_rect.y = state->src_y >> 16;
3457 scaling_info->src_rect.width = state->src_w >> 16;
3458 if (scaling_info->src_rect.width == 0)
3461 scaling_info->src_rect.height = state->src_h >> 16;
3462 if (scaling_info->src_rect.height == 0)
3465 scaling_info->dst_rect.x = state->crtc_x;
3466 scaling_info->dst_rect.y = state->crtc_y;
3468 if (state->crtc_w == 0)
3471 scaling_info->dst_rect.width = state->crtc_w;
3473 if (state->crtc_h == 0)
3476 scaling_info->dst_rect.height = state->crtc_h;
3478 /* DRM doesn't specify clipping on destination output. */
3479 scaling_info->clip_rect = scaling_info->dst_rect;
3481 /* TODO: Validate scaling per-format with DC plane caps */
3482 scale_w = scaling_info->dst_rect.width * 1000 /
3483 scaling_info->src_rect.width;
3485 if (scale_w < 250 || scale_w > 16000)
3488 scale_h = scaling_info->dst_rect.height * 1000 /
3489 scaling_info->src_rect.height;
3491 if (scale_h < 250 || scale_h > 16000)
3495 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3496 * assume reasonable defaults based on the format.
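/*
 * Example (sketch): a 1920x1080 source (src_w = 1920 << 16 in DRM's 16.16
 * fixed point) scaled to a 960x540 CRTC rect gives
 * scale_w = 960 * 1000 / 1920 = 500, i.e. 0.5x, which is within the
 * accepted 0.25x-16x (250-16000) window above.
 */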
3502 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3503 uint64_t *tiling_flags, bool *tmz_surface)
3505 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3506 int r = amdgpu_bo_reserve(rbo, false);
3509 /* Don't show error message when returning -ERESTARTSYS */
3510 if (r != -ERESTARTSYS)
3511 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3516 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3519 *tmz_surface = amdgpu_bo_encrypted(rbo);
3521 amdgpu_bo_unreserve(rbo);
3526 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3528 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3530 return offset ? (address + offset * 256) : 0;
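/*
 * Example (sketch): a DCC_OFFSET_256B value of 16 in the tiling flags
 * places the DCC metadata 16 * 256 = 4096 bytes past the surface base
 * address; an offset of 0 means the surface has no DCC metadata.
 */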
3534 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3535 const struct amdgpu_framebuffer *afb,
3536 const enum surface_pixel_format format,
3537 const enum dc_rotation_angle rotation,
3538 const struct plane_size *plane_size,
3539 const union dc_tiling_info *tiling_info,
3540 const uint64_t info,
3541 struct dc_plane_dcc_param *dcc,
3542 struct dc_plane_address *address,
3543 bool force_disable_dcc)
3545 struct dc *dc = adev->dm.dc;
3546 struct dc_dcc_surface_param input;
3547 struct dc_surface_dcc_cap output;
3548 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3549 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3550 uint64_t dcc_address;
3552 memset(&input, 0, sizeof(input));
3553 memset(&output, 0, sizeof(output));
3555 if (force_disable_dcc)
3561 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3564 if (!dc->cap_funcs.get_dcc_compression_cap)
3567 input.format = format;
3568 input.surface_size.width = plane_size->surface_size.width;
3569 input.surface_size.height = plane_size->surface_size.height;
3570 input.swizzle_mode = tiling_info->gfx9.swizzle;
3572 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3573 input.scan = SCAN_DIRECTION_HORIZONTAL;
3574 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3575 input.scan = SCAN_DIRECTION_VERTICAL;
3577 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3580 if (!output.capable)
3583 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3588 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3589 dcc->independent_64b_blks = i64b;
3591 dcc_address = get_dcc_address(afb->address, info);
3592 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3593 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3599 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3600 const struct amdgpu_framebuffer *afb,
3601 const enum surface_pixel_format format,
3602 const enum dc_rotation_angle rotation,
3603 const uint64_t tiling_flags,
3604 union dc_tiling_info *tiling_info,
3605 struct plane_size *plane_size,
3606 struct dc_plane_dcc_param *dcc,
3607 struct dc_plane_address *address,
3609 bool force_disable_dcc)
3611 const struct drm_framebuffer *fb = &afb->base;
3614 memset(tiling_info, 0, sizeof(*tiling_info));
3615 memset(plane_size, 0, sizeof(*plane_size));
3616 memset(dcc, 0, sizeof(*dcc));
3617 memset(address, 0, sizeof(*address));
3619 address->tmz_surface = tmz_surface;
3621 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3622 plane_size->surface_size.x = 0;
3623 plane_size->surface_size.y = 0;
3624 plane_size->surface_size.width = fb->width;
3625 plane_size->surface_size.height = fb->height;
3626 plane_size->surface_pitch =
3627 fb->pitches[0] / fb->format->cpp[0];
3629 address->type = PLN_ADDR_TYPE_GRAPHICS;
3630 address->grph.addr.low_part = lower_32_bits(afb->address);
3631 address->grph.addr.high_part = upper_32_bits(afb->address);
3632 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3633 uint64_t chroma_addr = afb->address + fb->offsets[1];
3635 plane_size->surface_size.x = 0;
3636 plane_size->surface_size.y = 0;
3637 plane_size->surface_size.width = fb->width;
3638 plane_size->surface_size.height = fb->height;
3639 plane_size->surface_pitch =
3640 fb->pitches[0] / fb->format->cpp[0];
3642 plane_size->chroma_size.x = 0;
3643 plane_size->chroma_size.y = 0;
3644 /* TODO: set these based on surface format */
3645 plane_size->chroma_size.width = fb->width / 2;
3646 plane_size->chroma_size.height = fb->height / 2;
3648 plane_size->chroma_pitch =
3649 fb->pitches[1] / fb->format->cpp[1];
3651 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3652 address->video_progressive.luma_addr.low_part =
3653 lower_32_bits(afb->address);
3654 address->video_progressive.luma_addr.high_part =
3655 upper_32_bits(afb->address);
3656 address->video_progressive.chroma_addr.low_part =
3657 lower_32_bits(chroma_addr);
3658 address->video_progressive.chroma_addr.high_part =
3659 upper_32_bits(chroma_addr);
3662 /* Fill GFX8 params */
3663 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3664 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3666 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3667 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3668 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3669 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3670 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3672 /* XXX fix me for VI */
3673 tiling_info->gfx8.num_banks = num_banks;
3674 tiling_info->gfx8.array_mode =
3675 DC_ARRAY_2D_TILED_THIN1;
3676 tiling_info->gfx8.tile_split = tile_split;
3677 tiling_info->gfx8.bank_width = bankw;
3678 tiling_info->gfx8.bank_height = bankh;
3679 tiling_info->gfx8.tile_aspect = mtaspect;
3680 tiling_info->gfx8.tile_mode =
3681 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3682 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3683 == DC_ARRAY_1D_TILED_THIN1) {
3684 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3687 tiling_info->gfx8.pipe_config =
3688 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3690 if (adev->asic_type == CHIP_VEGA10 ||
3691 adev->asic_type == CHIP_VEGA12 ||
3692 adev->asic_type == CHIP_VEGA20 ||
3693 adev->asic_type == CHIP_NAVI10 ||
3694 adev->asic_type == CHIP_NAVI14 ||
3695 adev->asic_type == CHIP_NAVI12 ||
3696 adev->asic_type == CHIP_RENOIR ||
3697 adev->asic_type == CHIP_RAVEN) {
3698 /* Fill GFX9 params */
3699 tiling_info->gfx9.num_pipes =
3700 adev->gfx.config.gb_addr_config_fields.num_pipes;
3701 tiling_info->gfx9.num_banks =
3702 adev->gfx.config.gb_addr_config_fields.num_banks;
3703 tiling_info->gfx9.pipe_interleave =
3704 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3705 tiling_info->gfx9.num_shader_engines =
3706 adev->gfx.config.gb_addr_config_fields.num_se;
3707 tiling_info->gfx9.max_compressed_frags =
3708 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3709 tiling_info->gfx9.num_rb_per_se =
3710 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3711 tiling_info->gfx9.swizzle =
3712 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3713 tiling_info->gfx9.shaderEnable = 1;
3715 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3716 plane_size, tiling_info,
3717 tiling_flags, dcc, address,
3727 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3728 bool *per_pixel_alpha, bool *global_alpha,
3729 int *global_alpha_value)
3731 *per_pixel_alpha = false;
3732 *global_alpha = false;
3733 *global_alpha_value = 0xff;
3735 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3738 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3739 static const uint32_t alpha_formats[] = {
3740 DRM_FORMAT_ARGB8888,
3741 DRM_FORMAT_RGBA8888,
3742 DRM_FORMAT_ABGR8888,
3744 uint32_t format = plane_state->fb->format->format;
3747 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3748 if (format == alpha_formats[i]) {
3749 *per_pixel_alpha = true;
3755 if (plane_state->alpha < 0xffff) {
3756 *global_alpha = true;
3757 *global_alpha_value = plane_state->alpha >> 8;
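/*
 * Example (sketch): a 16-bit DRM plane alpha of 0x8000 maps to the 8-bit
 * DC value 0x8000 >> 8 = 0x80, i.e. roughly 50% global alpha; 0xffff is
 * fully opaque and never reaches this path since it is not < 0xffff.
 */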
3762 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3763 const enum surface_pixel_format format,
3764 enum dc_color_space *color_space)
3768 *color_space = COLOR_SPACE_SRGB;
3770 /* DRM color properties only affect non-RGB formats. */
3771 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3774 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3776 switch (plane_state->color_encoding) {
3777 case DRM_COLOR_YCBCR_BT601:
3779 *color_space = COLOR_SPACE_YCBCR601;
3781 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3784 case DRM_COLOR_YCBCR_BT709:
3786 *color_space = COLOR_SPACE_YCBCR709;
3788 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3791 case DRM_COLOR_YCBCR_BT2020:
3793 *color_space = COLOR_SPACE_2020_YCBCR;
3806 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3807 const struct drm_plane_state *plane_state,
3808 const uint64_t tiling_flags,
3809 struct dc_plane_info *plane_info,
3810 struct dc_plane_address *address,
3812 bool force_disable_dcc)
3814 const struct drm_framebuffer *fb = plane_state->fb;
3815 const struct amdgpu_framebuffer *afb =
3816 to_amdgpu_framebuffer(plane_state->fb);
3817 struct drm_format_name_buf format_name;
3820 memset(plane_info, 0, sizeof(*plane_info));
3822 switch (fb->format->format) {
3824 plane_info->format =
3825 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3827 case DRM_FORMAT_RGB565:
3828 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3830 case DRM_FORMAT_XRGB8888:
3831 case DRM_FORMAT_ARGB8888:
3832 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3834 case DRM_FORMAT_XRGB2101010:
3835 case DRM_FORMAT_ARGB2101010:
3836 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3838 case DRM_FORMAT_XBGR2101010:
3839 case DRM_FORMAT_ABGR2101010:
3840 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3842 case DRM_FORMAT_XBGR8888:
3843 case DRM_FORMAT_ABGR8888:
3844 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3846 case DRM_FORMAT_NV21:
3847 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3849 case DRM_FORMAT_NV12:
3850 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3852 case DRM_FORMAT_P010:
3853 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3855 case DRM_FORMAT_XRGB16161616F:
3856 case DRM_FORMAT_ARGB16161616F:
3857 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3859 case DRM_FORMAT_XBGR16161616F:
3860 case DRM_FORMAT_ABGR16161616F:
3861 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3865 "Unsupported screen format %s\n",
3866 drm_get_format_name(fb->format->format, &format_name));
3870 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3871 case DRM_MODE_ROTATE_0:
3872 plane_info->rotation = ROTATION_ANGLE_0;
3874 case DRM_MODE_ROTATE_90:
3875 plane_info->rotation = ROTATION_ANGLE_90;
3877 case DRM_MODE_ROTATE_180:
3878 plane_info->rotation = ROTATION_ANGLE_180;
3880 case DRM_MODE_ROTATE_270:
3881 plane_info->rotation = ROTATION_ANGLE_270;
3884 plane_info->rotation = ROTATION_ANGLE_0;
3888 plane_info->visible = true;
3889 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3891 plane_info->layer_index = 0;
3893 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3894 &plane_info->color_space);
3898 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3899 plane_info->rotation, tiling_flags,
3900 &plane_info->tiling_info,
3901 &plane_info->plane_size,
3902 &plane_info->dcc, address, tmz_surface,
3907 fill_blending_from_plane_state(
3908 plane_state, &plane_info->per_pixel_alpha,
3909 &plane_info->global_alpha, &plane_info->global_alpha_value);
3914 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3915 struct dc_plane_state *dc_plane_state,
3916 struct drm_plane_state *plane_state,
3917 struct drm_crtc_state *crtc_state)
3919 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3920 const struct amdgpu_framebuffer *amdgpu_fb =
3921 to_amdgpu_framebuffer(plane_state->fb);
3922 struct dc_scaling_info scaling_info;
3923 struct dc_plane_info plane_info;
3924 uint64_t tiling_flags;
3926 bool tmz_surface = false;
3927 bool force_disable_dcc = false;
3929 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3933 dc_plane_state->src_rect = scaling_info.src_rect;
3934 dc_plane_state->dst_rect = scaling_info.dst_rect;
3935 dc_plane_state->clip_rect = scaling_info.clip_rect;
3936 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3938 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3942 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3943 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3945 &dc_plane_state->address,
3951 dc_plane_state->format = plane_info.format;
3952 dc_plane_state->color_space = plane_info.color_space;
3954 dc_plane_state->plane_size = plane_info.plane_size;
3955 dc_plane_state->rotation = plane_info.rotation;
3956 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3957 dc_plane_state->stereo_format = plane_info.stereo_format;
3958 dc_plane_state->tiling_info = plane_info.tiling_info;
3959 dc_plane_state->visible = plane_info.visible;
3960 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3961 dc_plane_state->global_alpha = plane_info.global_alpha;
3962 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3963 dc_plane_state->dcc = plane_info.dcc;
3964 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3967 * Always set input transfer function, since plane state is refreshed
3970 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3977 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3978 const struct dm_connector_state *dm_state,
3979 struct dc_stream_state *stream)
3981 enum amdgpu_rmx_type rmx_type;
3983 struct rect src = { 0 }; /* viewport in composition space */
3984 struct rect dst = { 0 }; /* stream addressable area */
3986 /* no mode. nothing to be done */
3990 /* Full screen scaling by default */
3991 src.width = mode->hdisplay;
3992 src.height = mode->vdisplay;
3993 dst.width = stream->timing.h_addressable;
3994 dst.height = stream->timing.v_addressable;
3997 rmx_type = dm_state->scaling;
3998 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3999 if (src.width * dst.height <
4000 src.height * dst.width) {
4001 /* height needs less upscaling/more downscaling */
4002 dst.width = src.width *
4003 dst.height / src.height;
4005 /* width needs less upscaling/more downscaling */
4006 dst.height = src.height *
4007 dst.width / src.width;
4009 } else if (rmx_type == RMX_CENTER) {
4013 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4014 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4016 if (dm_state->underscan_enable) {
4017 dst.x += dm_state->underscan_hborder / 2;
4018 dst.y += dm_state->underscan_vborder / 2;
4019 dst.width -= dm_state->underscan_hborder;
4020 dst.height -= dm_state->underscan_vborder;
4027 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4028 dst.x, dst.y, dst.width, dst.height);
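/*
 * Worked example (sketch) for RMX_ASPECT: a 1920x1080 source on a
 * 1600x1200 timing has src.width * dst.height = 2304000, which is not
 * less than src.height * dst.width = 1728000, so the width is kept and
 * dst.height = 1080 * 1600 / 1920 = 900; centering then yields
 * dst = 1600x900 at (0, 150).
 */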
4032 static enum dc_color_depth
4033 convert_color_depth_from_display_info(const struct drm_connector *connector,
4034 bool is_y420, int requested_bpc)
4041 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4042 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4044 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4046 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4049 bpc = (uint8_t)connector->display_info.bpc;
4050 /* Assume 8 bpc by default if no bpc is specified. */
4051 bpc = bpc ? bpc : 8;
4054 if (requested_bpc > 0) {
4056 * Cap display bpc based on the user requested value.
4058 * The value for state->max_bpc may not be correctly updated,
4059 * depending on when the connector gets added to the state
4060 * or whether this was called outside of atomic check, so it
4061 * can't be used directly.
4063 bpc = min_t(u8, bpc, requested_bpc);
4065 /* Round down to the nearest even number. */
4066 bpc = bpc - (bpc & 1);
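/*
 * Example (sketch): a panel reporting bpc = 10 with a requested max of 8
 * is capped to min(10, 8) = 8; an odd report such as bpc = 7 rounds down
 * to 7 - (7 & 1) = 6.
 */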
4072 * Temporary workaround: DRM doesn't parse color depth for
4073 * EDID revisions before 1.4.
4074 * TODO: Fix EDID parsing
4076 return COLOR_DEPTH_888;
4078 return COLOR_DEPTH_666;
4080 return COLOR_DEPTH_888;
4082 return COLOR_DEPTH_101010;
4084 return COLOR_DEPTH_121212;
4086 return COLOR_DEPTH_141414;
4088 return COLOR_DEPTH_161616;
4090 return COLOR_DEPTH_UNDEFINED;
4094 static enum dc_aspect_ratio
4095 get_aspect_ratio(const struct drm_display_mode *mode_in)
4097 /* 1-1 mapping, since both enums follow the HDMI spec. */
4098 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4101 static enum dc_color_space
4102 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4104 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4106 switch (dc_crtc_timing->pixel_encoding) {
4107 case PIXEL_ENCODING_YCBCR422:
4108 case PIXEL_ENCODING_YCBCR444:
4109 case PIXEL_ENCODING_YCBCR420:
4112 * According to the HDMI spec, 27030 kHz is the separation point between
4113 * HDTV and SDTV; we use YCbCr709 above it and YCbCr601 below it.
4116 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4117 if (dc_crtc_timing->flags.Y_ONLY)
4119 COLOR_SPACE_YCBCR709_LIMITED;
4121 color_space = COLOR_SPACE_YCBCR709;
4123 if (dc_crtc_timing->flags.Y_ONLY)
4125 COLOR_SPACE_YCBCR601_LIMITED;
4127 color_space = COLOR_SPACE_YCBCR601;
4132 case PIXEL_ENCODING_RGB:
4133 color_space = COLOR_SPACE_SRGB;
4144 static bool adjust_colour_depth_from_display_info(
4145 struct dc_crtc_timing *timing_out,
4146 const struct drm_display_info *info)
4148 enum dc_color_depth depth = timing_out->display_color_depth;
4151 normalized_clk = timing_out->pix_clk_100hz / 10;
4152 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4153 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4154 normalized_clk /= 2;
4155 /* Adjust the pixel clock following the HDMI spec, based on the colour depth */
4157 case COLOR_DEPTH_888:
4159 case COLOR_DEPTH_101010:
4160 normalized_clk = (normalized_clk * 30) / 24;
4162 case COLOR_DEPTH_121212:
4163 normalized_clk = (normalized_clk * 36) / 24;
4165 case COLOR_DEPTH_161616:
4166 normalized_clk = (normalized_clk * 48) / 24;
4169 /* The above depths are the only ones valid for HDMI. */
4172 if (normalized_clk <= info->max_tmds_clock) {
4173 timing_out->display_color_depth = depth;
4176 } while (--depth > COLOR_DEPTH_666);
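/*
 * Worked example (sketch): for a 300000 kHz mode with max_tmds_clock =
 * 340000 kHz, 12 bpc needs 300000 * 36 / 24 = 450000 kHz and 10 bpc needs
 * 375000 kHz, both too fast; 8 bpc needs 300000 kHz, which fits, so the
 * loop settles on COLOR_DEPTH_888.
 */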
4180 static void fill_stream_properties_from_drm_display_mode(
4181 struct dc_stream_state *stream,
4182 const struct drm_display_mode *mode_in,
4183 const struct drm_connector *connector,
4184 const struct drm_connector_state *connector_state,
4185 const struct dc_stream_state *old_stream,
4188 struct dc_crtc_timing *timing_out = &stream->timing;
4189 const struct drm_display_info *info = &connector->display_info;
4190 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4191 struct hdmi_vendor_infoframe hv_frame;
4192 struct hdmi_avi_infoframe avi_frame;
4194 memset(&hv_frame, 0, sizeof(hv_frame));
4195 memset(&avi_frame, 0, sizeof(avi_frame));
4197 timing_out->h_border_left = 0;
4198 timing_out->h_border_right = 0;
4199 timing_out->v_border_top = 0;
4200 timing_out->v_border_bottom = 0;
4201 /* TODO: un-hardcode */
4202 if (drm_mode_is_420_only(info, mode_in)
4203 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4204 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4205 else if (drm_mode_is_420_also(info, mode_in)
4206 && aconnector->force_yuv420_output)
4207 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4208 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4209 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4210 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4212 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4214 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4215 timing_out->display_color_depth = convert_color_depth_from_display_info(
4217 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4219 timing_out->scan_type = SCANNING_TYPE_NODATA;
4220 timing_out->hdmi_vic = 0;
4223 timing_out->vic = old_stream->timing.vic;
4224 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4225 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4227 timing_out->vic = drm_match_cea_mode(mode_in);
4228 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4229 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4230 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4231 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4234 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4235 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4236 timing_out->vic = avi_frame.video_code;
4237 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4238 timing_out->hdmi_vic = hv_frame.vic;
4241 timing_out->h_addressable = mode_in->crtc_hdisplay;
4242 timing_out->h_total = mode_in->crtc_htotal;
4243 timing_out->h_sync_width =
4244 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4245 timing_out->h_front_porch =
4246 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4247 timing_out->v_total = mode_in->crtc_vtotal;
4248 timing_out->v_addressable = mode_in->crtc_vdisplay;
4249 timing_out->v_front_porch =
4250 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4251 timing_out->v_sync_width =
4252 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4253 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4254 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4256 stream->output_color_space = get_output_color_space(timing_out);
4258 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4259 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4260 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4261 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4262 drm_mode_is_420_also(info, mode_in) &&
4263 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4264 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4265 adjust_colour_depth_from_display_info(timing_out, info);
4270 static void fill_audio_info(struct audio_info *audio_info,
4271 const struct drm_connector *drm_connector,
4272 const struct dc_sink *dc_sink)
4275 int cea_revision = 0;
4276 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4278 audio_info->manufacture_id = edid_caps->manufacturer_id;
4279 audio_info->product_id = edid_caps->product_id;
4281 cea_revision = drm_connector->display_info.cea_rev;
4283 strscpy(audio_info->display_name,
4284 edid_caps->display_name,
4285 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4287 if (cea_revision >= 3) {
4288 audio_info->mode_count = edid_caps->audio_mode_count;
4290 for (i = 0; i < audio_info->mode_count; ++i) {
4291 audio_info->modes[i].format_code =
4292 (enum audio_format_code)
4293 (edid_caps->audio_modes[i].format_code);
4294 audio_info->modes[i].channel_count =
4295 edid_caps->audio_modes[i].channel_count;
4296 audio_info->modes[i].sample_rates.all =
4297 edid_caps->audio_modes[i].sample_rate;
4298 audio_info->modes[i].sample_size =
				edid_caps->audio_modes[i].sample_size;
		}
	}
4303 audio_info->flags.all = edid_caps->speaker_flags;
4305 /* TODO: We only check for the progressive mode, check for interlace mode too */
4306 if (drm_connector->latency_present[0]) {
4307 audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
4319 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4320 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4321 dst_mode->crtc_clock = src_mode->crtc_clock;
4322 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4323 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4324 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4325 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4326 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4327 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4328 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4329 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4330 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4331 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
4340 if (scale_enabled) {
4341 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4342 } else if (native_mode->clock == drm_mode->clock &&
4343 native_mode->htotal == drm_mode->htotal &&
4344 native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling and no amdgpu-inserted mode: nothing to patch */
	}
}
4351 static struct dc_sink *
4352 create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
4355 struct dc_sink *sink = NULL;
4356 sink_init_data.link = aconnector->dc_link;
4357 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4359 sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}
4369 static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
4372 if (stream->triggered_crtc_reset.enabled) {
4373 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}
static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
4381 int j, highest_rfr = 0, master_stream = 0;
4383 for (j = 0; j < stream_count; j++) {
4384 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4385 int refresh_rate = 0;
4387 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4388 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && j != master_stream &&
		    stream_set[j]->triggered_crtc_reset.enabled)
			stream_set[j]->triggered_crtc_reset.event_source =
				stream_set[master_stream];
	}
}
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
4407 for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
4412 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}
4420 static struct dc_stream_state *
4421 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4422 const struct drm_display_mode *drm_mode,
4423 const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
4427 struct drm_display_mode *preferred_mode = NULL;
4428 struct drm_connector *drm_connector;
4429 const struct drm_connector_state *con_state =
4430 dm_state ? &dm_state->base : NULL;
4431 struct dc_stream_state *stream = NULL;
4432 struct drm_display_mode mode = *drm_mode;
4433 bool native_mode_found = false;
4434 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
4437 #if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;
4442 struct dc_sink *sink = NULL;
4443 if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;
4450 if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}
4459 stream = dc_create_stream_for_sink(sink);
4461 if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;
4468 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4469 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4471 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4472 /* Search for preferred mode */
4473 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
4478 if (!native_mode_found)
4479 preferred_mode = list_first_entry_or_null(
4480 &aconnector->base.modes,
			struct drm_display_mode,
			head);

	mode_refresh = drm_mode_vrefresh(&mode);
4486 if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
4489 * usermode calls to reset and set mode upon hotplug. In this
4490 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
4495 decide_crtc_timing_for_drm_display_mode(
4496 &mode, preferred_mode,
4497 dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);
	/*
	 * If scaling is enabled and refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
4508 if (!scale || mode_refresh != preferred_refresh)
4509 fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
4512 fill_stream_properties_from_drm_display_mode(stream,
4513 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4515 stream->timing.flags.DSC = 0;
4517 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4518 #if defined(CONFIG_DRM_AMD_DC_DCN)
4519 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4520 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
#endif
4524 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4525 dc_link_get_link_cap(aconnector->dc_link));
4527 #if defined(CONFIG_DRM_AMD_DC_DCN)
4528 if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
#endif
	}
4539 update_stream_scaling_settings(&mode, dm_state, stream);
	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);
4546 update_stream_signal(stream, sink);
4548 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4549 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4550 if (stream->link->psr_settings.psr_feature_enabled) {
4551 struct dc *core_dc = stream->link->ctx->dc;
4553 if (dc_is_dmcu_initialized(core_dc)) {
			// Decide whether the stream supports VSC SDP colorimetry
			// before building the VSC info packet.
4558 stream->use_vsc_sdp_for_colorimetry = false;
4559 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4560 stream->use_vsc_sdp_for_colorimetry =
					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
			} else {
4563 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4564 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
					stream->use_vsc_sdp_for_colorimetry = true;
				}
			}
			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		}
	}
finish:
	dc_sink_release(sink);

	return stream;
}
4577 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
4583 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4584 struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);
	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}
4599 static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;
	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);
4606 state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;
4610 crtc->state = &state->base;
	crtc->state->crtc = crtc;
}
4615 static struct drm_crtc_state *
4616 dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;
4620 cur = to_dm_crtc_state(crtc->state);
	if (WARN_ON(!crtc->state))
		return NULL;
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
4629 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}
4636 state->active_planes = cur->active_planes;
4637 state->interrupts_enabled = cur->interrupts_enabled;
4638 state->vrr_params = cur->vrr_params;
4639 state->vrr_infopacket = cur->vrr_infopacket;
4640 state->abm_level = cur->abm_level;
4641 state->vrr_supported = cur->vrr_supported;
4642 state->freesync_config = cur->freesync_config;
4643 state->crc_src = cur->crc_src;
4644 state->cm_has_degamma = cur->cm_has_degamma;
4645 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
	return &state->base;
}
4652 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
4655 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int rc;
4659 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4661 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);

	return rc;
}
4668 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
4671 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4672 struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
4678 if (amdgpu_dm_vrr_active(acrtc_state))
4679 rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;
4688 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}
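/*
 * Summary of the interrupt pairing implemented above (illustrative only):
 *
 *   dm_set_vblank(crtc, true),  VRR active   -> VBLANK on,  VUPDATE on
 *   dm_set_vblank(crtc, true),  VRR inactive -> VBLANK on,  VUPDATE untouched
 *   dm_set_vblank(crtc, false)               -> VBLANK off, VUPDATE off
 */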
4692 static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
4697 static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
/* Implemented only the options currently available for the driver */
4703 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4704 .reset = dm_crtc_reset_state,
4705 .destroy = amdgpu_dm_crtc_destroy,
4706 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4707 .set_config = drm_atomic_helper_set_config,
4708 .page_flip = drm_atomic_helper_page_flip,
4709 .atomic_duplicate_state = dm_crtc_duplicate_state,
4710 .atomic_destroy_state = dm_crtc_destroy_state,
4711 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4712 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4713 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4714 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4715 .enable_vblank = dm_enable_vblank,
4716 .disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
4720 static enum drm_connector_status
4721 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
4729 * 2. This interface *is called* in context of user-mode ioctl. Which
	 *    makes it a bad place for *any* MST-related activity.
	 */
4733 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4734 !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);
4739 return (connected ? connector_status_connected :
			connector_status_disconnected);
}
4743 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4744 struct drm_connector_state *connector_state,
4745 struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
4749 struct amdgpu_device *adev = dev->dev_private;
4750 struct dm_connector_state *dm_old_state =
4751 to_dm_connector_state(connector->state);
4752 struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);
	int ret = -EINVAL;
	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}
		if (dm_old_state->scaling == rmx_type)
			return 0;
		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}
4798 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4799 const struct drm_connector_state *state,
4800 struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
4804 struct amdgpu_device *adev = dev->dev_private;
4805 struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;
	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}
4843 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
4850 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4853 const struct dc_link *link = aconnector->dc_link;
4854 struct amdgpu_device *adev = connector->dev->dev_private;
4855 struct amdgpu_display_manager *dm = &adev->dm;
4857 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4858 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4860 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4861 link->type != dc_connection_none &&
4862 dm->backlight_dev) {
4863 backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif
4868 if (aconnector->dc_em_sink)
4869 dc_sink_release(aconnector->dc_em_sink);
4870 aconnector->dc_em_sink = NULL;
4871 if (aconnector->dc_sink)
4872 dc_sink_release(aconnector->dc_sink);
4873 aconnector->dc_sink = NULL;
4875 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4876 drm_connector_unregister(connector);
4877 drm_connector_cleanup(connector);
4878 if (aconnector->i2c) {
4879 i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
4887 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
4890 to_dm_connector_state(connector->state);
4892 if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
4900 state->scaling = RMX_OFF;
4901 state->underscan_enable = false;
4902 state->underscan_hborder = 0;
4903 state->underscan_vborder = 0;
4904 state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
4907 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4908 state->abm_level = amdgpu_dm_abm_level;
		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}
4914 struct drm_connector_state *
4915 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
4918 to_dm_connector_state(connector->state);
4920 struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;
4926 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4928 new_state->freesync_capable = state->freesync_capable;
4929 new_state->abm_level = state->abm_level;
4930 new_state->scaling = state->scaling;
4931 new_state->underscan_enable = state->underscan_enable;
4932 new_state->underscan_hborder = state->underscan_hborder;
4933 new_state->underscan_vborder = state->underscan_vborder;
4934 new_state->vcpi_slots = state->vcpi_slots;
4935 new_state->pbn = state->pbn;
	return &new_state->base;
}
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
4942 struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;
4946 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4947 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4948 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}
4954 #if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}
4961 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4962 .reset = amdgpu_dm_connector_funcs_reset,
4963 .detect = amdgpu_dm_connector_detect,
4964 .fill_modes = drm_helper_probe_single_connector_modes,
4965 .destroy = amdgpu_dm_connector_destroy,
4966 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4967 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4968 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4969 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4970 .late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
4974 static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
4979 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
4982 .link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;
4987 if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);
		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}
4996 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4998 aconnector->edid = edid;
5000 aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);
5006 if (aconnector->base.force == DRM_FORCE_ON) {
5007 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5008 aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}
5014 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
	/*
	 * In case of headless boot with force on for DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
5022 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5023 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}
5028 aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
5032 static struct dc_stream_state *
5033 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5034 const struct drm_display_mode *drm_mode,
5035 const struct dm_connector_state *dm_state,
5036 const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
5039 struct amdgpu_device *adev = connector->dev->dev_private;
5040 struct dc_stream_state *stream;
5041 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5042 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
5046 stream = create_stream_for_sink(aconnector, drm_mode,
						       dm_state, old_stream,
						       requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}
5054 dc_result = dc_validate_stream(adev->dm.dc, stream);
		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result);

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}
	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}
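/*
 * Illustrative walk-through (not driver code): with max_requested_bpc = 10
 * the loop above retries validation at decreasing color depths until DC
 * accepts one:
 *
 *   attempt 1: requested_bpc = 10 -> dc_validate_stream() fails
 *   attempt 2: requested_bpc = 8  -> fails
 *   attempt 3: requested_bpc = 6  -> DC_OK, stream is returned at 6 bpc
 */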
5073 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
5077 struct dc_sink *dc_sink;
5078 /* TODO: Unhardcode stream count */
5079 struct dc_stream_state *stream;
5080 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5082 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * dc_em_sink when the connector is forced.
	 */
5090 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5091 !aconnector->dc_em_sink)
5092 handle_edid_mgmt(aconnector);
5094 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}
	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}
5112 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5113 struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;
5120 memset(out, 0, sizeof(*out));
	if (!state->hdr_output_metadata)
		return 0;
	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;
	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;
	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;
5137 /* Prepare the infopacket for DC. */
5138 switch (state->connector->connector_type) {
5139 case DRM_MODE_CONNECTOR_HDMIA:
5140 out->hb0 = 0x87; /* type */
5141 out->hb1 = 0x01; /* version */
5142 out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;
5147 case DRM_MODE_CONNECTOR_DisplayPort:
5148 case DRM_MODE_CONNECTOR_eDP:
5149 out->hb0 = 0x00; /* sdp id, zero */
5150 out->hb1 = 0x87; /* type */
5151 out->hb2 = 0x1D; /* payload len - 1 */
5152 out->hb3 = (0x13 << 2); /* sdp version */
5153 out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}
5162 memcpy(&out->sb[i], &buf[4], 26);
5165 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
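/*
 * Illustrative layout (not driver code): hdmi_drm_infoframe_pack_only()
 * returns exactly 30 bytes: buf[0..2] hold the infoframe header (type,
 * version, length), buf[3] the checksum, and buf[4..29] the 26 payload
 * bytes copied into out->sb above. The DC header bytes differ per sink:
 * HDMI uses {0x87, 0x01, 0x1A} with sb[0] = checksum (i == 1), while DP
 * wraps the payload in an SDP with header {0x00, 0x87, 0x1D, 0x13 << 2}
 * and sb[0..1] = {0x01, 0x1A} (i == 2).
 */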
static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
5175 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5176 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5178 if (old_blob != new_blob) {
5179 if (old_blob && new_blob &&
5180 old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5192 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
5195 drm_atomic_get_new_connector_state(state, conn);
5196 struct drm_connector_state *old_con_state =
5197 drm_atomic_get_old_connector_state(state, conn);
5198 struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;
5205 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5206 struct dc_info_packet hdr_infopacket;
		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;
5212 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5213 if (IS_ERR(new_crtc_state))
5214 return PTR_ERR(new_crtc_state);
		/*
		 * DC considers the stream backends changed if the
5218 * static metadata changes. Forcing the modeset also
5219 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
5223 * Changing the static metadata after it's been
5224 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
5227 new_crtc_state->mode_changed =
5228 !old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
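/*
 * Illustrative truth table (not driver code) for the rule above; a modeset
 * is forced only when HDR is entered or exited, not on in-place updates:
 *
 *   old metadata | new metadata | mode_changed
 *   -------------+--------------+-------------
 *   NULL         | set          | true  (entering HDR)
 *   set          | NULL         | true  (exiting HDR)
 *   set          | set, changed | false (metadata updated in place)
 */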
5235 static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB console mode, the new
	 * modes will be filtered by drm_mode_validate_size() and will be
	 * missing after the user starts lightdm. So we need to renew the list
	 * of modes in the get_modes callback, not just return the mode count.
	 */
5243 .get_modes = get_modes,
5244 .mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
5252 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
5255 struct drm_plane *plane;
5257 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}
5265 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;
5271 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5272 struct drm_plane_state *new_plane_state;
5274 /* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;
5278 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5280 if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
5283 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}
5290 /* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}
/*
 * Sets whether interrupts should be enabled on a specific CRTC.
5299 * We require that the stream be enabled and that there exist active
 * DC planes on the stream.
 */
static void
5303 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5304 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
5307 to_dm_crtc_state(new_crtc_state);
5309 dm_new_crtc_state->active_planes = 0;
5310 dm_new_crtc_state->interrupts_enabled = false;
	if (!dm_new_crtc_state->stream)
		return;
5315 dm_new_crtc_state->active_planes =
5316 count_crtc_active_planes(new_crtc_state);
	dm_new_crtc_state->interrupts_enabled =
		dm_new_crtc_state->active_planes > 0;
}
5322 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5323 struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
5326 struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;
	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
5332 * the CRTC has changed or whenever any of its planes have changed.
5333 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
5336 dm_update_crtc_interrupt_state(crtc, state);
5338 if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}
5344 /* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;
	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
	 */
5352 if (state->enable && state->active &&
5353 does_crtc_have_active_cursor(state) &&
	    dm_crtc_state->active_planes == 0)
		return -EINVAL;
	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}
5363 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5364 const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
5370 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5371 .disable = dm_crtc_helper_disable,
5372 .atomic_check = dm_crtc_helper_atomic_check,
5373 .mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
5403 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5404 struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
5407 struct drm_atomic_state *state = crtc_state->state;
5408 struct drm_connector *connector = conn_state->connector;
5409 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5410 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5411 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5412 struct drm_dp_mst_topology_mgr *mst_mgr;
5413 struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;
5421 mst_port = aconnector->port;
5422 mst_mgr = &aconnector->mst_port->mst_mgr;
	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;
5427 if (!state->duplicated) {
5428 int max_bpc = conn_state->max_requested_bpc;
5429 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5430 aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
5434 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5435 clock = adjusted_mode->clock;
5436 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
		dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
										   mst_mgr,
										   mst_port,
										   dm_new_connector_state->pbn,
										   dm_mst_get_pbn_divider(aconnector->dc_link));
5443 if (dm_new_connector_state->vcpi_slots < 0) {
5444 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
			return dm_new_connector_state->vcpi_slots;
		}
	}
	return 0;
}
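/*
 * Worked example (illustrative only): for 1920x1080@60 at 8 bpc RGB,
 * bpp = 8 * 3 = 24 and clock = 148500 kHz, so drm_dp_calc_pbn_mode() gives
 *   pbn = DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000) = 532.
 * On a hypothetical HBR2 x4 MST link (pbn_div = 40 PBN per time slot) the
 * atomic helper above would then reserve DIV_ROUND_UP(532, 40) = 14 of the
 * 64 available time slots.
 */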
5450 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5451 .disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
5455 #if defined(CONFIG_DRM_AMD_DC_DCN)
5456 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5457 struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
5460 struct drm_connector *connector;
5461 struct drm_connector_state *new_con_state, *old_con_state;
5462 struct amdgpu_dm_connector *aconnector;
5463 struct dm_connector_state *dm_conn_state;
5464 int i, j, clock, bpp;
5465 int vcpi, pbn_div, pbn = 0;
5467 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5469 aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port)
			continue;
		if (!new_con_state || !new_con_state->crtc)
			continue;
5477 dm_conn_state = to_dm_connector_state(new_con_state);
5479 for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;
5493 if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}
5502 pbn_div = dm_mst_get_pbn_divider(stream->link);
5503 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5504 clock = stream->timing.pix_clk_100hz / 10;
5505 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn,
						    pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;
5513 dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif
5520 static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;
	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
5527 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5528 WARN_ON(amdgpu_state == NULL);
	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}
5534 static struct drm_plane_state *
5535 dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5539 old_dm_plane_state = to_dm_plane_state(plane->state);
5540 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5541 if (!dm_plane_state)
5544 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5546 if (old_dm_plane_state->dc_state) {
5547 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5548 dc_plane_state_retain(dm_plane_state->dc_state);
	return &dm_plane_state->base;
}
5554 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5555 struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5559 if (dm_plane_state->dc_state)
5560 dc_plane_state_release(dm_plane_state->dc_state);
	drm_atomic_helper_plane_destroy_state(plane, state);
}
5565 static const struct drm_plane_funcs dm_plane_funcs = {
5566 .update_plane = drm_atomic_helper_update_plane,
5567 .disable_plane = drm_atomic_helper_disable_plane,
5568 .destroy = drm_primary_helper_destroy,
5569 .reset = dm_drm_plane_reset,
5570 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
5574 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5575 struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
5578 struct drm_gem_object *obj;
5579 struct amdgpu_device *adev;
5580 struct amdgpu_bo *rbo;
5581 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5582 struct list_head list;
5583 struct ttm_validate_buffer tv;
5584 struct ww_acquire_ctx ticket;
	uint64_t tiling_flags;
	uint32_t domain;
	int r;
	bool tmz_surface = false;
5589 bool force_disable_dcc = false;
5591 dm_plane_state_old = to_dm_plane_state(plane->state);
5592 dm_plane_state_new = to_dm_plane_state(new_state);
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}
5599 afb = to_amdgpu_framebuffer(new_state->fb);
5600 obj = new_state->fb->obj[0];
5601 rbo = gem_to_amdgpu_bo(obj);
5602 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);
	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}
5615 if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;
5620 r = amdgpu_bo_pin(rbo, domain);
5621 if (unlikely(r != 0)) {
5622 if (r != -ERESTARTSYS)
5623 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}
5628 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5629 if (unlikely(r != 0)) {
5630 amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}
5636 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5638 tmz_surface = amdgpu_bo_encrypted(rbo);
5640 ttm_eu_backoff_reservation(&ticket, &list);
	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
5647 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5648 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5650 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5651 fill_plane_buffer_attributes(
5652 adev, afb, plane_state->format, plane_state->rotation,
5653 tiling_flags, &plane_state->tiling_info,
5654 &plane_state->plane_size, &plane_state->dcc,
			&plane_state->address, tmz_surface,
			force_disable_dcc);
	}

	return 0;
}
5662 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5663 struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}
5678 amdgpu_bo_unpin(rbo);
5679 amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}
5683 static int dm_plane_atomic_check(struct drm_plane *plane,
5684 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
5687 struct dc *dc = adev->dm.dc;
5688 struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	int ret;
5692 dm_plane_state = to_dm_plane_state(state);
	if (!dm_plane_state->dc_state)
		return 0;
	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;
	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}
5707 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5708 struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}
5717 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5718 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
5721 drm_atomic_get_old_plane_state(new_state->state, plane);
5723 swap(plane->state->fb, new_state->fb);
5725 plane->state->src_x = new_state->src_x;
5726 plane->state->src_y = new_state->src_y;
5727 plane->state->src_w = new_state->src_w;
5728 plane->state->src_h = new_state->src_h;
5729 plane->state->crtc_x = new_state->crtc_x;
5730 plane->state->crtc_y = new_state->crtc_y;
5731 plane->state->crtc_w = new_state->crtc_w;
5732 plane->state->crtc_h = new_state->crtc_h;
	handle_cursor_update(plane, old_state);
}
5737 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5738 .prepare_fb = dm_plane_helper_prepare_fb,
5739 .cleanup_fb = dm_plane_helper_cleanup_fb,
5740 .atomic_check = dm_plane_atomic_check,
5741 .atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};
/*
 * TODO: these are currently initialized to rgb formats only.
5747 * For future use cases we should either initialize them dynamically based on
5748 * plane capabilities, or initialize this array to all formats, so internal drm
 * check will succeed, and let DC implement proper check
 */
5751 static const uint32_t rgb_formats[] = {
5752 DRM_FORMAT_XRGB8888,
5753 DRM_FORMAT_ARGB8888,
5754 DRM_FORMAT_RGBA8888,
5755 DRM_FORMAT_XRGB2101010,
5756 DRM_FORMAT_XBGR2101010,
5757 DRM_FORMAT_ARGB2101010,
5758 DRM_FORMAT_ABGR2101010,
5759 DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
5764 static const uint32_t overlay_formats[] = {
5765 DRM_FORMAT_XRGB8888,
5766 DRM_FORMAT_ARGB8888,
5767 DRM_FORMAT_RGBA8888,
5768 DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};
5777 static int get_plane_formats(const struct drm_plane *plane,
5778 const struct dc_plane_cap *plane_cap,
5779 uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;
	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * lists.
	 */
5789 switch (plane->type) {
5790 case DRM_PLANE_TYPE_PRIMARY:
5791 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;
5795 formats[num_formats++] = rgb_formats[i];
5798 if (plane_cap && plane_cap->pixel_format_support.nv12)
5799 formats[num_formats++] = DRM_FORMAT_NV12;
5800 if (plane_cap && plane_cap->pixel_format_support.p010)
5801 formats[num_formats++] = DRM_FORMAT_P010;
5802 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5803 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5804 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5805 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;
5810 case DRM_PLANE_TYPE_OVERLAY:
5811 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;
			formats[num_formats++] = overlay_formats[i];
		}
		break;
5819 case DRM_PLANE_TYPE_CURSOR:
5820 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;
			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}
5832 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5833 struct drm_plane *plane,
5834 unsigned long possible_crtcs,
5835 const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
5841 num_formats = get_plane_formats(plane, plane_cap, formats,
5842 ARRAY_SIZE(formats));
5844 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5845 &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;
5850 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5851 plane_cap && plane_cap->per_pixel_alpha) {
5852 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5853 BIT(DRM_MODE_BLEND_PREMULTI);
5855 drm_plane_create_alpha_property(plane);
5856 drm_plane_create_blend_mode_property(plane, blend_caps);
	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
5862 plane_cap->pixel_format_support.p010)) {
5863 /* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
5866 BIT(DRM_COLOR_YCBCR_BT601) |
5867 BIT(DRM_COLOR_YCBCR_BT709) |
5868 BIT(DRM_COLOR_YCBCR_BT2020),
5869 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5870 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}
5874 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5876 /* Create (reset) the plane state */
5877 if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}
5883 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5884 struct drm_plane *plane,
5885 uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;
	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;
5896 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;
	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);
	if (res)
		goto fail;
5913 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5915 /* Create (reset) the plane state */
5916 if (acrtc->base.funcs->reset)
5917 acrtc->base.funcs->reset(&acrtc->base);
5919 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5920 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5922 acrtc->crtc_id = crtc_index;
5923 acrtc->base.enabled = false;
5924 acrtc->otg_inst = -1;
5926 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5927 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5928 true, MAX_COLOR_LUT_ENTRIES);
5929 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}
static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
5943 case SIGNAL_TYPE_HDMI_TYPE_A:
5944 return DRM_MODE_CONNECTOR_HDMIA;
5945 case SIGNAL_TYPE_EDP:
5946 return DRM_MODE_CONNECTOR_eDP;
5947 case SIGNAL_TYPE_LVDS:
5948 return DRM_MODE_CONNECTOR_LVDS;
5949 case SIGNAL_TYPE_RGB:
5950 return DRM_MODE_CONNECTOR_VGA;
5951 case SIGNAL_TYPE_DISPLAY_PORT:
5952 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5953 return DRM_MODE_CONNECTOR_DisplayPort;
5954 case SIGNAL_TYPE_DVI_DUAL_LINK:
5955 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5956 return DRM_MODE_CONNECTOR_DVID;
5957 case SIGNAL_TYPE_VIRTUAL:
5958 return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}
5965 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
5969 /* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
5976 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
5979 struct amdgpu_encoder *amdgpu_encoder;
5981 encoder = amdgpu_dm_connector_to_encoder(connector);
5983 if (encoder == NULL)
5986 amdgpu_encoder = to_amdgpu_encoder(encoder);
5988 amdgpu_encoder->native_mode.clock = 0;
5990 if (!list_empty(&connector->probed_modes)) {
5991 struct drm_display_mode *preferred_mode = NULL;
5993 list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}
6005 static struct drm_display_mode *
6006 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
6010 struct drm_device *dev = encoder->dev;
6011 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6012 struct drm_display_mode *mode = NULL;
6013 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
6021 mode->vdisplay = vdisplay;
6022 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}
6029 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6030 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6033 struct drm_display_mode *mode = NULL;
6034 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6035 struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
6044 { "640x480", 640, 480},
6045 { "800x600", 800, 600},
6046 { "1024x768", 1024, 768},
6047 { "1280x720", 1280, 720},
6048 { "1280x800", 1280, 800},
6049 {"1280x1024", 1280, 1024},
6050 { "1440x900", 1440, 900},
6051 {"1680x1050", 1680, 1050},
6052 {"1600x1200", 1600, 1200},
6053 {"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};
6057 n = ARRAY_SIZE(common_modes);
6059 for (i = 0; i < n; i++) {
6060 struct drm_display_mode *curmode = NULL;
6061 bool mode_existed = false;
6063 if (common_modes[i].w > native_mode->hdisplay ||
6064 common_modes[i].h > native_mode->vdisplay ||
6065 (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;
6069 list_for_each_entry(curmode, &connector->probed_modes, head) {
6070 if (common_modes[i].w == curmode->hdisplay &&
6071 common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;
6080 mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
6083 drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
6091 struct amdgpu_dm_connector *amdgpu_dm_connector =
6092 to_amdgpu_dm_connector(connector);
	if (edid) {
		/* empty probed_modes */
6096 INIT_LIST_HEAD(&connector->probed_modes);
6097 amdgpu_dm_connector->num_modes =
6098 drm_add_edid_modes(connector, edid);
6100 /* sorting the probed modes before calling function
6101 * amdgpu_dm_get_native_mode() since EDID can have
6102 * more than one preferred mode. The modes that are
6103 * later in the probed mode list could be of higher
6104 * and preferred resolution. For example, 3840x2160
6105 * resolution in base EDID preferred timing and 4096x2160
		 * preferred resolution in DID extension block later.
		 */
6108 drm_mode_sort(&connector->probed_modes);
6109 amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}
6115 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
6118 to_amdgpu_dm_connector(connector);
6119 struct drm_encoder *encoder;
6120 struct edid *edid = amdgpu_dm_connector->edid;
6122 encoder = amdgpu_dm_connector_to_encoder(connector);
6124 if (!edid || !drm_edid_is_valid(edid)) {
6125 amdgpu_dm_connector->num_modes =
			drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6129 amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);
	return amdgpu_dm_connector->num_modes;
}
6136 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6137 struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;
	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
6148 if (aconnector->base.funcs->reset)
6149 aconnector->base.funcs->reset(&aconnector->base);
6151 aconnector->connector_id = link_index;
6152 aconnector->dc_link = link;
6153 aconnector->base.interlace_allowed = false;
6154 aconnector->base.doublescan_allowed = false;
6155 aconnector->base.stereo_allowed = false;
6156 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6157 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6158 aconnector->audio_inst = -1;
6159 mutex_init(&aconnector->hpd_lock);
	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
6165 switch (connector_type) {
6166 case DRM_MODE_CONNECTOR_HDMIA:
6167 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6168 aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
6171 case DRM_MODE_CONNECTOR_DisplayPort:
6172 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6173 aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
6176 case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}
6183 drm_object_attach_property(&aconnector->base.base,
6184 dm->ddev->mode_config.scaling_mode_property,
6185 DRM_MODE_SCALE_NONE);
6187 drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_property,
				   UNDERSCAN_OFF);
6190 drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_hborder_property,
				   0);
6193 drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.underscan_vborder_property,
				   0);
6197 if (!aconnector->mst_port)
6198 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6200 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6201 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6202 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6204 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6205 dc_is_dmcu_initialized(adev->dm.dc)) {
6206 drm_object_attach_property(&aconnector->base.base,
				   adev->mode_info.abm_level_property, 0);
	}
6210 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6211 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6212 connector_type == DRM_MODE_CONNECTOR_eDP) {
6213 drm_object_attach_property(
6214 &aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);
	}
6217 if (!aconnector->mst_port)
6218 drm_connector_attach_vrr_capable_property(&aconnector->base);
6220 #ifdef CONFIG_DRM_AMD_DC_HDCP
6221 if (adev->dm.hdcp_workqueue)
		drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
}
6227 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6228 struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6231 struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;
	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;
6241 cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;
6245 for (i = 0; i < num; i++) {
6246 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6247 cmd.payloads[i].address = msgs[i].addr;
6248 cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);

	return result;
}
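/*
 * Illustrative sketch (not driver code): a plain EDID block read through
 * this adapter arrives as two i2c_msg entries at the standard DDC address
 * 0x50 and is converted into two DC payloads by the loop above:
 *
 *   u8 offset = 0;
 *   struct i2c_msg msgs[] = {
 *           { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
 *           { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid_buf },
 *   };
 *   // payload[0]: write = true,  address = 0x50, length = 1
 *   // payload[1]: write = false, address = 0x50, length = 128
 */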
6262 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
6267 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6268 .master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
6272 static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
6277 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6278 struct amdgpu_i2c_adapter *i2c;
	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
6283 i2c->base.owner = THIS_MODULE;
6284 i2c->base.class = I2C_CLASS_DDC;
6285 i2c->base.dev.parent = &adev->pdev->dev;
6286 i2c->base.algo = &amdgpu_dm_i2c_algo;
6287 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6288 i2c_set_adapdata(&i2c->base, i2c);
6289 i2c->ddc_service = ddc_service;
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
6300 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6301 struct amdgpu_dm_connector *aconnector,
6302 uint32_t link_index,
6303 struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
6308 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6309 struct amdgpu_i2c_adapter *i2c;
6311 link->priv = aconnector;
6313 DRM_DEBUG_DRIVER("%s()\n", __func__);
6315 i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}
6321 aconnector->i2c = i2c;
6322 res = i2c_add_adapter(&i2c->base);
	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}
6329 connector_type = to_drm_connector_type(link->connector_signal);
	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}
6344 drm_connector_helper_add(
6346 &amdgpu_dm_connector_helper_funcs);
	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);
6355 drm_connector_attach_encoder(
6356 &aconnector->base, &aencoder->base);
6358 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6359 || connector_type == DRM_MODE_CONNECTOR_eDP)
6360 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}

	return res;
}
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}
6389 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6390 struct amdgpu_encoder *aencoder,
6391 uint32_t link_index)
{
	struct amdgpu_device *adev = dev->dev_private;
6395 int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);
6401 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;
	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}
6413 static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK constant is the same as PFLIP.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
6427 drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
6448 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
6451 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
6454 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6455 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 */
	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
	    aconnector->dc_sink != NULL)
		return true;

	if (old_state->content_protection == state->content_protection)
		return false;

	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		return true;

	return false;
}
#endif
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
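/*
 * Translate the DRM cursor plane position into a dc_cursor_position.
 * Negative crtc_x/crtc_y are handled by clamping to 0 and shifting the
 * hotspot instead, since DC cursor positions are unsigned.
 */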
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	position->enable = false;
	position->x = 0;
	position->y = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}
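/*
 * Program the hardware cursor for a plane update: a disabled cursor is
 * turned off via dc_stream_set_cursor_position(), otherwise both the cursor
 * attributes (address, size, format) and the position are set under dc_lock.
 */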
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = plane->dev->dev_private;
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width = plane->state->crtc_w;
	attributes.height = plane->state->crtc_h;
	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
						     &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
			 acrtc->crtc_id);
}
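/*
 * Recompute VRR state for a stream on flip: feed the flip timestamp to the
 * freesync module, rebuild the VRR infopacket, and record whether the timing
 * or infopacket changed so the commit code can push the updates to DC.
 */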
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = new_crtc_state->vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
static void pre_update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	vrr_params = new_crtc_state->vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		config.state = new_crtc_state->base.vrr_enabled ?
			VRR_STATE_ACTIVE_VARIABLE :
			VRR_STATE_INACTIVE;
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&new_crtc_state->vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it likely happened inside display front-porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}
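/*
 * Program all non-cursor planes on a CRTC in one shot: build a bundle of
 * dc_surface_updates (scaling, plane info, flip addresses), throttle flips
 * against the target vblank, then hand the bundle to DC under dc_lock.
 */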
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint64_t tiling_flags;
	bool tmz_surface = false;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
					      false,
					      msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		/*
		 * TODO This might fail and hence better not used, wait
		 * explicitly on fences instead
		 * and in general should be called for
		 * blocking commits, as per the framework helpers
		 */
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			DRM_ERROR("failed to reserve buffer before flip\n");

		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);

		tmz_surface = amdgpu_bo_encrypted(abo);

		amdgpu_bo_unreserve(abo);

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state, tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			tmz_surface,
			false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
				  acrtc_attach->crtc_id);
			continue;
		}

		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;
	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		if (acrtc_attach->base.state->event) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
		acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
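/*
 * Notify the audio component about ELD changes: connectors that lost or
 * changed their CRTC get a removal notification (inst = -1), while newly
 * enabled streams report the audio instance from their dc_stream_status.
 */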
static void amdgpu_dm_commit_audio(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state;
	const struct dc_stream_status *status;
	int i, inst;

	/* Notify device removals. */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		if (old_con_state->crtc != new_con_state->crtc) {
			/* CRTC changes require notification. */
			goto notify;
		}

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

	notify:
		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = aconnector->audio_inst;
		aconnector->audio_inst = -1;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}

	/* Notify audio device additions. */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}
/*
 * Enable interrupts on CRTCs that are newly active, undergone
 * a modeset, or have active planes again.
 *
 * Done in two passes, based on the for_modeset flag:
 * Pass 1: For CRTCs going through modeset
 * Pass 2: For CRTCs going from 0 to n active planes
 *
 * Interrupts can only be enabled after the planes are programmed,
 * so this requires a two-pass approach since we don't want to
 * just defer the interrupts until after commit planes every time.
 */
static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
					     struct drm_atomic_state *state,
					     bool for_modeset)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;
#ifdef CONFIG_DEBUG_FS
	enum amdgpu_dm_pipe_crc_source source;
#endif

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);
		struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(old_crtc_state);
		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
		bool run_pass;

		run_pass = (for_modeset && modeset) ||
			   (!for_modeset && !modeset &&
			    !dm_old_crtc_state->interrupts_enabled);

		if (!run_pass)
			continue;

		if (!dm_new_crtc_state->interrupts_enabled)
			continue;

		manage_dm_interrupts(adev, acrtc, true);

#ifdef CONFIG_DEBUG_FS
		/* The stream has changed so CRC capture needs to be re-enabled. */
		source = dm_new_crtc_state->crc_src;
		if (amdgpu_dm_is_valid_crc_source(source)) {
			amdgpu_dm_crtc_configure_crc_source(
				crtc, dm_new_crtc_state,
				dm_new_crtc_state->crc_src);
		}
#endif
	}
}
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail this would lead to the new state (that hasn't been committed
	 * yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (dm_old_crtc_state->interrupts_enabled &&
		    (!dm_new_crtc_state->interrupts_enabled ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * Add check here for SoC's that support hardware cursor plane, to
	 * unset legacy_cursor_update
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/*TODO Handle EINTR, reenable IRQ*/
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state. */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * this could happen because of issues with
				 * userspace notifications delivery.
				 * In this case userspace tries to set mode on
				 * display which is disconnected in fact.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect reset mode will come soon.
				 *
				 * This can also happen when unplug is done
				 * during resume sequence ended
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						__func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream) {
				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);

				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
			}
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
													 : false);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			is_hdr_metadata_different(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];


		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
		mutex_unlock(&dm->dc_lock);
	}

	/* Count number of newly disabled CRTCs for dropping PM refs later. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* Update freesync active state. */
		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
						dm_new_crtc_state);
	}

	/* Enable interrupts for CRTCs going through a modeset. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, true);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* update planes when needed per crtc*/
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dc_state, dev,
						dm, crtc, wait_for_vblank);
	}

	/* Enable interrupts for CRTCs going from 0 to n active planes. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, false);

	/* Update audio instances for each connector. */
	amdgpu_dm_commit_audio(dev, state);

	/*
	 * send vblank event on all events not handled in flip and
	 * mark consumed event for drm_atomic_helper_commit_hw_done
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);

	if (dc_state_temp)
		dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we can not rely on usermode call
	 * to turn on the display, so we do it here
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases them the
	 * extra locks we are taking here will also get released
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
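/*
 * Derive the freesync (VRR) configuration for a CRTC from the connector's
 * advertised refresh range; the mode's nominal vrefresh must fall within
 * [min_vfreq, max_vfreq] for VRR to be considered supported.
 */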
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		config.state = new_crtc_state->base.vrr_enabled ?
				VRR_STATE_ACTIVE_VARIABLE :
				VRR_STATE_INACTIVE;
		config.min_refresh_in_uhz =
				aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz =
				aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;
	}

	new_crtc_state->freesync_config = config;
}
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_params, 0,
	       sizeof(new_crtc_state->vrr_params));
	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */
		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
						crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->enable &&
	      new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
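/*
 * Decide whether a plane update requires removing and recreating the DC
 * plane state (a "reset") instead of updating it in place.
 */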
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * enough to determine when we need to reset all the planes on
	 * the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC Degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}
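/*
 * Add or remove the DC plane state backing a DRM plane in the new DC
 * context, flagging lock_and_validation_needed when the plane topology
 * changes.
 */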
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/*TODO Implement better atomic check for cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
					 new_plane_state->crtc_w, new_plane_state->crtc_h);
			return -EINVAL;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			ret = -EINVAL;
			return ret;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
8270 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8271 struct drm_atomic_state *state,
8272 enum surface_update_type *out_type)
8274 struct dc *dc = dm->dc;
8275 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8276 int i, j, num_plane, ret = 0;
8277 struct drm_plane_state *old_plane_state, *new_plane_state;
8278 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8279 struct drm_crtc *new_plane_crtc;
8280 struct drm_plane *plane;
8282 struct drm_crtc *crtc;
8283 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8284 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8285 struct dc_stream_status *status = NULL;
8286 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8287 struct surface_info_bundle {
8288 struct dc_surface_update surface_updates[MAX_SURFACES];
8289 struct dc_plane_info plane_infos[MAX_SURFACES];
8290 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8291 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8292 struct dc_stream_update stream_update;
8295 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8298 DRM_ERROR("Failed to allocate update bundle\n");
8299 /* Set type to FULL to avoid crashing in DC*/
8300 update_type = UPDATE_TYPE_FULL;
8304 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8306 memset(bundle, 0, sizeof(struct surface_info_bundle));
8308 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8309 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8312 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8313 update_type = UPDATE_TYPE_FULL;
8317 if (!new_dm_crtc_state->stream)
8320 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8321 const struct amdgpu_framebuffer *amdgpu_fb =
8322 to_amdgpu_framebuffer(new_plane_state->fb);
8323 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8324 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8325 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8326 uint64_t tiling_flags;
8327 bool tmz_surface = false;
8329 new_plane_crtc = new_plane_state->crtc;
8330 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8331 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8333 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8336 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8337 update_type = UPDATE_TYPE_FULL;
8341 if (crtc != new_plane_crtc)
8344 bundle->surface_updates[num_plane].surface =
8345 new_dm_plane_state->dc_state;
8347 if (new_crtc_state->mode_changed) {
8348 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8349 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8352 if (new_crtc_state->color_mgmt_changed) {
8353 bundle->surface_updates[num_plane].gamma =
8354 new_dm_plane_state->dc_state->gamma_correction;
8355 bundle->surface_updates[num_plane].in_transfer_func =
8356 new_dm_plane_state->dc_state->in_transfer_func;
8357 bundle->surface_updates[num_plane].gamut_remap_matrix =
8358 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8359 bundle->stream_update.gamut_remap =
8360 &new_dm_crtc_state->stream->gamut_remap_matrix;
8361 bundle->stream_update.output_csc_transform =
8362 &new_dm_crtc_state->stream->csc_color_matrix;
8363 bundle->stream_update.out_transfer_func =
8364 new_dm_crtc_state->stream->out_transfer_func;
8367 ret = fill_dc_scaling_info(new_plane_state,
8372 bundle->surface_updates[num_plane].scaling_info = scaling_info;
8375 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8379 ret = fill_dc_plane_info_and_addr(
8380 dm->adev, new_plane_state, tiling_flags,
8382 &flip_addr->address, tmz_surface,
8387 bundle->surface_updates[num_plane].plane_info = plane_info;
8388 bundle->surface_updates[num_plane].flip_addr = flip_addr;
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		bundle->stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
				&bundle->stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(bundle);

	*out_type = update_type;
	return ret;
}
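
/*
 * On ASICs with DSC support, a mode change on one MST stream can force DSC
 * reconfiguration on every other stream sharing the same MST link, so those
 * streams' CRTCs must be added to the atomic state as well.
 */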
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state, because otherwise atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove, add, or update streams on one
 * CRTC while flipping on another, acquiring the global lock guarantees that
 * any such full-update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, even when
 * that might not seem necessary. This is because DC stream creation requires
 * the DC sink, which is tied to the DRM connector state. Cleaning this up
 * should be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;
	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;

	int ret, i;

	/*
	 * This bool gets set to true for any modeset, reset, or plane update
	 * that implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;
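
	/*
	 * On ASICs with DSC support (Navi10 and newer), a modeset on a
	 * DSC-capable MST CRTC can change the compression configuration of
	 * other CRTCs on the same MST link, so pull those CRTCs into the
	 * state too.
	 */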
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}
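
	/*
	 * DC mirrors the full hardware state in its context, so apply the
	 * changes in a fixed order: remove modified planes, disable the CRTCs
	 * that need it, re-enable CRTCs, then add the new or modified planes.
	 * Each pass also notes whether global lock and validation are needed.
	 */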
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper; check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed because a new stream
	 * cannot be committed into the context without causing a full reset.
	 * Decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was the old way of deciding whether the
	 * global lock must be taken. It is kept around to catch any corner
	 * cases we may have broken:
	 * lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
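
	/*
	 * A full update must serialize against outstanding commits: take the
	 * global DC lock, recompute the MST/DSC configuration where
	 * applicable, and run DC's global validation on the new context.
	 */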
	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
			goto fail;

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and they can
		 * have their commit work done in parallel with other commits
		 * not touching the same resource. If we have a new DC context
		 * as part of the DM atomic state from validation, we need to
		 * free it and retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */
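
		/*
		 * Find the DM private object in the state, destroy the DC
		 * context it holds, and compact the private-object array by
		 * swapping the last element into the freed slot.
		 */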
		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
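
/*
 * Check whether the sink can accept timings that differ from the MSA by
 * reading DP_DOWN_STREAM_PORT_COUNT and testing DP_MSA_TIMING_PAR_IGNORED,
 * a prerequisite for variable refresh over DisplayPort.
 */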
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data)))
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;

	return capable;
}
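
/*
 * Parse the EDID's detailed monitor-range descriptor for the supported
 * vertical refresh range and update the connector's FreeSync (VRR)
 * capability and the DRM vrr_capable property accordingly.
 */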
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		return;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-zero, restrict FreeSync to DP and eDP sinks only.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}
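
		/*
		 * Only advertise VRR when the EDID reports a usable refresh
		 * range of more than 10 Hz between min and max.
		 */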
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
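
/*
 * Read the PSR capability block from the sink's DPCD on eDP links and cache
 * the supported PSR version and whether the PSR feature can be enabled.
 */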
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure PSR on a link
 * @stream: stream state
 *
 * Return: true if the PSR link was configured successfully
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true if PSR was enabled successfully
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; initialize to a failsafe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
					stream->timing.pix_clk_100hz * 100),
					stream->timing.v_total),
					stream->timing.h_total);

	/*
	 * Round up. Calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
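
	/*
	 * Example: at 60 Hz, frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames, i.e. ~33 ms of
	 * static screen before the PSR entry interrupt fires.
	 */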
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true if PSR was disabled successfully
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}