2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
104 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
107 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
119 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
121 * requests into DC requests, and DC responses into DRM responses.
123 * The root control structure is &struct amdgpu_display_manager.
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
131 * initializes drm_device display related structures, based on the information
132 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
133 * drm_encoder, drm_mode_config
135 * Returns 0 on success
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
142 struct drm_plane *plane,
143 unsigned long possible_crtcs,
144 const struct dc_plane_cap *plane_cap);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
146 struct drm_plane *plane,
147 uint32_t link_index);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
149 struct amdgpu_dm_connector *amdgpu_dm_connector,
151 struct amdgpu_encoder *amdgpu_encoder);
152 static int amdgpu_dm_encoder_init(struct drm_device *dev,
153 struct amdgpu_encoder *aencoder,
154 uint32_t link_index);
156 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
158 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
159 struct drm_atomic_state *state,
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
164 static int amdgpu_dm_atomic_check(struct drm_device *dev,
165 struct drm_atomic_state *state);
167 static void handle_cursor_update(struct drm_plane *plane,
168 struct drm_plane_state *old_plane_state);
170 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
174 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
177 * dm_vblank_get_counter
180 * Get counter for number of vertical blanks
183 * struct amdgpu_device *adev - [in] desired amdgpu device
184 * int crtc - [in] which CRTC to get the counter from
187 * Counter for vertical blanks
189 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
191 if (crtc >= adev->mode_info.num_crtc)
194 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
195 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
199 if (acrtc_state->stream == NULL) {
200 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
205 return dc_stream_get_vblank_counter(acrtc_state->stream);
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
210 u32 *vbl, u32 *position)
212 uint32_t v_blank_start, v_blank_end, h_position, v_position;
214 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
217 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
218 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
221 if (acrtc_state->stream == NULL) {
222 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
228 * TODO rework base driver to use values directly.
229 * for now parse it back into reg-format
231 dc_stream_get_scanoutpos(acrtc_state->stream,
237 *position = v_position | (h_position << 16);
238 *vbl = v_blank_start | (v_blank_end << 16);
244 static bool dm_is_idle(void *handle)
250 static int dm_wait_for_idle(void *handle)
256 static bool dm_check_soft_reset(void *handle)
261 static int dm_soft_reset(void *handle)
267 static struct amdgpu_crtc *
268 get_crtc_by_otg_inst(struct amdgpu_device *adev,
271 struct drm_device *dev = adev_to_drm(adev);
272 struct drm_crtc *crtc;
273 struct amdgpu_crtc *amdgpu_crtc;
275 if (otg_inst == -1) {
277 return adev->mode_info.crtcs[0];
280 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
281 amdgpu_crtc = to_amdgpu_crtc(crtc);
283 if (amdgpu_crtc->otg_inst == otg_inst)
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
292 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
293 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
297 * dm_pflip_high_irq() - Handle pageflip interrupt
298 * @interrupt_params: ignored
300 * Handles the pageflip interrupt by notifying all interested parties
301 * that the pageflip has been completed.
303 static void dm_pflip_high_irq(void *interrupt_params)
305 struct amdgpu_crtc *amdgpu_crtc;
306 struct common_irq_params *irq_params = interrupt_params;
307 struct amdgpu_device *adev = irq_params->adev;
309 struct drm_pending_vblank_event *e;
310 struct dm_crtc_state *acrtc_state;
311 uint32_t vpos, hpos, v_blank_start, v_blank_end;
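/* Derive the OTG instance from the pflip irq source and look up its CRTC. */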
314 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
316 /* IRQ could occur when in initial stage */
317 /* TODO work and BO cleanup */
318 if (amdgpu_crtc == NULL) {
319 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
323 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
325 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
326 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
327 amdgpu_crtc->pflip_status,
328 AMDGPU_FLIP_SUBMITTED,
329 amdgpu_crtc->crtc_id,
331 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
335 /* page flip completed. */
336 e = amdgpu_crtc->event;
337 amdgpu_crtc->event = NULL;
342 acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
343 vrr_active = amdgpu_dm_vrr_active(acrtc_state);
345 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
347 !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
348 &v_blank_end, &hpos, &vpos) ||
349 (vpos < v_blank_start)) {
350 /* Update to correct count and vblank timestamp if racing with
351 * vblank irq. This also updates to the correct vblank timestamp
352 * even in VRR mode, as scanout is past the front-porch atm.
354 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
356 /* Wake up userspace by sending the pageflip event with proper
357 * count and timestamp of vblank of flip completion.
360 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
362 /* Event sent, so done with vblank for this flip */
363 drm_crtc_vblank_put(&amdgpu_crtc->base);
366 /* VRR active and inside front-porch: vblank count and
367 * timestamp for pageflip event will only be up to date after
368 * drm_crtc_handle_vblank() has been executed from late vblank
369 * irq handler after start of back-porch (vline 0). We queue the
370 * pageflip event for send-out by drm_crtc_handle_vblank() with
371 * updated timestamp and count, once it runs after us.
373 * We need to open-code this instead of using the helper
374 * drm_crtc_arm_vblank_event(), as that helper would
375 * call drm_crtc_accurate_vblank_count(), which we must
376 * not call in VRR mode while we are in front-porch!
379 /* sequence will be replaced by real count during send-out. */
380 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
381 e->pipe = amdgpu_crtc->crtc_id;
383 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
387 /* Keep track of vblank of this flip for flip throttling. We use the
388 * cooked hw counter, as that one is incremented at the start of this vblank
389 * of pageflip completion, so last_flip_vblank is the forbidden count
390 * for queueing new pageflips if vsync + VRR is enabled.
392 amdgpu_crtc->last_flip_vblank =
393 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
395 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
396 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
398 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399 amdgpu_crtc->crtc_id, amdgpu_crtc,
400 vrr_active, (int) !e);
403 static void dm_vupdate_high_irq(void *interrupt_params)
405 struct common_irq_params *irq_params = interrupt_params;
406 struct amdgpu_device *adev = irq_params->adev;
407 struct amdgpu_crtc *acrtc;
408 struct dm_crtc_state *acrtc_state;
411 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
414 acrtc_state = to_dm_crtc_state(acrtc->base.state);
416 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
418 amdgpu_dm_vrr_active(acrtc_state));
420 /* Core vblank handling is done here after end of front-porch in
421 * vrr mode, as vblank timestamping will only give valid results
422 * now that we are past the front-porch. This will also deliver
423 * page-flip completion events that have been queued to us
424 * if a pageflip happened inside front-porch.
426 if (amdgpu_dm_vrr_active(acrtc_state)) {
427 drm_crtc_handle_vblank(&acrtc->base);
429 /* BTR processing for pre-DCE12 ASICs */
430 if (acrtc_state->stream &&
431 adev->family < AMDGPU_FAMILY_AI) {
432 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
433 mod_freesync_handle_v_update(
434 adev->dm.freesync_module,
436 &acrtc_state->vrr_params);
438 dc_stream_adjust_vmin_vmax(
441 &acrtc_state->vrr_params.adjust);
442 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
449 * dm_crtc_high_irq() - Handles CRTC interrupt
450 * @interrupt_params: used for determining the CRTC instance
452 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
455 static void dm_crtc_high_irq(void *interrupt_params)
457 struct common_irq_params *irq_params = interrupt_params;
458 struct amdgpu_device *adev = irq_params->adev;
459 struct amdgpu_crtc *acrtc;
460 struct dm_crtc_state *acrtc_state;
463 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
467 acrtc_state = to_dm_crtc_state(acrtc->base.state);
469 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
470 amdgpu_dm_vrr_active(acrtc_state),
471 acrtc_state->active_planes);
474 * Core vblank handling at start of front-porch is only possible
475 * in non-vrr mode, as only there does vblank timestamping give
476 * valid results while still in the front-porch. Otherwise defer it
477 * to dm_vupdate_high_irq after end of front-porch.
479 if (!amdgpu_dm_vrr_active(acrtc_state))
480 drm_crtc_handle_vblank(&acrtc->base);
483 * The following must happen at the start of vblank, for crc
484 * computation and below-the-range btr support in vrr mode.
486 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
488 /* BTR updates need to happen before VUPDATE on Vega and above. */
489 if (adev->family < AMDGPU_FAMILY_AI)
492 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
494 if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
495 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
496 mod_freesync_handle_v_update(adev->dm.freesync_module,
498 &acrtc_state->vrr_params);
500 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
501 &acrtc_state->vrr_params.adjust);
505 * If there aren't any active_planes then DCH HUBP may be clock-gated.
506 * In that case, pageflip completion interrupts won't fire and pageflip
507 * completion events won't get delivered. Prevent this by sending
508 * pending pageflip events from here if a flip is still pending.
510 * If any planes are enabled, use dm_pflip_high_irq() instead, to
511 * avoid race conditions between flip programming and completion,
512 * which could cause too early flip completion events.
514 if (adev->family >= AMDGPU_FAMILY_RV &&
515 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
516 acrtc_state->active_planes == 0) {
518 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
520 drm_crtc_vblank_put(&acrtc->base);
522 acrtc->pflip_status = AMDGPU_FLIP_NONE;
525 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
528 static int dm_set_clockgating_state(void *handle,
529 enum amd_clockgating_state state)
534 static int dm_set_powergating_state(void *handle,
535 enum amd_powergating_state state)
540 /* Prototypes of private functions */
541 static int dm_early_init(void* handle);
543 /* Allocate memory for FBC compressed data */
544 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
546 struct drm_device *dev = connector->dev;
547 struct amdgpu_device *adev = drm_to_adev(dev);
548 struct dm_comressor_info *compressor = &adev->dm.compressor;
549 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
550 struct drm_display_mode *mode;
551 unsigned long max_size = 0;
553 if (adev->dm.dc->fbc_compressor == NULL)
556 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
559 if (compressor->bo_ptr)
563 list_for_each_entry(mode, &connector->modes, head) {
564 if (max_size < mode->htotal * mode->vtotal)
565 max_size = mode->htotal * mode->vtotal;
569 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
570 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
571 &compressor->gpu_addr, &compressor->cpu_addr);
574 DRM_ERROR("DM: Failed to initialize FBC\n");
576 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
577 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
584 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
585 int pipe, bool *enabled,
586 unsigned char *buf, int max_bytes)
588 struct drm_device *dev = dev_get_drvdata(kdev);
589 struct amdgpu_device *adev = drm_to_adev(dev);
590 struct drm_connector *connector;
591 struct drm_connector_list_iter conn_iter;
592 struct amdgpu_dm_connector *aconnector;
597 mutex_lock(&adev->dm.audio_lock);
599 drm_connector_list_iter_begin(dev, &conn_iter);
600 drm_for_each_connector_iter(connector, &conn_iter) {
601 aconnector = to_amdgpu_dm_connector(connector);
602 if (aconnector->audio_inst != port)
606 ret = drm_eld_size(connector->eld);
607 memcpy(buf, connector->eld, min(max_bytes, ret));
611 drm_connector_list_iter_end(&conn_iter);
613 mutex_unlock(&adev->dm.audio_lock);
615 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
621 .get_eld = amdgpu_dm_audio_component_get_eld,
624 static int amdgpu_dm_audio_component_bind(struct device *kdev,
625 struct device *hda_kdev, void *data)
627 struct drm_device *dev = dev_get_drvdata(kdev);
628 struct amdgpu_device *adev = drm_to_adev(dev);
629 struct drm_audio_component *acomp = data;
631 acomp->ops = &amdgpu_dm_audio_component_ops;
633 adev->dm.audio_component = acomp;
638 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
639 struct device *hda_kdev, void *data)
641 struct drm_device *dev = dev_get_drvdata(kdev);
642 struct amdgpu_device *adev = drm_to_adev(dev);
643 struct drm_audio_component *acomp = data;
647 adev->dm.audio_component = NULL;
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
651 .bind = amdgpu_dm_audio_component_bind,
652 .unbind = amdgpu_dm_audio_component_unbind,
655 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
662 adev->mode_info.audio.enabled = true;
664 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
666 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
667 adev->mode_info.audio.pin[i].channels = -1;
668 adev->mode_info.audio.pin[i].rate = -1;
669 adev->mode_info.audio.pin[i].bits_per_sample = -1;
670 adev->mode_info.audio.pin[i].status_bits = 0;
671 adev->mode_info.audio.pin[i].category_code = 0;
672 adev->mode_info.audio.pin[i].connected = false;
673 adev->mode_info.audio.pin[i].id =
674 adev->dm.dc->res_pool->audios[i]->inst;
675 adev->mode_info.audio.pin[i].offset = 0;
678 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
682 adev->dm.audio_registered = true;
687 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
692 if (!adev->mode_info.audio.enabled)
695 if (adev->dm.audio_registered) {
696 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
697 adev->dm.audio_registered = false;
700 /* TODO: Disable audio? */
702 adev->mode_info.audio.enabled = false;
705 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
707 struct drm_audio_component *acomp = adev->dm.audio_component;
709 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
710 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
712 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
717 static int dm_dmub_hw_init(struct amdgpu_device *adev)
719 const struct dmcub_firmware_header_v1_0 *hdr;
720 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
721 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
722 const struct firmware *dmub_fw = adev->dm.dmub_fw;
723 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
724 struct abm *abm = adev->dm.dc->res_pool->abm;
725 struct dmub_srv_hw_params hw_params;
726 enum dmub_status status;
727 const unsigned char *fw_inst_const, *fw_bss_data;
728 uint32_t i, fw_inst_const_size, fw_bss_data_size;
732 /* DMUB isn't supported on the ASIC. */
736 DRM_ERROR("No framebuffer info for DMUB service.\n");
741 /* Firmware required for DMUB support. */
742 DRM_ERROR("No firmware provided for DMUB.\n");
746 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
747 if (status != DMUB_STATUS_OK) {
748 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
752 if (!has_hw_support) {
753 DRM_INFO("DMUB unsupported on ASIC\n");
757 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
759 fw_inst_const = dmub_fw->data +
760 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
763 fw_bss_data = dmub_fw->data +
764 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765 le32_to_cpu(hdr->inst_const_bytes);
767 /* Copy firmware and bios info into FB memory. */
768 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
769 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
771 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
773 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
774 * amdgpu_ucode_init_single_fw will load dmub firmware
775 * fw_inst_const part to cw0; otherwise, the firmware back door load
776 * will be done by dm_dmub_hw_init
778 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
779 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
783 if (fw_bss_data_size)
784 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
785 fw_bss_data, fw_bss_data_size);
787 /* Copy firmware bios info into FB memory. */
788 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
791 /* Reset regions that need to be reset. */
792 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
793 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
795 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
796 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
798 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
799 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
801 /* Initialize hardware. */
802 memset(&hw_params, 0, sizeof(hw_params));
803 hw_params.fb_base = adev->gmc.fb_start;
804 hw_params.fb_offset = adev->gmc.aper_base;
806 /* backdoor load firmware and trigger dmub running */
807 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
808 hw_params.load_inst_const = true;
811 hw_params.psp_version = dmcu->psp_version;
813 for (i = 0; i < fb_info->num_fb; ++i)
814 hw_params.fb[i] = &fb_info->fb[i];
816 status = dmub_srv_hw_init(dmub_srv, &hw_params);
817 if (status != DMUB_STATUS_OK) {
818 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
822 /* Wait for firmware load to finish. */
823 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
824 if (status != DMUB_STATUS_OK)
825 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
827 /* Init DMCU and ABM if available. */
829 dmcu->funcs->dmcu_init(dmcu);
830 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
833 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
834 if (!adev->dm.dc->ctx->dmub_srv) {
835 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
839 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840 adev->dm.dmcub_fw_version);
845 static int amdgpu_dm_init(struct amdgpu_device *adev)
847 struct dc_init_data init_data;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849 struct dc_callback_init init_params;
853 adev->dm.ddev = adev_to_drm(adev);
854 adev->dm.adev = adev;
856 /* Zero all the fields */
857 memset(&init_data, 0, sizeof(init_data));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859 memset(&init_params, 0, sizeof(init_params));
862 mutex_init(&adev->dm.dc_lock);
863 mutex_init(&adev->dm.audio_lock);
865 if (amdgpu_dm_irq_init(adev)) {
866 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
870 init_data.asic_id.chip_family = adev->family;
872 init_data.asic_id.pci_revision_id = adev->pdev->revision;
873 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
875 init_data.asic_id.vram_width = adev->gmc.vram_width;
876 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
877 init_data.asic_id.atombios_base_address =
878 adev->mode_info.atom_context->bios;
880 init_data.driver = adev;
882 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
884 if (!adev->dm.cgs_device) {
885 DRM_ERROR("amdgpu: failed to create cgs device.\n");
889 init_data.cgs_device = adev->dm.cgs_device;
891 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
893 switch (adev->asic_type) {
898 init_data.flags.gpu_vm_support = true;
904 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
905 init_data.flags.fbc_support = true;
907 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
908 init_data.flags.multi_mon_pp_mclk_switch = true;
910 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
911 init_data.flags.disable_fractional_pwm = true;
913 init_data.flags.power_down_display_on_boot = true;
915 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
917 /* Display Core create. */
918 adev->dm.dc = dc_create(&init_data);
921 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
923 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
927 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
928 adev->dm.dc->debug.force_single_disp_pipe_split = false;
929 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
932 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
933 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
935 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
936 adev->dm.dc->debug.disable_stutter = true;
938 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
939 adev->dm.dc->debug.disable_dsc = true;
941 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
942 adev->dm.dc->debug.disable_clock_gate = true;
944 r = dm_dmub_hw_init(adev);
946 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
950 dc_hardware_init(adev->dm.dc);
952 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
953 if (!adev->dm.freesync_module) {
955 "amdgpu: failed to initialize freesync_module.\n");
957 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
958 adev->dm.freesync_module);
960 amdgpu_dm_init_color_mod();
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963 if (adev->asic_type >= CHIP_RAVEN) {
964 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
966 if (!adev->dm.hdcp_workqueue)
967 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
969 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
971 dc_init_callbacks(adev->dm.dc, &init_params);
974 if (amdgpu_dm_initialize_drm_device(adev)) {
976 "amdgpu: failed to initialize sw for display support.\n");
980 /* Update the actual used number of crtc */
981 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
983 /* create fake encoders for MST */
984 dm_dp_create_fake_mst_encoders(adev);
986 /* TODO: Add_display_info? */
988 /* TODO use dynamic cursor width */
989 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
990 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
992 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
994 "amdgpu: failed to initialize sw for display support.\n");
998 DRM_DEBUG_DRIVER("KMS initialized.\n");
1002 amdgpu_dm_fini(adev);
1007 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1011 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1012 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1015 amdgpu_dm_audio_fini(adev);
1017 amdgpu_dm_destroy_drm_device(&adev->dm);
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020 if (adev->dm.hdcp_workqueue) {
1021 hdcp_destroy(adev->dm.hdcp_workqueue);
1022 adev->dm.hdcp_workqueue = NULL;
1026 dc_deinit_callbacks(adev->dm.dc);
1028 if (adev->dm.dc->ctx->dmub_srv) {
1029 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1030 adev->dm.dc->ctx->dmub_srv = NULL;
1033 if (adev->dm.dmub_bo)
1034 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1035 &adev->dm.dmub_bo_gpu_addr,
1036 &adev->dm.dmub_bo_cpu_addr);
1038 /* DC Destroy TODO: Replace destroy DAL */
1040 dc_destroy(&adev->dm.dc);
1042 * TODO: pageflip, vblank interrupt
1044 * amdgpu_dm_irq_fini(adev);
1047 if (adev->dm.cgs_device) {
1048 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1049 adev->dm.cgs_device = NULL;
1051 if (adev->dm.freesync_module) {
1052 mod_freesync_destroy(adev->dm.freesync_module);
1053 adev->dm.freesync_module = NULL;
1056 mutex_destroy(&adev->dm.audio_lock);
1057 mutex_destroy(&adev->dm.dc_lock);
1062 static int load_dmcu_fw(struct amdgpu_device *adev)
1064 const char *fw_name_dmcu = NULL;
1066 const struct dmcu_firmware_header_v1_0 *hdr;
1068 switch (adev->asic_type) {
1069 #if defined(CONFIG_DRM_AMD_DC_SI)
1084 case CHIP_POLARIS11:
1085 case CHIP_POLARIS10:
1086 case CHIP_POLARIS12:
1094 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1095 case CHIP_SIENNA_CICHLID:
1096 case CHIP_NAVY_FLOUNDER:
1100 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1103 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1104 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1105 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1106 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1111 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1115 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1116 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1120 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1122 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1123 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1124 adev->dm.fw_dmcu = NULL;
1128 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1133 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1135 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1137 release_firmware(adev->dm.fw_dmcu);
1138 adev->dm.fw_dmcu = NULL;
1142 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1143 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1144 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1145 adev->firmware.fw_size +=
1146 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1148 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1149 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1150 adev->firmware.fw_size +=
1151 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1153 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1155 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1160 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1162 struct amdgpu_device *adev = ctx;
1164 return dm_read_reg(adev->dm.dc->ctx, address);
1167 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1170 struct amdgpu_device *adev = ctx;
1172 return dm_write_reg(adev->dm.dc->ctx, address, value);
1175 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1177 struct dmub_srv_create_params create_params;
1178 struct dmub_srv_region_params region_params;
1179 struct dmub_srv_region_info region_info;
1180 struct dmub_srv_fb_params fb_params;
1181 struct dmub_srv_fb_info *fb_info;
1182 struct dmub_srv *dmub_srv;
1183 const struct dmcub_firmware_header_v1_0 *hdr;
1184 const char *fw_name_dmub;
1185 enum dmub_asic dmub_asic;
1186 enum dmub_status status;
1189 switch (adev->asic_type) {
1191 dmub_asic = DMUB_ASIC_DCN21;
1192 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1194 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1195 case CHIP_SIENNA_CICHLID:
1196 dmub_asic = DMUB_ASIC_DCN30;
1197 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1199 case CHIP_NAVY_FLOUNDER:
1200 dmub_asic = DMUB_ASIC_DCN30;
1201 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1206 /* ASIC doesn't support DMUB. */
1210 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1212 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1216 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1218 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1222 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1224 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1225 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1226 AMDGPU_UCODE_ID_DMCUB;
1227 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1229 adev->firmware.fw_size +=
1230 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1232 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1233 adev->dm.dmcub_fw_version);
1236 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1238 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1239 dmub_srv = adev->dm.dmub_srv;
1242 DRM_ERROR("Failed to allocate DMUB service!\n");
1246 memset(&create_params, 0, sizeof(create_params));
1247 create_params.user_ctx = adev;
1248 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1249 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1250 create_params.asic = dmub_asic;
1252 /* Create the DMUB service. */
1253 status = dmub_srv_create(dmub_srv, &create_params);
1254 if (status != DMUB_STATUS_OK) {
1255 DRM_ERROR("Error creating DMUB service: %d\n", status);
1259 /* Calculate the size of all the regions for the DMUB service. */
1260 memset(&region_params, 0, sizeof(region_params));
1262 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1263 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1264 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1265 region_params.vbios_size = adev->bios_size;
1266 region_params.fw_bss_data = region_params.bss_data_size ?
1267 adev->dm.dmub_fw->data +
1268 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1269 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1270 region_params.fw_inst_const =
1271 adev->dm.dmub_fw->data +
1272 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1275 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1278 if (status != DMUB_STATUS_OK) {
1279 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1284 * Allocate a framebuffer based on the total size of all the regions.
1285 * TODO: Move this into GART.
1287 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1288 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1289 &adev->dm.dmub_bo_gpu_addr,
1290 &adev->dm.dmub_bo_cpu_addr);
1294 /* Rebase the regions on the framebuffer address. */
1295 memset(&fb_params, 0, sizeof(fb_params));
1296 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1297 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1298 fb_params.region_info = &region_info;
1300 adev->dm.dmub_fb_info =
1301 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1302 fb_info = adev->dm.dmub_fb_info;
1306 "Failed to allocate framebuffer info for DMUB service!\n");
1310 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1311 if (status != DMUB_STATUS_OK) {
1312 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1319 static int dm_sw_init(void *handle)
1321 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1324 r = dm_dmub_sw_init(adev);
1328 return load_dmcu_fw(adev);
1331 static int dm_sw_fini(void *handle)
1333 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1335 kfree(adev->dm.dmub_fb_info);
1336 adev->dm.dmub_fb_info = NULL;
1338 if (adev->dm.dmub_srv) {
1339 dmub_srv_destroy(adev->dm.dmub_srv);
1340 adev->dm.dmub_srv = NULL;
1343 release_firmware(adev->dm.dmub_fw);
1344 adev->dm.dmub_fw = NULL;
1346 release_firmware(adev->dm.fw_dmcu);
1347 adev->dm.fw_dmcu = NULL;
1352 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1354 struct amdgpu_dm_connector *aconnector;
1355 struct drm_connector *connector;
1356 struct drm_connector_list_iter iter;
1359 drm_connector_list_iter_begin(dev, &iter);
1360 drm_for_each_connector_iter(connector, &iter) {
1361 aconnector = to_amdgpu_dm_connector(connector);
1362 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1363 aconnector->mst_mgr.aux) {
1364 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1366 aconnector->base.base.id);
1368 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1370 DRM_ERROR("DM_MST: Failed to start MST\n");
1371 aconnector->dc_link->type =
1372 dc_connection_single;
1377 drm_connector_list_iter_end(&iter);
1382 static int dm_late_init(void *handle)
1384 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1386 struct dmcu_iram_parameters params;
1387 unsigned int linear_lut[16];
1389 struct dmcu *dmcu = NULL;
1392 if (!adev->dm.fw_dmcu)
1393 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1395 dmcu = adev->dm.dc->res_pool->dmcu;
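/* Build a 16-entry linear backlight LUT ramping from 0 to 0xFFFF. */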
1397 for (i = 0; i < 16; i++)
1398 linear_lut[i] = 0xFFFF * i / 15;
1401 params.backlight_ramping_start = 0xCCCC;
1402 params.backlight_ramping_reduction = 0xCCCCCCCC;
1403 params.backlight_lut_array_size = 16;
1404 params.backlight_lut_array = linear_lut;
1406 /* Min backlight level after ABM reduction. Don't allow below 1%:
1407 * 0xFFFF x 0.01 = 0x28F
1409 params.min_abm_backlight = 0x28F;
1411 /* In the case where abm is implemented on dmcub,
1412 * dmcu object will be null.
1413 * ABM 2.4 and up are implemented on dmcub.
1416 ret = dmcu_load_iram(dmcu, params);
1417 else if (adev->dm.dc->ctx->dmub_srv)
1418 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1423 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1426 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1428 struct amdgpu_dm_connector *aconnector;
1429 struct drm_connector *connector;
1430 struct drm_connector_list_iter iter;
1431 struct drm_dp_mst_topology_mgr *mgr;
1433 bool need_hotplug = false;
1435 drm_connector_list_iter_begin(dev, &iter);
1436 drm_for_each_connector_iter(connector, &iter) {
1437 aconnector = to_amdgpu_dm_connector(connector);
1438 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1439 aconnector->mst_port)
1442 mgr = &aconnector->mst_mgr;
1445 drm_dp_mst_topology_mgr_suspend(mgr);
1447 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1449 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1450 need_hotplug = true;
1454 drm_connector_list_iter_end(&iter);
1457 drm_kms_helper_hotplug_event(dev);
1460 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1462 struct smu_context *smu = &adev->smu;
1465 if (!is_support_sw_smu(adev))
1468 * This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1469 * on the Windows driver dc implementation.
1470 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1471 * should be passed to smu during boot up and resume from s3.
1472 * boot up: dc calculates dcn watermark clock settings within dc_create,
1473 * dcn20_resource_construct
1474 * then calls the pplib functions below to pass the settings to smu:
1475 * smu_set_watermarks_for_clock_ranges
1476 * smu_set_watermarks_table
1477 * navi10_set_watermarks_table
1478 * smu_write_watermarks_table
1480 * For Renoir, clock settings of dcn watermarks are also fixed values.
1481 * dc has implemented a different flow for the Windows driver:
1482 * dc_hardware_init / dc_set_power_state
1487 * smu_set_watermarks_for_clock_ranges
1488 * renoir_set_watermarks_table
1489 * smu_write_watermarks_table
1492 * dc_hardware_init -> amdgpu_dm_init
1493 * dc_set_power_state --> dm_resume
1495 * therefore, this function applies to navi10/12/14 but not Renoir
1498 switch (adev->asic_type) {
1507 ret = smu_write_watermarks_table(smu);
1509 DRM_ERROR("Failed to update WMTABLE!\n");
1517 * dm_hw_init() - Initialize DC device
1518 * @handle: The base driver device containing the amdgpu_dm device.
1520 * Initialize the &struct amdgpu_display_manager device. This involves calling
1521 * the initializers of each DM component, then populating the struct with them.
1523 * Although the function implies hardware initialization, both hardware and
1524 * software are initialized here. Splitting them out to their relevant init
1525 * hooks is a future TODO item.
1527 * Some notable things that are initialized here:
1529 * - Display Core, both software and hardware
1530 * - DC modules that we need (freesync and color management)
1531 * - DRM software states
1532 * - Interrupt sources and handlers
1534 * - Debug FS entries, if enabled
1536 static int dm_hw_init(void *handle)
1538 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539 /* Create DAL display manager */
1540 amdgpu_dm_init(adev);
1541 amdgpu_dm_hpd_init(adev);
1547 * dm_hw_fini() - Teardown DC device
1548 * @handle: The base driver device containing the amdgpu_dm device.
1550 * Teardown components within &struct amdgpu_display_manager that require
1551 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552 * were loaded. Also flush IRQ workqueues and disable them.
1554 static int dm_hw_fini(void *handle)
1556 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1558 amdgpu_dm_hpd_fini(adev);
1560 amdgpu_dm_irq_fini(adev);
1561 amdgpu_dm_fini(adev);
1566 static int dm_enable_vblank(struct drm_crtc *crtc);
1567 static void dm_disable_vblank(struct drm_crtc *crtc);
1569 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1570 struct dc_state *state, bool enable)
1572 enum dc_irq_source irq_source;
1573 struct amdgpu_crtc *acrtc;
1577 for (i = 0; i < state->stream_count; i++) {
1578 acrtc = get_crtc_by_otg_inst(
1579 adev, state->stream_status[i].primary_otg_inst);
1581 if (acrtc && state->stream_status[i].plane_count != 0) {
1582 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1583 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1584 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1585 acrtc->crtc_id, enable ? "en" : "dis", rc);
1587 DRM_WARN("Failed to %s pflip interrupts\n",
1588 enable ? "enable" : "disable");
1591 rc = dm_enable_vblank(&acrtc->base);
1593 DRM_WARN("Failed to enable vblank interrupts\n");
1595 dm_disable_vblank(&acrtc->base);
1603 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1605 struct dc_state *context = NULL;
1606 enum dc_status res = DC_ERROR_UNEXPECTED;
1608 struct dc_stream_state *del_streams[MAX_PIPES];
1609 int del_streams_count = 0;
1611 memset(del_streams, 0, sizeof(del_streams));
1613 context = dc_create_state(dc);
1614 if (context == NULL)
1615 goto context_alloc_fail;
1617 dc_resource_state_copy_construct_current(dc, context);
1619 /* First remove from context all streams */
1620 for (i = 0; i < context->stream_count; i++) {
1621 struct dc_stream_state *stream = context->streams[i];
1623 del_streams[del_streams_count++] = stream;
1626 /* Remove all planes for removed streams and then remove the streams */
1627 for (i = 0; i < del_streams_count; i++) {
1628 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1629 res = DC_FAIL_DETACH_SURFACES;
1633 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1639 res = dc_validate_global_state(dc, context, false);
1642 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1646 res = dc_commit_state(dc, context);
1649 dc_release_state(context);
1655 static int dm_suspend(void *handle)
1657 struct amdgpu_device *adev = handle;
1658 struct amdgpu_display_manager *dm = &adev->dm;
1661 if (amdgpu_in_reset(adev)) {
1662 mutex_lock(&dm->dc_lock);
1663 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1665 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1667 amdgpu_dm_commit_zero_streams(dm->dc);
1669 amdgpu_dm_irq_suspend(adev);
1674 WARN_ON(adev->dm.cached_state);
1675 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1677 s3_handle_mst(adev_to_drm(adev), true);
1679 amdgpu_dm_irq_suspend(adev);
1682 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1687 static struct amdgpu_dm_connector *
1688 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1689 struct drm_crtc *crtc)
1692 struct drm_connector_state *new_con_state;
1693 struct drm_connector *connector;
1694 struct drm_crtc *crtc_from_state;
1696 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1697 crtc_from_state = new_con_state->crtc;
1699 if (crtc_from_state == crtc)
1700 return to_amdgpu_dm_connector(connector);
1706 static void emulated_link_detect(struct dc_link *link)
1708 struct dc_sink_init_data sink_init_data = { 0 };
1709 struct display_sink_capability sink_caps = { 0 };
1710 enum dc_edid_status edid_status;
1711 struct dc_context *dc_ctx = link->ctx;
1712 struct dc_sink *sink = NULL;
1713 struct dc_sink *prev_sink = NULL;
1715 link->type = dc_connection_none;
1716 prev_sink = link->local_sink;
1718 if (prev_sink != NULL)
1719 dc_sink_retain(prev_sink);
1721 switch (link->connector_signal) {
1722 case SIGNAL_TYPE_HDMI_TYPE_A: {
1723 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1728 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1729 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1734 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1735 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1740 case SIGNAL_TYPE_LVDS: {
1741 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1742 sink_caps.signal = SIGNAL_TYPE_LVDS;
1746 case SIGNAL_TYPE_EDP: {
1747 sink_caps.transaction_type =
1748 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1749 sink_caps.signal = SIGNAL_TYPE_EDP;
1753 case SIGNAL_TYPE_DISPLAY_PORT: {
1754 sink_caps.transaction_type =
1755 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1756 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1761 DC_ERROR("Invalid connector type! signal:%d\n",
1762 link->connector_signal);
1766 sink_init_data.link = link;
1767 sink_init_data.sink_signal = sink_caps.signal;
1769 sink = dc_sink_create(&sink_init_data);
1771 DC_ERROR("Failed to create sink!\n");
1775 /* dc_sink_create returns a new reference */
1776 link->local_sink = sink;
1778 edid_status = dm_helpers_read_local_edid(
1783 if (edid_status != EDID_OK)
1784 DC_ERROR("Failed to read EDID");
1788 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1789 struct amdgpu_display_manager *dm)
1792 struct dc_surface_update surface_updates[MAX_SURFACES];
1793 struct dc_plane_info plane_infos[MAX_SURFACES];
1794 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1795 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1796 struct dc_stream_update stream_update;
1800 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1803 dm_error("Failed to allocate update bundle\n");
1807 for (k = 0; k < dc_state->stream_count; k++) {
1808 bundle->stream_update.stream = dc_state->streams[k];
1810 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1811 bundle->surface_updates[m].surface =
1812 dc_state->stream_status->plane_states[m];
1813 bundle->surface_updates[m].surface->force_full_update =
1816 dc_commit_updates_for_stream(
1817 dm->dc, bundle->surface_updates,
1818 dc_state->stream_status->plane_count,
1819 dc_state->streams[k], &bundle->stream_update, dc_state);
1828 static int dm_resume(void *handle)
1830 struct amdgpu_device *adev = handle;
1831 struct drm_device *ddev = adev_to_drm(adev);
1832 struct amdgpu_display_manager *dm = &adev->dm;
1833 struct amdgpu_dm_connector *aconnector;
1834 struct drm_connector *connector;
1835 struct drm_connector_list_iter iter;
1836 struct drm_crtc *crtc;
1837 struct drm_crtc_state *new_crtc_state;
1838 struct dm_crtc_state *dm_new_crtc_state;
1839 struct drm_plane *plane;
1840 struct drm_plane_state *new_plane_state;
1841 struct dm_plane_state *dm_new_plane_state;
1842 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1843 enum dc_connection_type new_connection_type = dc_connection_none;
1844 struct dc_state *dc_state;
1847 if (amdgpu_in_reset(adev)) {
1848 dc_state = dm->cached_dc_state;
1850 r = dm_dmub_hw_init(adev);
1852 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1854 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1857 amdgpu_dm_irq_resume_early(adev);
1859 for (i = 0; i < dc_state->stream_count; i++) {
1860 dc_state->streams[i]->mode_changed = true;
1861 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1862 dc_state->stream_status->plane_states[j]->update_flags.raw
1867 WARN_ON(!dc_commit_state(dm->dc, dc_state));
1869 dm_gpureset_commit_state(dm->cached_dc_state, dm);
1871 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1873 dc_release_state(dm->cached_dc_state);
1874 dm->cached_dc_state = NULL;
1876 amdgpu_dm_irq_resume_late(adev);
1878 mutex_unlock(&dm->dc_lock);
1882 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1883 dc_release_state(dm_state->context);
1884 dm_state->context = dc_create_state(dm->dc);
1885 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1886 dc_resource_state_construct(dm->dc, dm_state->context);
1888 /* Before powering on DC we need to re-initialize DMUB. */
1889 r = dm_dmub_hw_init(adev);
1891 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1893 /* power on hardware */
1894 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1896 /* program HPD filter */
1900 * early enable HPD Rx IRQ, should be done before set mode as short
1901 * pulse interrupts are used for MST
1903 amdgpu_dm_irq_resume_early(adev);
1905 /* On resume we need to rewrite the MSTM control bits to enable MST */
1906 s3_handle_mst(ddev, false);
1909 drm_connector_list_iter_begin(ddev, &iter);
1910 drm_for_each_connector_iter(connector, &iter) {
1911 aconnector = to_amdgpu_dm_connector(connector);
1914 * this is the case when traversing through already created
1915 * MST connectors, should be skipped
1917 if (aconnector->mst_port)
1920 mutex_lock(&aconnector->hpd_lock);
1921 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1922 DRM_ERROR("KMS: Failed to detect connector\n");
1924 if (aconnector->base.force && new_connection_type == dc_connection_none)
1925 emulated_link_detect(aconnector->dc_link);
1927 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1929 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1930 aconnector->fake_enable = false;
1932 if (aconnector->dc_sink)
1933 dc_sink_release(aconnector->dc_sink);
1934 aconnector->dc_sink = NULL;
1935 amdgpu_dm_update_connector_after_detect(aconnector);
1936 mutex_unlock(&aconnector->hpd_lock);
1938 drm_connector_list_iter_end(&iter);
1940 /* Force mode set in atomic commit */
1941 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1942 new_crtc_state->active_changed = true;
1945 * atomic_check is expected to create the dc states. We need to release
1946 * them here, since they were duplicated as part of the suspend
1949 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1950 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1951 if (dm_new_crtc_state->stream) {
1952 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1953 dc_stream_release(dm_new_crtc_state->stream);
1954 dm_new_crtc_state->stream = NULL;
1958 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1959 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1960 if (dm_new_plane_state->dc_state) {
1961 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1962 dc_plane_state_release(dm_new_plane_state->dc_state);
1963 dm_new_plane_state->dc_state = NULL;
1967 drm_atomic_helper_resume(ddev, dm->cached_state);
1969 dm->cached_state = NULL;
1971 amdgpu_dm_irq_resume_late(adev);
1973 amdgpu_dm_smu_write_watermarks_table(adev);
1981 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1982 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1983 * the base driver's device list to be initialized and torn down accordingly.
1985 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1988 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1990 .early_init = dm_early_init,
1991 .late_init = dm_late_init,
1992 .sw_init = dm_sw_init,
1993 .sw_fini = dm_sw_fini,
1994 .hw_init = dm_hw_init,
1995 .hw_fini = dm_hw_fini,
1996 .suspend = dm_suspend,
1997 .resume = dm_resume,
1998 .is_idle = dm_is_idle,
1999 .wait_for_idle = dm_wait_for_idle,
2000 .check_soft_reset = dm_check_soft_reset,
2001 .soft_reset = dm_soft_reset,
2002 .set_clockgating_state = dm_set_clockgating_state,
2003 .set_powergating_state = dm_set_powergating_state,
2006 const struct amdgpu_ip_block_version dm_ip_block =
2008 .type = AMD_IP_BLOCK_TYPE_DCE,
2012 .funcs = &amdgpu_dm_funcs,
2022 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2023 .fb_create = amdgpu_display_user_framebuffer_create,
2024 .output_poll_changed = drm_fb_helper_output_poll_changed,
2025 .atomic_check = amdgpu_dm_atomic_check,
2026 .atomic_commit = amdgpu_dm_atomic_commit,
2029 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2030 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2033 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2035 u32 max_cll, min_cll, max, min, q, r;
2036 struct amdgpu_dm_backlight_caps *caps;
2037 struct amdgpu_display_manager *dm;
2038 struct drm_connector *conn_base;
2039 struct amdgpu_device *adev;
2040 struct dc_link *link = NULL;
2041 static const u8 pre_computed_values[] = {
2042 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2043 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2045 if (!aconnector || !aconnector->dc_link)
2048 link = aconnector->dc_link;
2049 if (link->connector_signal != SIGNAL_TYPE_EDP)
2052 conn_base = &aconnector->base;
2053 adev = drm_to_adev(conn_base->dev);
2055 caps = &dm->backlight_caps;
2056 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2057 caps->aux_support = false;
2058 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2059 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2061 if (caps->ext_caps->bits.oled == 1 ||
2062 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2063 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2064 caps->aux_support = true;
2066 /* From the specification (CTA-861-G), for calculating the maximum
2067 * luminance we need to use:
2068 * Luminance = 50*2**(CV/32)
2069 * Where CV is a one-byte value.
2070 * For calculating this expression we may need floating point precision;
2071 * to avoid this complexity level, we take advantage that CV is divided
2072 * by a constant. From Euclid's division algorithm, we know that CV
2073 * can be written as: CV = 32*q + r. Next, we replace CV in the
2074 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2075 * need to pre-compute the value of r/32. For pre-computing the values,
2076 * we just used the following Ruby line:
2077 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2078 * The results of the above expressions can be verified at
2079 * pre_computed_values.
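 *
 * Illustrative example (the CV value is assumed, not taken from any panel):
 * for max_cll = 70 we get q = 2 and r = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which matches
 * 50*2**(70/32) ~= 228 computed with floating point.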
2083 max = (1 << q) * pre_computed_values[r];
2085 // min luminance: maxLum * (CV/255)^2 / 100
2086 q = DIV_ROUND_CLOSEST(min_cll, 255);
2087 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2089 caps->aux_max_input_signal = max;
2090 caps->aux_min_input_signal = min;
2093 void amdgpu_dm_update_connector_after_detect(
2094 struct amdgpu_dm_connector *aconnector)
2096 struct drm_connector *connector = &aconnector->base;
2097 struct drm_device *dev = connector->dev;
2098 struct dc_sink *sink;
2100 /* MST handled by drm_mst framework */
2101 if (aconnector->mst_mgr.mst_state == true)
2105 sink = aconnector->dc_link->local_sink;
2107 dc_sink_retain(sink);
2110 * Edid mgmt connector gets first update only in mode_valid hook and then
2111 * the connector sink is set to either fake or physical sink depending on link status.
2112 * Skip if already done during boot.
2114 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2115 && aconnector->dc_em_sink) {
2118 * For S3 resume with headless, use em_sink to fake the stream
2119 * because on resume connector->sink is set to NULL
2121 mutex_lock(&dev->mode_config.mutex);
2124 if (aconnector->dc_sink) {
2125 amdgpu_dm_update_freesync_caps(connector, NULL);
2127 * retain and release below are used to
2128 * bump up the refcount for the sink because the link doesn't point
2129 * to it anymore after disconnect, so on the next crtc-to-connector
2130 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
2132 dc_sink_release(aconnector->dc_sink);
2134 aconnector->dc_sink = sink;
2135 dc_sink_retain(aconnector->dc_sink);
2136 amdgpu_dm_update_freesync_caps(connector,
2139 amdgpu_dm_update_freesync_caps(connector, NULL);
2140 if (!aconnector->dc_sink) {
2141 aconnector->dc_sink = aconnector->dc_em_sink;
2142 dc_sink_retain(aconnector->dc_sink);
2146 mutex_unlock(&dev->mode_config.mutex);
2149 dc_sink_release(sink);
2154 * TODO: temporary guard while looking for a proper fix:
2155 * if this sink is an MST sink, we should not do anything
2157 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2158 dc_sink_release(sink);
2162 if (aconnector->dc_sink == sink) {
2164 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2167 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168 aconnector->connector_id);
2170 dc_sink_release(sink);
2174 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175 aconnector->connector_id, aconnector->dc_sink, sink);
2177 mutex_lock(&dev->mode_config.mutex);
2180 * 1. Update status of the drm connector
2181 * 2. Send an event and let userspace tell us what to do
2185 * TODO: check if we still need the S3 mode update workaround.
2186 * If yes, put it here.
2188 if (aconnector->dc_sink)
2189 amdgpu_dm_update_freesync_caps(connector, NULL);
2191 aconnector->dc_sink = sink;
2192 dc_sink_retain(aconnector->dc_sink);
2193 if (sink->dc_edid.length == 0) {
2194 aconnector->edid = NULL;
2195 if (aconnector->dc_link->aux_mode) {
2196 drm_dp_cec_unset_edid(
2197 &aconnector->dm_dp_aux.aux);
2201 (struct edid *)sink->dc_edid.raw_edid;
2203 drm_connector_update_edid_property(connector,
2205 drm_add_edid_modes(connector, aconnector->edid);
2207 if (aconnector->dc_link->aux_mode)
2208 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2212 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2213 update_connector_ext_caps(aconnector);
2215 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2216 amdgpu_dm_update_freesync_caps(connector, NULL);
2217 drm_connector_update_edid_property(connector, NULL);
2218 aconnector->num_modes = 0;
2219 dc_sink_release(aconnector->dc_sink);
2220 aconnector->dc_sink = NULL;
2221 aconnector->edid = NULL;
2222 #ifdef CONFIG_DRM_AMD_DC_HDCP
2223 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2224 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2225 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2229 mutex_unlock(&dev->mode_config.mutex);
2232 dc_sink_release(sink);
2235 static void handle_hpd_irq(void *param)
2237 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2238 struct drm_connector *connector = &aconnector->base;
2239 struct drm_device *dev = connector->dev;
2240 enum dc_connection_type new_connection_type = dc_connection_none;
2241 #ifdef CONFIG_DRM_AMD_DC_HDCP
2242 struct amdgpu_device *adev = drm_to_adev(dev);
2246 * In case of failure or MST, there is no need to update the connector status or notify the OS,
2247 * since (in the MST case) MST does this in its own context.
2249 mutex_lock(&aconnector->hpd_lock);
2251 #ifdef CONFIG_DRM_AMD_DC_HDCP
2252 if (adev->dm.hdcp_workqueue)
2253 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2255 if (aconnector->fake_enable)
2256 aconnector->fake_enable = false;
2258 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2259 DRM_ERROR("KMS: Failed to detect connector\n");
2261 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2262 emulated_link_detect(aconnector->dc_link);
2265 drm_modeset_lock_all(dev);
2266 dm_restore_drm_connector_state(dev, connector);
2267 drm_modeset_unlock_all(dev);
2269 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2270 drm_kms_helper_hotplug_event(dev);
2272 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2273 amdgpu_dm_update_connector_after_detect(aconnector);
2276 drm_modeset_lock_all(dev);
2277 dm_restore_drm_connector_state(dev, connector);
2278 drm_modeset_unlock_all(dev);
2280 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2281 drm_kms_helper_hotplug_event(dev);
2283 mutex_unlock(&aconnector->hpd_lock);
2287 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2289 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2291 bool new_irq_handled = false;
2293 int dpcd_bytes_to_read;
2295 const int max_process_count = 30;
2296 int process_count = 0;
2298 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2300 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2301 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2302 /* DPCD 0x200 - 0x201 for downstream IRQ */
2303 dpcd_addr = DP_SINK_COUNT;
2305 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2306 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2307 dpcd_addr = DP_SINK_COUNT_ESI;
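/*
 * For reference, per the DisplayPort ESI register layout: DP_SINK_COUNT_ESI
 * is 0x2002 and DP_PSR_ERROR_STATUS is 0x2006, so four bytes are read in
 * this case: the sink count followed by the device/link service IRQ
 * vectors that the MST manager inspects below.
 */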
2310 dret = drm_dp_dpcd_read(
2311 &aconnector->dm_dp_aux.aux,
2314 dpcd_bytes_to_read);
2316 while (dret == dpcd_bytes_to_read &&
2317 process_count < max_process_count) {
2323 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2324 /* handle HPD short pulse irq */
2325 if (aconnector->mst_mgr.mst_state)
2327 &aconnector->mst_mgr,
2331 if (new_irq_handled) {
2332 /* ACK at DPCD to notify the downstream device */
2333 const int ack_dpcd_bytes_to_write =
2334 dpcd_bytes_to_read - 1;
2336 for (retry = 0; retry < 3; retry++) {
2339 wret = drm_dp_dpcd_write(
2340 &aconnector->dm_dp_aux.aux,
2343 ack_dpcd_bytes_to_write);
2344 if (wret == ack_dpcd_bytes_to_write)
2348 /* check if there is new irq to be handled */
2349 dret = drm_dp_dpcd_read(
2350 &aconnector->dm_dp_aux.aux,
2353 dpcd_bytes_to_read);
2355 new_irq_handled = false;
2361 if (process_count == max_process_count)
2362 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2365 static void handle_hpd_rx_irq(void *param)
2367 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2368 struct drm_connector *connector = &aconnector->base;
2369 struct drm_device *dev = connector->dev;
2370 struct dc_link *dc_link = aconnector->dc_link;
2371 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2372 enum dc_connection_type new_connection_type = dc_connection_none;
2373 #ifdef CONFIG_DRM_AMD_DC_HDCP
2374 union hpd_irq_data hpd_irq_data;
2375 struct amdgpu_device *adev = drm_to_adev(dev);
2377 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2381 * TODO: Temporarily add a mutex so the hpd interrupt does not cause a gpio
2382 * conflict; once an i2c helper is implemented, this mutex should be retired.
2385 if (dc_link->type != dc_connection_mst_branch)
2386 mutex_lock(&aconnector->hpd_lock);
2389 #ifdef CONFIG_DRM_AMD_DC_HDCP
2390 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2392 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2394 !is_mst_root_connector) {
2395 /* Downstream Port status changed. */
2396 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2397 DRM_ERROR("KMS: Failed to detect connector\n");
2399 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2400 emulated_link_detect(dc_link);
2402 if (aconnector->fake_enable)
2403 aconnector->fake_enable = false;
2405 amdgpu_dm_update_connector_after_detect(aconnector);
2408 drm_modeset_lock_all(dev);
2409 dm_restore_drm_connector_state(dev, connector);
2410 drm_modeset_unlock_all(dev);
2412 drm_kms_helper_hotplug_event(dev);
2413 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2415 if (aconnector->fake_enable)
2416 aconnector->fake_enable = false;
2418 amdgpu_dm_update_connector_after_detect(aconnector);
2421 drm_modeset_lock_all(dev);
2422 dm_restore_drm_connector_state(dev, connector);
2423 drm_modeset_unlock_all(dev);
2425 drm_kms_helper_hotplug_event(dev);
2428 #ifdef CONFIG_DRM_AMD_DC_HDCP
2429 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2430 if (adev->dm.hdcp_workqueue)
2431 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2434 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2435 (dc_link->type == dc_connection_mst_branch))
2436 dm_handle_hpd_rx_irq(aconnector);
2438 if (dc_link->type != dc_connection_mst_branch) {
2439 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2440 mutex_unlock(&aconnector->hpd_lock);
2444 static void register_hpd_handlers(struct amdgpu_device *adev)
2446 struct drm_device *dev = adev_to_drm(adev);
2447 struct drm_connector *connector;
2448 struct amdgpu_dm_connector *aconnector;
2449 const struct dc_link *dc_link;
2450 struct dc_interrupt_params int_params = {0};
2452 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2453 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2455 list_for_each_entry(connector,
2456 &dev->mode_config.connector_list, head) {
2458 aconnector = to_amdgpu_dm_connector(connector);
2459 dc_link = aconnector->dc_link;
2461 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2462 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2463 int_params.irq_source = dc_link->irq_source_hpd;
2465 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2467 (void *) aconnector);
2470 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2472 /* Also register for DP short pulse (hpd_rx). */
2473 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2474 int_params.irq_source = dc_link->irq_source_hpd_rx;
2476 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2478 (void *) aconnector);
2483 #if defined(CONFIG_DRM_AMD_DC_SI)
2484 /* Register IRQ sources and initialize IRQ callbacks */
2485 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2487 struct dc *dc = adev->dm.dc;
2488 struct common_irq_params *c_irq_params;
2489 struct dc_interrupt_params int_params = {0};
2492 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2494 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2495 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2498 * Actions of amdgpu_irq_add_id():
2499 * 1. Register a set() function with base driver.
2500 * Base driver will call set() function to enable/disable an
2501 * interrupt in DC hardware.
2502 * 2. Register amdgpu_dm_irq_handler().
2503 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2504 * coming from DC hardware.
2505 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2506 * for acknowledging and handling. */
2508 /* Use VBLANK interrupt */
2509 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2510 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2512 DRM_ERROR("Failed to add crtc irq id!\n");
2516 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2517 int_params.irq_source =
2518 dc_interrupt_to_irq_source(dc, i+1 , 0);
2520 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2522 c_irq_params->adev = adev;
2523 c_irq_params->irq_src = int_params.irq_source;
2525 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2526 dm_crtc_high_irq, c_irq_params);
2529 /* Use GRPH_PFLIP interrupt */
2530 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2531 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2532 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2534 DRM_ERROR("Failed to add page flip irq id!\n");
2538 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2539 int_params.irq_source =
2540 dc_interrupt_to_irq_source(dc, i, 0);
2542 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2544 c_irq_params->adev = adev;
2545 c_irq_params->irq_src = int_params.irq_source;
2547 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2548 dm_pflip_high_irq, c_irq_params);
2553 r = amdgpu_irq_add_id(adev, client_id,
2554 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2556 DRM_ERROR("Failed to add hpd irq id!\n");
2560 register_hpd_handlers(adev);
2566 /* Register IRQ sources and initialize IRQ callbacks */
2567 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2569 struct dc *dc = adev->dm.dc;
2570 struct common_irq_params *c_irq_params;
2571 struct dc_interrupt_params int_params = {0};
2574 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2576 if (adev->asic_type >= CHIP_VEGA10)
2577 client_id = SOC15_IH_CLIENTID_DCE;
2579 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2580 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2583 * Actions of amdgpu_irq_add_id():
2584 * 1. Register a set() function with base driver.
2585 * Base driver will call set() function to enable/disable an
2586 * interrupt in DC hardware.
2587 * 2. Register amdgpu_dm_irq_handler().
2588 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2589 * coming from DC hardware.
2590 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2591 * for acknowledging and handling. */
2593 /* Use VBLANK interrupt */
2594 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2595 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2597 DRM_ERROR("Failed to add crtc irq id!\n");
2601 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2602 int_params.irq_source =
2603 dc_interrupt_to_irq_source(dc, i, 0);
2605 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2607 c_irq_params->adev = adev;
2608 c_irq_params->irq_src = int_params.irq_source;
2610 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2611 dm_crtc_high_irq, c_irq_params);
2614 /* Use VUPDATE interrupt */
2615 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2616 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2618 DRM_ERROR("Failed to add vupdate irq id!\n");
2622 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2623 int_params.irq_source =
2624 dc_interrupt_to_irq_source(dc, i, 0);
2626 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2628 c_irq_params->adev = adev;
2629 c_irq_params->irq_src = int_params.irq_source;
2631 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2632 dm_vupdate_high_irq, c_irq_params);
2635 /* Use GRPH_PFLIP interrupt */
2636 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2637 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2638 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2640 DRM_ERROR("Failed to add page flip irq id!\n");
2644 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2645 int_params.irq_source =
2646 dc_interrupt_to_irq_source(dc, i, 0);
2648 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2650 c_irq_params->adev = adev;
2651 c_irq_params->irq_src = int_params.irq_source;
2653 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2654 dm_pflip_high_irq, c_irq_params);
2659 r = amdgpu_irq_add_id(adev, client_id,
2660 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2662 DRM_ERROR("Failed to add hpd irq id!\n");
2666 register_hpd_handlers(adev);
2671 #if defined(CONFIG_DRM_AMD_DC_DCN)
2672 /* Register IRQ sources and initialize IRQ callbacks */
2673 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2675 struct dc *dc = adev->dm.dc;
2676 struct common_irq_params *c_irq_params;
2677 struct dc_interrupt_params int_params = {0};
2681 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2682 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2685 * Actions of amdgpu_irq_add_id():
2686 * 1. Register a set() function with base driver.
2687 * Base driver will call set() function to enable/disable an
2688 * interrupt in DC hardware.
2689 * 2. Register amdgpu_dm_irq_handler().
2690 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2691 * coming from DC hardware.
2692 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2693 * for acknowledging and handling.
2696 /* Use VSTARTUP interrupt */
2697 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2698 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2700 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2703 DRM_ERROR("Failed to add crtc irq id!\n");
2707 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2708 int_params.irq_source =
2709 dc_interrupt_to_irq_source(dc, i, 0);
2711 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2713 c_irq_params->adev = adev;
2714 c_irq_params->irq_src = int_params.irq_source;
2716 amdgpu_dm_irq_register_interrupt(
2717 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2720 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2721 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2722 * to trigger at end of each vblank, regardless of state of the lock,
2723 * matching DCE behaviour.
2725 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2726 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2728 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2731 DRM_ERROR("Failed to add vupdate irq id!\n");
2735 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2736 int_params.irq_source =
2737 dc_interrupt_to_irq_source(dc, i, 0);
2739 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2741 c_irq_params->adev = adev;
2742 c_irq_params->irq_src = int_params.irq_source;
2744 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2745 dm_vupdate_high_irq, c_irq_params);
2748 /* Use GRPH_PFLIP interrupt */
2749 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2750 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2752 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2754 DRM_ERROR("Failed to add page flip irq id!\n");
2758 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2759 int_params.irq_source =
2760 dc_interrupt_to_irq_source(dc, i, 0);
2762 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2764 c_irq_params->adev = adev;
2765 c_irq_params->irq_src = int_params.irq_source;
2767 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2768 dm_pflip_high_irq, c_irq_params);
2773 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2776 DRM_ERROR("Failed to add hpd irq id!\n");
2780 register_hpd_handlers(adev);
2787 * Acquires the lock for the atomic state object and returns
2788 * the new atomic state.
2790 * This should only be called during atomic check.
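 *
 * A minimal usage sketch, assuming a typical atomic-check caller:
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * after which dm_state->context can be inspected or modified.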
2792 static int dm_atomic_get_state(struct drm_atomic_state *state,
2793 struct dm_atomic_state **dm_state)
2795 struct drm_device *dev = state->dev;
2796 struct amdgpu_device *adev = drm_to_adev(dev);
2797 struct amdgpu_display_manager *dm = &adev->dm;
2798 struct drm_private_state *priv_state;
2803 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2804 if (IS_ERR(priv_state))
2805 return PTR_ERR(priv_state);
2807 *dm_state = to_dm_atomic_state(priv_state);
2812 static struct dm_atomic_state *
2813 dm_atomic_get_new_state(struct drm_atomic_state *state)
2815 struct drm_device *dev = state->dev;
2816 struct amdgpu_device *adev = drm_to_adev(dev);
2817 struct amdgpu_display_manager *dm = &adev->dm;
2818 struct drm_private_obj *obj;
2819 struct drm_private_state *new_obj_state;
2822 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2823 if (obj->funcs == dm->atomic_obj.funcs)
2824 return to_dm_atomic_state(new_obj_state);
2830 static struct drm_private_state *
2831 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2833 struct dm_atomic_state *old_state, *new_state;
2835 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2839 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2841 old_state = to_dm_atomic_state(obj->state);
2843 if (old_state && old_state->context)
2844 new_state->context = dc_copy_state(old_state->context);
2846 if (!new_state->context) {
2851 return &new_state->base;
2854 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2855 struct drm_private_state *state)
2857 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2859 if (dm_state && dm_state->context)
2860 dc_release_state(dm_state->context);
2865 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2866 .atomic_duplicate_state = dm_atomic_duplicate_state,
2867 .atomic_destroy_state = dm_atomic_destroy_state,
2870 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2872 struct dm_atomic_state *state;
2875 adev->mode_info.mode_config_initialized = true;
2877 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2878 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2880 adev_to_drm(adev)->mode_config.max_width = 16384;
2881 adev_to_drm(adev)->mode_config.max_height = 16384;
2883 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2884 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2885 /* indicates support for immediate flip */
2886 adev_to_drm(adev)->mode_config.async_page_flip = true;
2888 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2890 state = kzalloc(sizeof(*state), GFP_KERNEL);
2894 state->context = dc_create_state(adev->dm.dc);
2895 if (!state->context) {
2900 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2902 drm_atomic_private_obj_init(adev_to_drm(adev),
2903 &adev->dm.atomic_obj,
2905 &dm_atomic_state_funcs);
2907 r = amdgpu_display_modeset_create_props(adev);
2911 r = amdgpu_dm_audio_init(adev);
2918 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2919 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2920 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2922 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2923 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2925 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2927 #if defined(CONFIG_ACPI)
2928 struct amdgpu_dm_backlight_caps caps;
2930 memset(&caps, 0, sizeof(caps));
2932 if (dm->backlight_caps.caps_valid)
2935 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2936 if (caps.caps_valid) {
2937 dm->backlight_caps.caps_valid = true;
2938 if (caps.aux_support)
2940 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2941 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2943 dm->backlight_caps.min_input_signal =
2944 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2945 dm->backlight_caps.max_input_signal =
2946 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2949 if (dm->backlight_caps.aux_support)
2952 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2953 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2957 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2964 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2965 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2970 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
2971 unsigned *min, unsigned *max)
2976 if (caps->aux_support) {
2977 // Firmware limits are in nits, DC API wants millinits.
2978 *max = 1000 * caps->aux_max_input_signal;
2979 *min = 1000 * caps->aux_min_input_signal;
2981 // Firmware limits are 8-bit, PWM control is 16-bit.
2982 *max = 0x101 * caps->max_input_signal;
2983 *min = 0x101 * caps->min_input_signal;
2988 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
2989 uint32_t brightness)
2993 if (!get_brightness_range(caps, &min, &max))
2996 // Rescale 0..255 to min..max
2997 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
2998 AMDGPU_MAX_BL_LEVEL);
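/*
 * Worked example, assuming the PWM path with the default firmware limits
 * of 12 and 255: get_brightness_range() yields min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, so a user brightness of 0 maps to 3084,
 * 255 maps to 65535, and intermediate values are rescaled linearly
 * between the two.
 */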
3001 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3002 uint32_t brightness)
3006 if (!get_brightness_range(caps, &min, &max))
3009 if (brightness < min)
3011 // Rescale min..max to 0..255
3012 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3016 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3018 struct amdgpu_display_manager *dm = bl_get_data(bd);
3019 struct amdgpu_dm_backlight_caps caps;
3020 struct dc_link *link = NULL;
3024 amdgpu_dm_update_backlight_caps(dm);
3025 caps = dm->backlight_caps;
3027 link = (struct dc_link *)dm->backlight_link;
3029 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3030 // Change brightness based on AUX property
3031 if (caps.aux_support)
3032 return set_backlight_via_aux(link, brightness);
3034 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3039 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3041 struct amdgpu_display_manager *dm = bl_get_data(bd);
3042 int ret = dc_link_get_backlight_level(dm->backlight_link);
3044 if (ret == DC_ERROR_UNEXPECTED)
3045 return bd->props.brightness;
3046 return convert_brightness_to_user(&dm->backlight_caps, ret);
3049 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3050 .options = BL_CORE_SUSPENDRESUME,
3051 .get_brightness = amdgpu_dm_backlight_get_brightness,
3052 .update_status = amdgpu_dm_backlight_update_status,
3056 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3059 struct backlight_properties props = { 0 };
3061 amdgpu_dm_update_backlight_caps(dm);
3063 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3064 props.brightness = AMDGPU_MAX_BL_LEVEL;
3065 props.type = BACKLIGHT_RAW;
3067 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3068 adev_to_drm(dm->adev)->primary->index);
3070 dm->backlight_dev = backlight_device_register(bl_name,
3071 adev_to_drm(dm->adev)->dev,
3073 &amdgpu_dm_backlight_ops,
3076 if (IS_ERR(dm->backlight_dev))
3077 DRM_ERROR("DM: Backlight registration failed!\n");
3079 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3084 static int initialize_plane(struct amdgpu_display_manager *dm,
3085 struct amdgpu_mode_info *mode_info, int plane_id,
3086 enum drm_plane_type plane_type,
3087 const struct dc_plane_cap *plane_cap)
3089 struct drm_plane *plane;
3090 unsigned long possible_crtcs;
3093 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3095 DRM_ERROR("KMS: Failed to allocate plane\n");
3098 plane->type = plane_type;
3101 * HACK: IGT tests expect that the primary plane for a CRTC
3102 * can only have one possible CRTC. Only expose support for
3103 * any CRTC if they're not going to be used as a primary plane
3104 * for a CRTC - like overlay or underlay planes.
3106 possible_crtcs = 1 << plane_id;
3107 if (plane_id >= dm->dc->caps.max_streams)
3108 possible_crtcs = 0xff;
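/*
 * E.g. plane_id 0 yields possible_crtcs = 0x1 (CRTC 0 only), while an
 * overlay plane with plane_id >= max_streams advertises 0xff and may be
 * assigned to any CRTC.
 */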
3110 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3113 DRM_ERROR("KMS: Failed to initialize plane\n");
3119 mode_info->planes[plane_id] = plane;
3125 static void register_backlight_device(struct amdgpu_display_manager *dm,
3126 struct dc_link *link)
3128 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3129 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3131 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3132 link->type != dc_connection_none) {
3134 * Even if registration fails, we should continue with
3135 * DM initialization because not having a backlight control
3136 * is better than a black screen.
3138 amdgpu_dm_register_backlight_device(dm);
3140 if (dm->backlight_dev)
3141 dm->backlight_link = link;
3148 * In this architecture, the association
3149 * connector -> encoder -> crtc
3150 * is not really required. The crtc and connector will hold the
3151 * display_index as an abstraction to use with the DAL component
3153 * Returns 0 on success
3155 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3157 struct amdgpu_display_manager *dm = &adev->dm;
3159 struct amdgpu_dm_connector *aconnector = NULL;
3160 struct amdgpu_encoder *aencoder = NULL;
3161 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3163 int32_t primary_planes;
3164 enum dc_connection_type new_connection_type = dc_connection_none;
3165 const struct dc_plane_cap *plane;
3167 link_cnt = dm->dc->caps.max_links;
3168 if (amdgpu_dm_mode_config_init(dm->adev)) {
3169 DRM_ERROR("DM: Failed to initialize mode config\n");
3173 /* There is one primary plane per CRTC */
3174 primary_planes = dm->dc->caps.max_streams;
3175 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3178 * Initialize primary planes, implicit planes for legacy IOCTLS.
3179 * Order is reversed to match iteration order in atomic check.
3181 for (i = (primary_planes - 1); i >= 0; i--) {
3182 plane = &dm->dc->caps.planes[i];
3184 if (initialize_plane(dm, mode_info, i,
3185 DRM_PLANE_TYPE_PRIMARY, plane)) {
3186 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3192 * Initialize overlay planes, index starting after primary planes.
3193 * These planes have a higher DRM index than the primary planes since
3194 * they should be considered as having a higher z-order.
3195 * Order is reversed to match iteration order in atomic check.
3197 * Only support DCN for now, and only expose one so we don't encourage
3198 * userspace to use up all the pipes.
3200 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3201 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3203 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3206 if (!plane->blends_with_above || !plane->blends_with_below)
3209 if (!plane->pixel_format_support.argb8888)
3212 if (initialize_plane(dm, NULL, primary_planes + i,
3213 DRM_PLANE_TYPE_OVERLAY, plane)) {
3214 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3218 /* Only create one overlay plane. */
3222 for (i = 0; i < dm->dc->caps.max_streams; i++)
3223 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3224 DRM_ERROR("KMS: Failed to initialize crtc\n");
3228 dm->display_indexes_num = dm->dc->caps.max_streams;
3230 /* loops over all connectors on the board */
3231 for (i = 0; i < link_cnt; i++) {
3232 struct dc_link *link = NULL;
3234 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3236 "KMS: Cannot support more than %d display indexes\n",
3237 AMDGPU_DM_MAX_DISPLAY_INDEX);
3241 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3245 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3249 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3250 DRM_ERROR("KMS: Failed to initialize encoder\n");
3254 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3255 DRM_ERROR("KMS: Failed to initialize connector\n");
3259 link = dc_get_link_at_index(dm->dc, i);
3261 if (!dc_link_detect_sink(link, &new_connection_type))
3262 DRM_ERROR("KMS: Failed to detect connector\n");
3264 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3265 emulated_link_detect(link);
3266 amdgpu_dm_update_connector_after_detect(aconnector);
3268 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3269 amdgpu_dm_update_connector_after_detect(aconnector);
3270 register_backlight_device(dm, link);
3271 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3272 amdgpu_dm_set_psr_caps(link);
3278 /* Software is initialized. Now we can register interrupt handlers. */
3279 switch (adev->asic_type) {
3280 #if defined(CONFIG_DRM_AMD_DC_SI)
3285 if (dce60_register_irq_handlers(dm->adev)) {
3286 DRM_ERROR("DM: Failed to initialize IRQ\n");
3300 case CHIP_POLARIS11:
3301 case CHIP_POLARIS10:
3302 case CHIP_POLARIS12:
3307 if (dce110_register_irq_handlers(dm->adev)) {
3308 DRM_ERROR("DM: Failed to initialize IRQ\n");
3312 #if defined(CONFIG_DRM_AMD_DC_DCN)
3318 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3319 case CHIP_SIENNA_CICHLID:
3320 case CHIP_NAVY_FLOUNDER:
3322 if (dcn10_register_irq_handlers(dm->adev)) {
3323 DRM_ERROR("DM: Failed to initialize IRQ\n");
3329 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3333 /* No userspace support. */
3334 dm->dc->debug.disable_tri_buf = true;
3344 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3346 drm_mode_config_cleanup(dm->ddev);
3347 drm_atomic_private_obj_fini(&dm->atomic_obj);
3351 /******************************************************************************
3352 * amdgpu_display_funcs functions
3353 *****************************************************************************/
3356 * dm_bandwidth_update - program display watermarks
3358 * @adev: amdgpu_device pointer
3360 * Calculate and program the display watermarks and line buffer allocation.
3362 static void dm_bandwidth_update(struct amdgpu_device *adev)
3364 /* TODO: implement later */
3367 static const struct amdgpu_display_funcs dm_display_funcs = {
3368 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3369 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3370 .backlight_set_level = NULL, /* never called for DC */
3371 .backlight_get_level = NULL, /* never called for DC */
3372 .hpd_sense = NULL,/* called unconditionally */
3373 .hpd_set_polarity = NULL, /* called unconditionally */
3374 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3375 .page_flip_get_scanoutpos =
3376 dm_crtc_get_scanoutpos,/* called unconditionally */
3377 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3378 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3381 #if defined(CONFIG_DEBUG_KERNEL_DC)
3383 static ssize_t s3_debug_store(struct device *device,
3384 struct device_attribute *attr,
3390 struct drm_device *drm_dev = dev_get_drvdata(device);
3391 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3393 ret = kstrtoint(buf, 0, &s3_state);
3398 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3403 return ret == 0 ? count : 0;
3406 DEVICE_ATTR_WO(s3_debug);
3410 static int dm_early_init(void *handle)
3412 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3414 switch (adev->asic_type) {
3415 #if defined(CONFIG_DRM_AMD_DC_SI)
3419 adev->mode_info.num_crtc = 6;
3420 adev->mode_info.num_hpd = 6;
3421 adev->mode_info.num_dig = 6;
3424 adev->mode_info.num_crtc = 2;
3425 adev->mode_info.num_hpd = 2;
3426 adev->mode_info.num_dig = 2;
3431 adev->mode_info.num_crtc = 6;
3432 adev->mode_info.num_hpd = 6;
3433 adev->mode_info.num_dig = 6;
3436 adev->mode_info.num_crtc = 4;
3437 adev->mode_info.num_hpd = 6;
3438 adev->mode_info.num_dig = 7;
3442 adev->mode_info.num_crtc = 2;
3443 adev->mode_info.num_hpd = 6;
3444 adev->mode_info.num_dig = 6;
3448 adev->mode_info.num_crtc = 6;
3449 adev->mode_info.num_hpd = 6;
3450 adev->mode_info.num_dig = 7;
3453 adev->mode_info.num_crtc = 3;
3454 adev->mode_info.num_hpd = 6;
3455 adev->mode_info.num_dig = 9;
3458 adev->mode_info.num_crtc = 2;
3459 adev->mode_info.num_hpd = 6;
3460 adev->mode_info.num_dig = 9;
3462 case CHIP_POLARIS11:
3463 case CHIP_POLARIS12:
3464 adev->mode_info.num_crtc = 5;
3465 adev->mode_info.num_hpd = 5;
3466 adev->mode_info.num_dig = 5;
3468 case CHIP_POLARIS10:
3470 adev->mode_info.num_crtc = 6;
3471 adev->mode_info.num_hpd = 6;
3472 adev->mode_info.num_dig = 6;
3477 adev->mode_info.num_crtc = 6;
3478 adev->mode_info.num_hpd = 6;
3479 adev->mode_info.num_dig = 6;
3481 #if defined(CONFIG_DRM_AMD_DC_DCN)
3483 adev->mode_info.num_crtc = 4;
3484 adev->mode_info.num_hpd = 4;
3485 adev->mode_info.num_dig = 4;
3490 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3491 case CHIP_SIENNA_CICHLID:
3492 case CHIP_NAVY_FLOUNDER:
3494 adev->mode_info.num_crtc = 6;
3495 adev->mode_info.num_hpd = 6;
3496 adev->mode_info.num_dig = 6;
3499 adev->mode_info.num_crtc = 5;
3500 adev->mode_info.num_hpd = 5;
3501 adev->mode_info.num_dig = 5;
3504 adev->mode_info.num_crtc = 4;
3505 adev->mode_info.num_hpd = 4;
3506 adev->mode_info.num_dig = 4;
3509 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3513 amdgpu_dm_set_irq_funcs(adev);
3515 if (adev->mode_info.funcs == NULL)
3516 adev->mode_info.funcs = &dm_display_funcs;
3519 * Note: Do NOT change adev->audio_endpt_rreg and
3520 * adev->audio_endpt_wreg because they are initialised in
3521 * amdgpu_device_init()
3523 #if defined(CONFIG_DEBUG_KERNEL_DC)
3525 adev_to_drm(adev)->dev,
3526 &dev_attr_s3_debug);
3532 static bool modeset_required(struct drm_crtc_state *crtc_state,
3533 struct dc_stream_state *new_stream,
3534 struct dc_stream_state *old_stream)
3536 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3539 static bool modereset_required(struct drm_crtc_state *crtc_state)
3541 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3544 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3546 drm_encoder_cleanup(encoder);
3550 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3551 .destroy = amdgpu_dm_encoder_destroy,
3555 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3556 struct dc_scaling_info *scaling_info)
3558 int scale_w, scale_h;
3560 memset(scaling_info, 0, sizeof(*scaling_info));
3562 /* Source is fixed 16.16, but we ignore the fractional part for now... */
3563 scaling_info->src_rect.x = state->src_x >> 16;
3564 scaling_info->src_rect.y = state->src_y >> 16;
3566 scaling_info->src_rect.width = state->src_w >> 16;
3567 if (scaling_info->src_rect.width == 0)
3570 scaling_info->src_rect.height = state->src_h >> 16;
3571 if (scaling_info->src_rect.height == 0)
3574 scaling_info->dst_rect.x = state->crtc_x;
3575 scaling_info->dst_rect.y = state->crtc_y;
3577 if (state->crtc_w == 0)
3580 scaling_info->dst_rect.width = state->crtc_w;
3582 if (state->crtc_h == 0)
3585 scaling_info->dst_rect.height = state->crtc_h;
3587 /* DRM doesn't specify clipping on destination output. */
3588 scaling_info->clip_rect = scaling_info->dst_rect;
3590 /* TODO: Validate scaling per-format with DC plane caps */
3591 scale_w = scaling_info->dst_rect.width * 1000 /
3592 scaling_info->src_rect.width;
3594 if (scale_w < 250 || scale_w > 16000)
3597 scale_h = scaling_info->dst_rect.height * 1000 /
3598 scaling_info->src_rect.height;
3600 if (scale_h < 250 || scale_h > 16000)
3604 * The "scaling_quality" can be ignored for now; quality = 0 lets DC
3605 * assume reasonable defaults based on the format.
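 *
 * scale_w and scale_h above are destination/source ratios in units of
 * 1/1000, so the 250..16000 window allows up to 4x downscaling and up to
 * 16x upscaling; e.g. a 1920-pixel-wide source may map to a destination
 * between 480 and 30720 pixels wide before DC validation even sees it.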
3611 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3612 uint64_t *tiling_flags, bool *tmz_surface)
3614 struct amdgpu_bo *rbo;
3619 *tmz_surface = false;
3623 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3624 r = amdgpu_bo_reserve(rbo, false);
3627 /* Don't show error message when returning -ERESTARTSYS */
3628 if (r != -ERESTARTSYS)
3629 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3634 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3637 *tmz_surface = amdgpu_bo_encrypted(rbo);
3639 amdgpu_bo_unreserve(rbo);
3644 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3646 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3648 return offset ? (address + offset * 256) : 0;
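/*
 * For example, a DCC_OFFSET_256B value of 0x40 places the DCC metadata
 * 0x40 * 256 = 0x4000 bytes after the surface base address.
 */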
3652 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3653 const struct amdgpu_framebuffer *afb,
3654 const enum surface_pixel_format format,
3655 const enum dc_rotation_angle rotation,
3656 const struct plane_size *plane_size,
3657 const union dc_tiling_info *tiling_info,
3658 const uint64_t info,
3659 struct dc_plane_dcc_param *dcc,
3660 struct dc_plane_address *address,
3661 bool force_disable_dcc)
3663 struct dc *dc = adev->dm.dc;
3664 struct dc_dcc_surface_param input;
3665 struct dc_surface_dcc_cap output;
3666 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3667 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3668 uint64_t dcc_address;
3670 memset(&input, 0, sizeof(input));
3671 memset(&output, 0, sizeof(output));
3673 if (force_disable_dcc)
3679 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3682 if (!dc->cap_funcs.get_dcc_compression_cap)
3685 input.format = format;
3686 input.surface_size.width = plane_size->surface_size.width;
3687 input.surface_size.height = plane_size->surface_size.height;
3688 input.swizzle_mode = tiling_info->gfx9.swizzle;
3690 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3691 input.scan = SCAN_DIRECTION_HORIZONTAL;
3692 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3693 input.scan = SCAN_DIRECTION_VERTICAL;
3695 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3698 if (!output.capable)
3701 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3706 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3707 dcc->independent_64b_blks = i64b;
3709 dcc_address = get_dcc_address(afb->address, info);
3710 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3711 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3717 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3718 const struct amdgpu_framebuffer *afb,
3719 const enum surface_pixel_format format,
3720 const enum dc_rotation_angle rotation,
3721 const uint64_t tiling_flags,
3722 union dc_tiling_info *tiling_info,
3723 struct plane_size *plane_size,
3724 struct dc_plane_dcc_param *dcc,
3725 struct dc_plane_address *address,
3727 bool force_disable_dcc)
3729 const struct drm_framebuffer *fb = &afb->base;
3732 memset(tiling_info, 0, sizeof(*tiling_info));
3733 memset(plane_size, 0, sizeof(*plane_size));
3734 memset(dcc, 0, sizeof(*dcc));
3735 memset(address, 0, sizeof(*address));
3737 address->tmz_surface = tmz_surface;
3739 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3740 plane_size->surface_size.x = 0;
3741 plane_size->surface_size.y = 0;
3742 plane_size->surface_size.width = fb->width;
3743 plane_size->surface_size.height = fb->height;
3744 plane_size->surface_pitch =
3745 fb->pitches[0] / fb->format->cpp[0];
3747 address->type = PLN_ADDR_TYPE_GRAPHICS;
3748 address->grph.addr.low_part = lower_32_bits(afb->address);
3749 address->grph.addr.high_part = upper_32_bits(afb->address);
3750 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3751 uint64_t chroma_addr = afb->address + fb->offsets[1];
3753 plane_size->surface_size.x = 0;
3754 plane_size->surface_size.y = 0;
3755 plane_size->surface_size.width = fb->width;
3756 plane_size->surface_size.height = fb->height;
3757 plane_size->surface_pitch =
3758 fb->pitches[0] / fb->format->cpp[0];
3760 plane_size->chroma_size.x = 0;
3761 plane_size->chroma_size.y = 0;
3762 /* TODO: set these based on surface format */
3763 plane_size->chroma_size.width = fb->width / 2;
3764 plane_size->chroma_size.height = fb->height / 2;
3766 plane_size->chroma_pitch =
3767 fb->pitches[1] / fb->format->cpp[1];
3769 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3770 address->video_progressive.luma_addr.low_part =
3771 lower_32_bits(afb->address);
3772 address->video_progressive.luma_addr.high_part =
3773 upper_32_bits(afb->address);
3774 address->video_progressive.chroma_addr.low_part =
3775 lower_32_bits(chroma_addr);
3776 address->video_progressive.chroma_addr.high_part =
3777 upper_32_bits(chroma_addr);
3780 /* Fill GFX8 params */
3781 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3782 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3784 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3785 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3786 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3787 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3788 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3790 /* XXX fix me for VI */
3791 tiling_info->gfx8.num_banks = num_banks;
3792 tiling_info->gfx8.array_mode =
3793 DC_ARRAY_2D_TILED_THIN1;
3794 tiling_info->gfx8.tile_split = tile_split;
3795 tiling_info->gfx8.bank_width = bankw;
3796 tiling_info->gfx8.bank_height = bankh;
3797 tiling_info->gfx8.tile_aspect = mtaspect;
3798 tiling_info->gfx8.tile_mode =
3799 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3800 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3801 == DC_ARRAY_1D_TILED_THIN1) {
3802 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3805 tiling_info->gfx8.pipe_config =
3806 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3808 if (adev->asic_type == CHIP_VEGA10 ||
3809 adev->asic_type == CHIP_VEGA12 ||
3810 adev->asic_type == CHIP_VEGA20 ||
3811 adev->asic_type == CHIP_NAVI10 ||
3812 adev->asic_type == CHIP_NAVI14 ||
3813 adev->asic_type == CHIP_NAVI12 ||
3814 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3815 adev->asic_type == CHIP_SIENNA_CICHLID ||
3816 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3818 adev->asic_type == CHIP_RENOIR ||
3819 adev->asic_type == CHIP_RAVEN) {
3820 /* Fill GFX9 params */
3821 tiling_info->gfx9.num_pipes =
3822 adev->gfx.config.gb_addr_config_fields.num_pipes;
3823 tiling_info->gfx9.num_banks =
3824 adev->gfx.config.gb_addr_config_fields.num_banks;
3825 tiling_info->gfx9.pipe_interleave =
3826 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3827 tiling_info->gfx9.num_shader_engines =
3828 adev->gfx.config.gb_addr_config_fields.num_se;
3829 tiling_info->gfx9.max_compressed_frags =
3830 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3831 tiling_info->gfx9.num_rb_per_se =
3832 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3833 tiling_info->gfx9.swizzle =
3834 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3835 tiling_info->gfx9.shaderEnable = 1;
3837 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3838 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3839 adev->asic_type == CHIP_NAVY_FLOUNDER)
3840 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3842 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3843 plane_size, tiling_info,
3844 tiling_flags, dcc, address,
3854 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3855 bool *per_pixel_alpha, bool *global_alpha,
3856 int *global_alpha_value)
3858 *per_pixel_alpha = false;
3859 *global_alpha = false;
3860 *global_alpha_value = 0xff;
3862 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3865 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3866 static const uint32_t alpha_formats[] = {
3867 DRM_FORMAT_ARGB8888,
3868 DRM_FORMAT_RGBA8888,
3869 DRM_FORMAT_ABGR8888,
3871 uint32_t format = plane_state->fb->format->format;
3874 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3875 if (format == alpha_formats[i]) {
3876 *per_pixel_alpha = true;
3882 if (plane_state->alpha < 0xffff) {
3883 *global_alpha = true;
3884 *global_alpha_value = plane_state->alpha >> 8;
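/*
 * Example: the DRM plane "alpha" property is 16 bit, so 0x8000 (about 50%
 * opacity) becomes a global_alpha_value of 0x80 after the shift above,
 * while 0xffff (fully opaque) leaves the global alpha override disabled.
 */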
3889 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3890 const enum surface_pixel_format format,
3891 enum dc_color_space *color_space)
3895 *color_space = COLOR_SPACE_SRGB;
3897 /* DRM color properties only affect non-RGB formats. */
3898 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3901 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3903 switch (plane_state->color_encoding) {
3904 case DRM_COLOR_YCBCR_BT601:
3906 *color_space = COLOR_SPACE_YCBCR601;
3908 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3911 case DRM_COLOR_YCBCR_BT709:
3913 *color_space = COLOR_SPACE_YCBCR709;
3915 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3918 case DRM_COLOR_YCBCR_BT2020:
3920 *color_space = COLOR_SPACE_2020_YCBCR;
3933 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3934 const struct drm_plane_state *plane_state,
3935 const uint64_t tiling_flags,
3936 struct dc_plane_info *plane_info,
3937 struct dc_plane_address *address,
3939 bool force_disable_dcc)
3941 const struct drm_framebuffer *fb = plane_state->fb;
3942 const struct amdgpu_framebuffer *afb =
3943 to_amdgpu_framebuffer(plane_state->fb);
3944 struct drm_format_name_buf format_name;
3947 memset(plane_info, 0, sizeof(*plane_info));
3949 switch (fb->format->format) {
3951 plane_info->format =
3952 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3954 case DRM_FORMAT_RGB565:
3955 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3957 case DRM_FORMAT_XRGB8888:
3958 case DRM_FORMAT_ARGB8888:
3959 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3961 case DRM_FORMAT_XRGB2101010:
3962 case DRM_FORMAT_ARGB2101010:
3963 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3965 case DRM_FORMAT_XBGR2101010:
3966 case DRM_FORMAT_ABGR2101010:
3967 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3969 case DRM_FORMAT_XBGR8888:
3970 case DRM_FORMAT_ABGR8888:
3971 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3973 case DRM_FORMAT_NV21:
3974 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3976 case DRM_FORMAT_NV12:
3977 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3979 case DRM_FORMAT_P010:
3980 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3982 case DRM_FORMAT_XRGB16161616F:
3983 case DRM_FORMAT_ARGB16161616F:
3984 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3986 case DRM_FORMAT_XBGR16161616F:
3987 case DRM_FORMAT_ABGR16161616F:
3988 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3992 "Unsupported screen format %s\n",
3993 drm_get_format_name(fb->format->format, &format_name));
3997 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3998 case DRM_MODE_ROTATE_0:
3999 plane_info->rotation = ROTATION_ANGLE_0;
4001 case DRM_MODE_ROTATE_90:
4002 plane_info->rotation = ROTATION_ANGLE_90;
4004 case DRM_MODE_ROTATE_180:
4005 plane_info->rotation = ROTATION_ANGLE_180;
4007 case DRM_MODE_ROTATE_270:
4008 plane_info->rotation = ROTATION_ANGLE_270;
4011 plane_info->rotation = ROTATION_ANGLE_0;
4015 plane_info->visible = true;
4016 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4018 plane_info->layer_index = 0;
4020 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4021 &plane_info->color_space);
4025 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4026 plane_info->rotation, tiling_flags,
4027 &plane_info->tiling_info,
4028 &plane_info->plane_size,
4029 &plane_info->dcc, address, tmz_surface,
4034 fill_blending_from_plane_state(
4035 plane_state, &plane_info->per_pixel_alpha,
4036 &plane_info->global_alpha, &plane_info->global_alpha_value);
4041 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4042 struct dc_plane_state *dc_plane_state,
4043 struct drm_plane_state *plane_state,
4044 struct drm_crtc_state *crtc_state)
4046 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4047 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4048 struct dc_scaling_info scaling_info;
4049 struct dc_plane_info plane_info;
4051 bool force_disable_dcc = false;
4053 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4057 dc_plane_state->src_rect = scaling_info.src_rect;
4058 dc_plane_state->dst_rect = scaling_info.dst_rect;
4059 dc_plane_state->clip_rect = scaling_info.clip_rect;
4060 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4062 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4063 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4064 dm_plane_state->tiling_flags,
4066 &dc_plane_state->address,
4067 dm_plane_state->tmz_surface,
4072 dc_plane_state->format = plane_info.format;
4073 dc_plane_state->color_space = plane_info.color_space;
4074 dc_plane_state->format = plane_info.format;
4075 dc_plane_state->plane_size = plane_info.plane_size;
4076 dc_plane_state->rotation = plane_info.rotation;
4077 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4078 dc_plane_state->stereo_format = plane_info.stereo_format;
4079 dc_plane_state->tiling_info = plane_info.tiling_info;
4080 dc_plane_state->visible = plane_info.visible;
4081 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4082 dc_plane_state->global_alpha = plane_info.global_alpha;
4083 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4084 dc_plane_state->dcc = plane_info.dcc;
4085 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4088 * Always set input transfer function, since plane state is refreshed
4091 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4098 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4099 const struct dm_connector_state *dm_state,
4100 struct dc_stream_state *stream)
4102 enum amdgpu_rmx_type rmx_type;
4104 struct rect src = { 0 }; /* viewport in composition space */
4105 struct rect dst = { 0 }; /* stream addressable area */
4107 /* no mode. nothing to be done */
4111 /* Full screen scaling by default */
4112 src.width = mode->hdisplay;
4113 src.height = mode->vdisplay;
4114 dst.width = stream->timing.h_addressable;
4115 dst.height = stream->timing.v_addressable;
4118 rmx_type = dm_state->scaling;
4119 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4120 if (src.width * dst.height <
4121 src.height * dst.width) {
4122 /* height needs less upscaling/more downscaling */
4123 dst.width = src.width *
4124 dst.height / src.height;
4126 /* width needs less upscaling/more downscaling */
4127 dst.height = src.height *
4128 dst.width / src.width;
4130 } else if (rmx_type == RMX_CENTER) {
4134 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4135 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4137 if (dm_state->underscan_enable) {
4138 dst.x += dm_state->underscan_hborder / 2;
4139 dst.y += dm_state->underscan_vborder / 2;
4140 dst.width -= dm_state->underscan_hborder;
4141 dst.height -= dm_state->underscan_vborder;
4148 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4149 dst.x, dst.y, dst.width, dst.height);
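/*
 * Worked example (assumed values): a 1920x1080 mode scaled with RMX_ASPECT
 * onto a 2560x1600 addressable timing keeps the 16:9 aspect ratio, giving a
 * 2560x1440 destination rectangle centered at (0, 80); underscan borders,
 * when enabled, shrink that rectangle further.
 */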
4153 static enum dc_color_depth
4154 convert_color_depth_from_display_info(const struct drm_connector *connector,
4155 bool is_y420, int requested_bpc)
4162 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4163 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4165 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4167 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4170 bpc = (uint8_t)connector->display_info.bpc;
4171 /* Assume 8 bpc by default if no bpc is specified. */
4172 bpc = bpc ? bpc : 8;
4175 if (requested_bpc > 0) {
4177 * Cap display bpc based on the user requested value.
4179 * The value of state->max_bpc may not be correctly updated
4180 * depending on when the connector gets added to the state
4181 * or if this was called outside of atomic check, so it
4182 * can't be used directly.
4184 bpc = min_t(u8, bpc, requested_bpc);
4186 /* Round down to the nearest even number. */
4187 bpc = bpc - (bpc & 1);
4193 * Temporary workaround: DRM doesn't parse color depth for
4194 * EDID revisions before 1.4
4195 * TODO: Fix edid parsing
4197 return COLOR_DEPTH_888;
4199 return COLOR_DEPTH_666;
4201 return COLOR_DEPTH_888;
4203 return COLOR_DEPTH_101010;
4205 return COLOR_DEPTH_121212;
4207 return COLOR_DEPTH_141414;
4209 return COLOR_DEPTH_161616;
4211 return COLOR_DEPTH_UNDEFINED;
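/*
 * Example: a sink advertising 10 bpc combined with a requested_bpc of 8
 * gives min(10, 8) = 8, which is already even, so COLOR_DEPTH_888 is
 * returned.
 */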
4215 static enum dc_aspect_ratio
4216 get_aspect_ratio(const struct drm_display_mode *mode_in)
4218 /* 1-1 mapping, since both enums follow the HDMI spec. */
4219 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4222 static enum dc_color_space
4223 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4225 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4227 switch (dc_crtc_timing->pixel_encoding) {
4228 case PIXEL_ENCODING_YCBCR422:
4229 case PIXEL_ENCODING_YCBCR444:
4230 case PIXEL_ENCODING_YCBCR420:
4233 * 27030 kHz is the separation point between HDTV and SDTV;
4234 * according to the HDMI spec, we use YCbCr709 and YCbCr601
4237 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4238 if (dc_crtc_timing->flags.Y_ONLY)
4240 COLOR_SPACE_YCBCR709_LIMITED;
4242 color_space = COLOR_SPACE_YCBCR709;
4244 if (dc_crtc_timing->flags.Y_ONLY)
4246 COLOR_SPACE_YCBCR601_LIMITED;
4248 color_space = COLOR_SPACE_YCBCR601;
4253 case PIXEL_ENCODING_RGB:
4254 color_space = COLOR_SPACE_SRGB;
4265 static bool adjust_colour_depth_from_display_info(
4266 struct dc_crtc_timing *timing_out,
4267 const struct drm_display_info *info)
4269 enum dc_color_depth depth = timing_out->display_color_depth;
4272 normalized_clk = timing_out->pix_clk_100hz / 10;
4273 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4274 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4275 normalized_clk /= 2;
4276 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
4278 case COLOR_DEPTH_888:
4280 case COLOR_DEPTH_101010:
4281 normalized_clk = (normalized_clk * 30) / 24;
4283 case COLOR_DEPTH_121212:
4284 normalized_clk = (normalized_clk * 36) / 24;
4286 case COLOR_DEPTH_161616:
4287 normalized_clk = (normalized_clk * 48) / 24;
4290 /* The above depths are the only ones valid for HDMI. */
4293 if (normalized_clk <= info->max_tmds_clock) {
4294 timing_out->display_color_depth = depth;
4297 } while (--depth > COLOR_DEPTH_666);
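/*
 * Worked example, assuming an HDMI 2.0 sink reporting max_tmds_clock of
 * 600000 kHz: a 3840x2160@60 mode has a 594000 kHz pixel clock, so 12 bpc
 * (594000 * 36 / 24 = 891000) and 10 bpc (742500) both exceed the TMDS
 * limit and the loop settles on 8 bpc.
 */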
4301 static void fill_stream_properties_from_drm_display_mode(
4302 struct dc_stream_state *stream,
4303 const struct drm_display_mode *mode_in,
4304 const struct drm_connector *connector,
4305 const struct drm_connector_state *connector_state,
4306 const struct dc_stream_state *old_stream,
4309 struct dc_crtc_timing *timing_out = &stream->timing;
4310 const struct drm_display_info *info = &connector->display_info;
4311 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4312 struct hdmi_vendor_infoframe hv_frame;
4313 struct hdmi_avi_infoframe avi_frame;
4315 memset(&hv_frame, 0, sizeof(hv_frame));
4316 memset(&avi_frame, 0, sizeof(avi_frame));
4318 timing_out->h_border_left = 0;
4319 timing_out->h_border_right = 0;
4320 timing_out->v_border_top = 0;
4321 timing_out->v_border_bottom = 0;
4322 /* TODO: un-hardcode */
4323 if (drm_mode_is_420_only(info, mode_in)
4324 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4325 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4326 else if (drm_mode_is_420_also(info, mode_in)
4327 && aconnector->force_yuv420_output)
4328 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4329 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4330 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4331 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4333 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4335 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4336 timing_out->display_color_depth = convert_color_depth_from_display_info(
4338 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4340 timing_out->scan_type = SCANNING_TYPE_NODATA;
4341 timing_out->hdmi_vic = 0;
4344 timing_out->vic = old_stream->timing.vic;
4345 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4346 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4348 timing_out->vic = drm_match_cea_mode(mode_in);
4349 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4350 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4351 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4352 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4355 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4356 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4357 timing_out->vic = avi_frame.video_code;
4358 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4359 timing_out->hdmi_vic = hv_frame.vic;
4362 timing_out->h_addressable = mode_in->crtc_hdisplay;
4363 timing_out->h_total = mode_in->crtc_htotal;
4364 timing_out->h_sync_width =
4365 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4366 timing_out->h_front_porch =
4367 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4368 timing_out->v_total = mode_in->crtc_vtotal;
4369 timing_out->v_addressable = mode_in->crtc_vdisplay;
4370 timing_out->v_front_porch =
4371 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4372 timing_out->v_sync_width =
4373 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4374 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4375 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4377 stream->output_color_space = get_output_color_space(timing_out);
4379 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4380 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4381 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4382 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4383 drm_mode_is_420_also(info, mode_in) &&
4384 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4385 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4386 adjust_colour_depth_from_display_info(timing_out, info);
4391 static void fill_audio_info(struct audio_info *audio_info,
4392 const struct drm_connector *drm_connector,
4393 const struct dc_sink *dc_sink)
4396 int cea_revision = 0;
4397 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4399 audio_info->manufacture_id = edid_caps->manufacturer_id;
4400 audio_info->product_id = edid_caps->product_id;
4402 cea_revision = drm_connector->display_info.cea_rev;
4404 strscpy(audio_info->display_name,
4405 edid_caps->display_name,
4406 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4408 if (cea_revision >= 3) {
4409 audio_info->mode_count = edid_caps->audio_mode_count;
4411 for (i = 0; i < audio_info->mode_count; ++i) {
4412 audio_info->modes[i].format_code =
4413 (enum audio_format_code)
4414 (edid_caps->audio_modes[i].format_code);
4415 audio_info->modes[i].channel_count =
4416 edid_caps->audio_modes[i].channel_count;
4417 audio_info->modes[i].sample_rates.all =
4418 edid_caps->audio_modes[i].sample_rate;
4419 audio_info->modes[i].sample_size =
4420 edid_caps->audio_modes[i].sample_size;
4424 audio_info->flags.all = edid_caps->speaker_flags;
4426 /* TODO: We only check the progressive mode; check the interlaced mode too */
4427 if (drm_connector->latency_present[0]) {
4428 audio_info->video_latency = drm_connector->video_latency[0];
4429 audio_info->audio_latency = drm_connector->audio_latency[0];
4432 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4437 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4438 struct drm_display_mode *dst_mode)
4440 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4441 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4442 dst_mode->crtc_clock = src_mode->crtc_clock;
4443 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4444 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4445 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4446 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4447 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4448 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4449 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4450 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4451 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4452 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4453 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4457 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4458 const struct drm_display_mode *native_mode,
4461 if (scale_enabled) {
4462 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4463 } else if (native_mode->clock == drm_mode->clock &&
4464 native_mode->htotal == drm_mode->htotal &&
4465 native_mode->vtotal == drm_mode->vtotal) {
4466 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4468 /* neither scaling nor an amdgpu-inserted mode, no need to patch */
4472 static struct dc_sink *
4473 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4475 struct dc_sink_init_data sink_init_data = { 0 };
4476 struct dc_sink *sink = NULL;
4477 sink_init_data.link = aconnector->dc_link;
4478 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4480 sink = dc_sink_create(&sink_init_data);
4482 DRM_ERROR("Failed to create sink!\n");
4485 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4490 static void set_multisync_trigger_params(
4491 struct dc_stream_state *stream)
4493 if (stream->triggered_crtc_reset.enabled) {
4494 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4495 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4499 static void set_master_stream(struct dc_stream_state *stream_set[],
4502 int j, highest_rfr = 0, master_stream = 0;
4504 for (j = 0; j < stream_count; j++) {
4505 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4506 int refresh_rate = 0;
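/*
 * pix_clk_100hz is in units of 100 Hz, so multiplying by 100 gives Hz;
 * dividing by the total pixels per frame (h_total * v_total) yields the
 * refresh rate. The stream with the highest rate becomes the master.
 */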
4508 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4509 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4510 if (refresh_rate > highest_rfr) {
4511 highest_rfr = refresh_rate;
4516 for (j = 0; j < stream_count; j++) {
4518 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4522 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4526 if (context->stream_count < 2)
4528 for (i = 0; i < context->stream_count ; i++) {
4529 if (!context->streams[i])
4532 * TODO: add a function to read AMD VSDB bits and set
4533 * crtc_sync_master.multi_sync_enabled flag
4534 * For now it's set to false
4536 set_multisync_trigger_params(context->streams[i]);
4538 set_master_stream(context->streams, context->stream_count);
4541 static struct dc_stream_state *
4542 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4543 const struct drm_display_mode *drm_mode,
4544 const struct dm_connector_state *dm_state,
4545 const struct dc_stream_state *old_stream,
4548 struct drm_display_mode *preferred_mode = NULL;
4549 struct drm_connector *drm_connector;
4550 const struct drm_connector_state *con_state =
4551 dm_state ? &dm_state->base : NULL;
4552 struct dc_stream_state *stream = NULL;
4553 struct drm_display_mode mode = *drm_mode;
4554 bool native_mode_found = false;
4555 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4557 int preferred_refresh = 0;
4558 #if defined(CONFIG_DRM_AMD_DC_DCN)
4559 struct dsc_dec_dpcd_caps dsc_caps;
4561 uint32_t link_bandwidth_kbps;
4563 struct dc_sink *sink = NULL;
4564 if (aconnector == NULL) {
4565 DRM_ERROR("aconnector is NULL!\n");
4569 drm_connector = &aconnector->base;
4571 if (!aconnector->dc_sink) {
4572 sink = create_fake_sink(aconnector);
4576 sink = aconnector->dc_sink;
4577 dc_sink_retain(sink);
4580 stream = dc_create_stream_for_sink(sink);
4582 if (stream == NULL) {
4583 DRM_ERROR("Failed to create stream for sink!\n");
4587 stream->dm_stream_context = aconnector;
4589 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4590 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4592 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4593 /* Search for preferred mode */
4594 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4595 native_mode_found = true;
4599 if (!native_mode_found)
4600 preferred_mode = list_first_entry_or_null(
4601 &aconnector->base.modes,
4602 struct drm_display_mode,
4605 mode_refresh = drm_mode_vrefresh(&mode);
4607 if (preferred_mode == NULL) {
4609 * This may not be an error: the use case is when there are no
4610 * usermode calls to reset and set the mode upon hotplug. In that
4611 * case we call set mode ourselves to restore the previous mode,
4612 * and the mode list may not have been filled in yet.
4614 DRM_DEBUG_DRIVER("No preferred mode found\n");
4616 decide_crtc_timing_for_drm_display_mode(
4617 &mode, preferred_mode,
4618 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4619 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4623 drm_mode_set_crtcinfo(&mode, 0);
4626 * If scaling is enabled and refresh rate didn't change
4627 * we copy the vic and polarities of the old timings
4629 if (!scale || mode_refresh != preferred_refresh)
4630 fill_stream_properties_from_drm_display_mode(stream,
4631 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4633 fill_stream_properties_from_drm_display_mode(stream,
4634 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4636 stream->timing.flags.DSC = 0;
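/*
 * For DP sinks, parse the DSC capabilities out of the DPCD and compute the
 * available link bandwidth; if the sink supports DSC, a DSC config is
 * computed below and may also be forced on via debugfs overrides.
 */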
4638 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4639 #if defined(CONFIG_DRM_AMD_DC_DCN)
4640 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4641 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4642 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4645 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4646 dc_link_get_link_cap(aconnector->dc_link));
4648 #if defined(CONFIG_DRM_AMD_DC_DCN)
4649 if (dsc_caps.is_dsc_supported) {
4650 /* Set DSC policy according to dsc_clock_en */
4651 dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
4653 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4655 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4656 link_bandwidth_kbps,
4658 &stream->timing.dsc_cfg))
4659 stream->timing.flags.DSC = 1;
4660 /* Overwrite the stream flag if DSC is enabled through debugfs */
4661 if (aconnector->dsc_settings.dsc_clock_en)
4662 stream->timing.flags.DSC = 1;
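/*
 * Apply the debugfs overrides: slice width/height are converted into
 * horizontal/vertical slice counts, and bits_per_pixel is taken as-is.
 */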
4664 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
4665 stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
4666 aconnector->dsc_settings.dsc_slice_width);
4668 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
4669 stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
4670 aconnector->dsc_settings.dsc_slice_height);
4672 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4673 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4678 update_stream_scaling_settings(&mode, dm_state, stream);
4681 &stream->audio_info,
4685 update_stream_signal(stream, sink);
4687 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4688 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4689 if (stream->link->psr_settings.psr_feature_enabled) {
4691 // decide whether the stream supports VSC SDP colorimetry
4692 // before building the VSC info packet
4694 stream->use_vsc_sdp_for_colorimetry = false;
4695 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4696 stream->use_vsc_sdp_for_colorimetry =
4697 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4699 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4700 stream->use_vsc_sdp_for_colorimetry = true;
4702 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4705 dc_sink_release(sink);
4710 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4712 drm_crtc_cleanup(crtc);
4716 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4717 struct drm_crtc_state *state)
4719 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4721 /* TODO: destroy dc_stream objects once the stream object is flattened */
4723 dc_stream_release(cur->stream);
4726 __drm_atomic_helper_crtc_destroy_state(state);
4732 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4734 struct dm_crtc_state *state;
4737 dm_crtc_destroy_state(crtc, crtc->state);
4739 state = kzalloc(sizeof(*state), GFP_KERNEL);
4740 if (WARN_ON(!state))
4743 crtc->state = &state->base;
4744 crtc->state->crtc = crtc;
4748 static struct drm_crtc_state *
4749 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4751 struct dm_crtc_state *state, *cur;
4753 cur = to_dm_crtc_state(crtc->state);
4755 if (WARN_ON(!crtc->state))
4758 state = kzalloc(sizeof(*state), GFP_KERNEL);
4762 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4765 state->stream = cur->stream;
4766 dc_stream_retain(state->stream);
4769 state->active_planes = cur->active_planes;
4770 state->vrr_params = cur->vrr_params;
4771 state->vrr_infopacket = cur->vrr_infopacket;
4772 state->abm_level = cur->abm_level;
4773 state->vrr_supported = cur->vrr_supported;
4774 state->freesync_config = cur->freesync_config;
4775 state->crc_src = cur->crc_src;
4776 state->cm_has_degamma = cur->cm_has_degamma;
4777 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4779 /* TODO: duplicate dc_stream once the stream object is flattened */
4781 return &state->base;
4784 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4786 enum dc_irq_source irq_source;
4787 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4788 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
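/*
 * IRQ_TYPE_VUPDATE is the base interrupt source; adding the OTG instance
 * selects the VUPDATE interrupt for this particular CRTC.
 */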
4791 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4793 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4795 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4796 acrtc->crtc_id, enable ? "en" : "dis", rc);
4800 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4802 enum dc_irq_source irq_source;
4803 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4804 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4805 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4809 /* vblank irq on -> Only need vupdate irq in vrr mode */
4810 if (amdgpu_dm_vrr_active(acrtc_state))
4811 rc = dm_set_vupdate_irq(crtc, true);
4813 /* vblank irq off -> vupdate irq off */
4814 rc = dm_set_vupdate_irq(crtc, false);
4820 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4821 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4824 static int dm_enable_vblank(struct drm_crtc *crtc)
4826 return dm_set_vblank(crtc, true);
4829 static void dm_disable_vblank(struct drm_crtc *crtc)
4831 dm_set_vblank(crtc, false);
4834 /* Implement only the options currently available to the driver */
4835 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4836 .reset = dm_crtc_reset_state,
4837 .destroy = amdgpu_dm_crtc_destroy,
4838 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4839 .set_config = drm_atomic_helper_set_config,
4840 .page_flip = drm_atomic_helper_page_flip,
4841 .atomic_duplicate_state = dm_crtc_duplicate_state,
4842 .atomic_destroy_state = dm_crtc_destroy_state,
4843 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4844 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4845 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4846 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4847 .enable_vblank = dm_enable_vblank,
4848 .disable_vblank = dm_disable_vblank,
4849 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4852 static enum drm_connector_status
4853 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4856 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4860 * 1. This interface is NOT called in the context of the HPD irq.
4861 * 2. This interface *is* called in the context of a user-mode ioctl,
4862 * which makes it a bad place for *any* MST-related activity.
4865 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4866 !aconnector->fake_enable)
4867 connected = (aconnector->dc_sink != NULL);
4869 connected = (aconnector->base.force == DRM_FORCE_ON);
4871 return (connected ? connector_status_connected :
4872 connector_status_disconnected);
4875 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4876 struct drm_connector_state *connector_state,
4877 struct drm_property *property,
4880 struct drm_device *dev = connector->dev;
4881 struct amdgpu_device *adev = drm_to_adev(dev);
4882 struct dm_connector_state *dm_old_state =
4883 to_dm_connector_state(connector->state);
4884 struct dm_connector_state *dm_new_state =
4885 to_dm_connector_state(connector_state);
4889 if (property == dev->mode_config.scaling_mode_property) {
4890 enum amdgpu_rmx_type rmx_type;
4893 case DRM_MODE_SCALE_CENTER:
4894 rmx_type = RMX_CENTER;
4896 case DRM_MODE_SCALE_ASPECT:
4897 rmx_type = RMX_ASPECT;
4899 case DRM_MODE_SCALE_FULLSCREEN:
4900 rmx_type = RMX_FULL;
4902 case DRM_MODE_SCALE_NONE:
4908 if (dm_old_state->scaling == rmx_type)
4911 dm_new_state->scaling = rmx_type;
4913 } else if (property == adev->mode_info.underscan_hborder_property) {
4914 dm_new_state->underscan_hborder = val;
4916 } else if (property == adev->mode_info.underscan_vborder_property) {
4917 dm_new_state->underscan_vborder = val;
4919 } else if (property == adev->mode_info.underscan_property) {
4920 dm_new_state->underscan_enable = val;
4922 } else if (property == adev->mode_info.abm_level_property) {
4923 dm_new_state->abm_level = val;
4930 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4931 const struct drm_connector_state *state,
4932 struct drm_property *property,
4935 struct drm_device *dev = connector->dev;
4936 struct amdgpu_device *adev = drm_to_adev(dev);
4937 struct dm_connector_state *dm_state =
4938 to_dm_connector_state(state);
4941 if (property == dev->mode_config.scaling_mode_property) {
4942 switch (dm_state->scaling) {
4944 *val = DRM_MODE_SCALE_CENTER;
4947 *val = DRM_MODE_SCALE_ASPECT;
4950 *val = DRM_MODE_SCALE_FULLSCREEN;
4954 *val = DRM_MODE_SCALE_NONE;
4958 } else if (property == adev->mode_info.underscan_hborder_property) {
4959 *val = dm_state->underscan_hborder;
4961 } else if (property == adev->mode_info.underscan_vborder_property) {
4962 *val = dm_state->underscan_vborder;
4964 } else if (property == adev->mode_info.underscan_property) {
4965 *val = dm_state->underscan_enable;
4967 } else if (property == adev->mode_info.abm_level_property) {
4968 *val = dm_state->abm_level;
4975 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4977 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4979 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4982 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4984 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4985 const struct dc_link *link = aconnector->dc_link;
4986 struct amdgpu_device *adev = drm_to_adev(connector->dev);
4987 struct amdgpu_display_manager *dm = &adev->dm;
4989 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4990 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4992 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4993 link->type != dc_connection_none &&
4994 dm->backlight_dev) {
4995 backlight_device_unregister(dm->backlight_dev);
4996 dm->backlight_dev = NULL;
5000 if (aconnector->dc_em_sink)
5001 dc_sink_release(aconnector->dc_em_sink);
5002 aconnector->dc_em_sink = NULL;
5003 if (aconnector->dc_sink)
5004 dc_sink_release(aconnector->dc_sink);
5005 aconnector->dc_sink = NULL;
5007 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5008 drm_connector_unregister(connector);
5009 drm_connector_cleanup(connector);
5010 if (aconnector->i2c) {
5011 i2c_del_adapter(&aconnector->i2c->base);
5012 kfree(aconnector->i2c);
5014 kfree(aconnector->dm_dp_aux.aux.name);
5019 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5021 struct dm_connector_state *state =
5022 to_dm_connector_state(connector->state);
5024 if (connector->state)
5025 __drm_atomic_helper_connector_destroy_state(connector->state);
5029 state = kzalloc(sizeof(*state), GFP_KERNEL);
5032 state->scaling = RMX_OFF;
5033 state->underscan_enable = false;
5034 state->underscan_hborder = 0;
5035 state->underscan_vborder = 0;
5036 state->base.max_requested_bpc = 8;
5037 state->vcpi_slots = 0;
5039 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5040 state->abm_level = amdgpu_dm_abm_level;
5042 __drm_atomic_helper_connector_reset(connector, &state->base);
5046 struct drm_connector_state *
5047 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5049 struct dm_connector_state *state =
5050 to_dm_connector_state(connector->state);
5052 struct dm_connector_state *new_state =
5053 kmemdup(state, sizeof(*state), GFP_KERNEL);
5058 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5060 new_state->freesync_capable = state->freesync_capable;
5061 new_state->abm_level = state->abm_level;
5062 new_state->scaling = state->scaling;
5063 new_state->underscan_enable = state->underscan_enable;
5064 new_state->underscan_hborder = state->underscan_hborder;
5065 new_state->underscan_vborder = state->underscan_vborder;
5066 new_state->vcpi_slots = state->vcpi_slots;
5067 new_state->pbn = state->pbn;
5068 return &new_state->base;
5072 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5074 struct amdgpu_dm_connector *amdgpu_dm_connector =
5075 to_amdgpu_dm_connector(connector);
5078 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5079 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5080 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5081 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5086 #if defined(CONFIG_DEBUG_FS)
5087 connector_debugfs_init(amdgpu_dm_connector);
5093 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5094 .reset = amdgpu_dm_connector_funcs_reset,
5095 .detect = amdgpu_dm_connector_detect,
5096 .fill_modes = drm_helper_probe_single_connector_modes,
5097 .destroy = amdgpu_dm_connector_destroy,
5098 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5099 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5100 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5101 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5102 .late_register = amdgpu_dm_connector_late_register,
5103 .early_unregister = amdgpu_dm_connector_unregister
5106 static int get_modes(struct drm_connector *connector)
5108 return amdgpu_dm_connector_get_modes(connector);
5111 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5113 struct dc_sink_init_data init_params = {
5114 .link = aconnector->dc_link,
5115 .sink_signal = SIGNAL_TYPE_VIRTUAL
5119 if (!aconnector->base.edid_blob_ptr) {
5120 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5121 aconnector->base.name);
5123 aconnector->base.force = DRM_FORCE_OFF;
5124 aconnector->base.override_edid = false;
5128 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5130 aconnector->edid = edid;
5132 aconnector->dc_em_sink = dc_link_add_remote_sink(
5133 aconnector->dc_link,
5135 (edid->extensions + 1) * EDID_LENGTH,
5138 if (aconnector->base.force == DRM_FORCE_ON) {
5139 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5140 aconnector->dc_link->local_sink :
5141 aconnector->dc_em_sink;
5142 dc_sink_retain(aconnector->dc_sink);
5146 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5148 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5151 * In case of a headless boot with force-on for a DP managed connector,
5152 * these settings have to be != 0 to get an initial modeset
5154 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5155 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5156 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5160 aconnector->base.override_edid = true;
5161 create_eml_sink(aconnector);
5164 static struct dc_stream_state *
5165 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5166 const struct drm_display_mode *drm_mode,
5167 const struct dm_connector_state *dm_state,
5168 const struct dc_stream_state *old_stream)
5170 struct drm_connector *connector = &aconnector->base;
5171 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5172 struct dc_stream_state *stream;
5173 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5174 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5175 enum dc_status dc_result = DC_OK;
5178 stream = create_stream_for_sink(aconnector, drm_mode,
5179 dm_state, old_stream,
5181 if (stream == NULL) {
5182 DRM_ERROR("Failed to create stream for sink!\n");
5186 dc_result = dc_validate_stream(adev->dm.dc, stream);
5188 if (dc_result != DC_OK) {
5189 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5194 dc_status_to_str(dc_result));
5196 dc_stream_release(stream);
5198 requested_bpc -= 2; /* lower bpc to retry validation */
5201 } while (stream == NULL && requested_bpc >= 6);
5206 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5207 struct drm_display_mode *mode)
5209 int result = MODE_ERROR;
5210 struct dc_sink *dc_sink;
5211 /* TODO: Unhardcode stream count */
5212 struct dc_stream_state *stream;
5213 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5215 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5216 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5220 * Only run this the first time mode_valid is called, to initialize
5223 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5224 !aconnector->dc_em_sink)
5225 handle_edid_mgmt(aconnector);
5227 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5229 if (dc_sink == NULL) {
5230 DRM_ERROR("dc_sink is NULL!\n");
5234 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5236 dc_stream_release(stream);
5241 /* TODO: error handling */
5245 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5246 struct dc_info_packet *out)
5248 struct hdmi_drm_infoframe frame;
5249 unsigned char buf[30]; /* 26 + 4 */
5253 memset(out, 0, sizeof(*out));
5255 if (!state->hdr_output_metadata)
5258 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5262 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5266 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5270 /* Prepare the infopacket for DC. */
5271 switch (state->connector->connector_type) {
5272 case DRM_MODE_CONNECTOR_HDMIA:
5273 out->hb0 = 0x87; /* type */
5274 out->hb1 = 0x01; /* version */
5275 out->hb2 = 0x1A; /* length */
5276 out->sb[0] = buf[3]; /* checksum */
5280 case DRM_MODE_CONNECTOR_DisplayPort:
5281 case DRM_MODE_CONNECTOR_eDP:
5282 out->hb0 = 0x00; /* sdp id, zero */
5283 out->hb1 = 0x87; /* type */
5284 out->hb2 = 0x1D; /* payload len - 1 */
5285 out->hb3 = (0x13 << 2); /* sdp version */
5286 out->sb[0] = 0x01; /* version */
5287 out->sb[1] = 0x1A; /* length */
5295 memcpy(&out->sb[i], &buf[4], 26);
5298 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5299 sizeof(out->sb), false);
5305 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5306 const struct drm_connector_state *new_state)
5308 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5309 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5311 if (old_blob != new_blob) {
5312 if (old_blob && new_blob &&
5313 old_blob->length == new_blob->length)
5314 return memcmp(old_blob->data, new_blob->data,
5324 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5325 struct drm_atomic_state *state)
5327 struct drm_connector_state *new_con_state =
5328 drm_atomic_get_new_connector_state(state, conn);
5329 struct drm_connector_state *old_con_state =
5330 drm_atomic_get_old_connector_state(state, conn);
5331 struct drm_crtc *crtc = new_con_state->crtc;
5332 struct drm_crtc_state *new_crtc_state;
5338 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5339 struct dc_info_packet hdr_infopacket;
5341 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5345 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5346 if (IS_ERR(new_crtc_state))
5347 return PTR_ERR(new_crtc_state);
5350 * DC considers the stream backends changed if the
5351 * static metadata changes. Forcing the modeset also
5352 * gives a simple way for userspace to switch from
5353 * 8bpc to 10bpc when setting the metadata to enter
5356 * Changing the static metadata after it's been
5357 * set is permissible, however. So only force a
5358 * modeset if we're entering or exiting HDR.
5360 new_crtc_state->mode_changed =
5361 !old_con_state->hdr_output_metadata ||
5362 !new_con_state->hdr_output_metadata;
5368 static const struct drm_connector_helper_funcs
5369 amdgpu_dm_connector_helper_funcs = {
5371 * If a second, larger display is hotplugged in FB console mode, its
5372 * higher-resolution modes will be filtered out by drm_mode_validate_size()
5373 * and will be missing after the user starts lightdm. So we need to rebuild
5374 * the mode list in the get_modes callback, not just return the mode count.
5376 .get_modes = get_modes,
5377 .mode_valid = amdgpu_dm_connector_mode_valid,
5378 .atomic_check = amdgpu_dm_connector_atomic_check,
5381 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5385 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5387 struct drm_device *dev = new_crtc_state->crtc->dev;
5388 struct drm_plane *plane;
5390 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5391 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5398 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5400 struct drm_atomic_state *state = new_crtc_state->state;
5401 struct drm_plane *plane;
5404 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5405 struct drm_plane_state *new_plane_state;
5407 /* Cursor planes are "fake". */
5408 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5411 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5413 if (!new_plane_state) {
5415 * The plane is enabled on the CRTC and hasn't changed
5416 * state. This means that it previously passed
5417 * validation and is therefore enabled.
5423 /* We need a framebuffer to be considered enabled. */
5424 num_active += (new_plane_state->fb != NULL);
5430 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5431 struct drm_crtc_state *new_crtc_state)
5433 struct dm_crtc_state *dm_new_crtc_state =
5434 to_dm_crtc_state(new_crtc_state);
5436 dm_new_crtc_state->active_planes = 0;
5438 if (!dm_new_crtc_state->stream)
5441 dm_new_crtc_state->active_planes =
5442 count_crtc_active_planes(new_crtc_state);
5445 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5446 struct drm_crtc_state *state)
5448 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5449 struct dc *dc = adev->dm.dc;
5450 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5453 dm_update_crtc_active_planes(crtc, state);
5455 if (unlikely(!dm_crtc_state->stream &&
5456 modeset_required(state, NULL, dm_crtc_state->stream))) {
5461 /* In some use cases, like reset, no stream is attached */
5462 if (!dm_crtc_state->stream)
5466 * We want at least one hardware plane enabled to use
5467 * the stream with a cursor enabled.
5469 if (state->enable && state->active &&
5470 does_crtc_have_active_cursor(state) &&
5471 dm_crtc_state->active_planes == 0)
5474 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5480 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5481 const struct drm_display_mode *mode,
5482 struct drm_display_mode *adjusted_mode)
5487 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5488 .disable = dm_crtc_helper_disable,
5489 .atomic_check = dm_crtc_helper_atomic_check,
5490 .mode_fixup = dm_crtc_helper_mode_fixup,
5491 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5494 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5499 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5501 switch (display_color_depth) {
5502 case COLOR_DEPTH_666:
5504 case COLOR_DEPTH_888:
5506 case COLOR_DEPTH_101010:
5508 case COLOR_DEPTH_121212:
5510 case COLOR_DEPTH_141414:
5512 case COLOR_DEPTH_161616:
5520 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5521 struct drm_crtc_state *crtc_state,
5522 struct drm_connector_state *conn_state)
5524 struct drm_atomic_state *state = crtc_state->state;
5525 struct drm_connector *connector = conn_state->connector;
5526 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5527 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5528 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5529 struct drm_dp_mst_topology_mgr *mst_mgr;
5530 struct drm_dp_mst_port *mst_port;
5531 enum dc_color_depth color_depth;
5533 bool is_y420 = false;
5535 if (!aconnector->port || !aconnector->dc_sink)
5538 mst_port = aconnector->port;
5539 mst_mgr = &aconnector->mst_port->mst_mgr;
5541 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5544 if (!state->duplicated) {
5545 int max_bpc = conn_state->max_requested_bpc;
5546 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5547 aconnector->force_yuv420_output;
5548 color_depth = convert_color_depth_from_display_info(connector,
5551 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5552 clock = adjusted_mode->clock;
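/*
 * Compute the MST Payload Bandwidth Number (PBN) this mode requires from
 * the pixel clock and bpp, then reserve the corresponding VCPI time slots
 * on the MST topology for this port.
 */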
5553 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5555 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5558 dm_new_connector_state->pbn,
5559 dm_mst_get_pbn_divider(aconnector->dc_link));
5560 if (dm_new_connector_state->vcpi_slots < 0) {
5561 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5562 return dm_new_connector_state->vcpi_slots;
5567 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5568 .disable = dm_encoder_helper_disable,
5569 .atomic_check = dm_encoder_helper_atomic_check
5572 #if defined(CONFIG_DRM_AMD_DC_DCN)
5573 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5574 struct dc_state *dc_state)
5576 struct dc_stream_state *stream = NULL;
5577 struct drm_connector *connector;
5578 struct drm_connector_state *new_con_state, *old_con_state;
5579 struct amdgpu_dm_connector *aconnector;
5580 struct dm_connector_state *dm_conn_state;
5581 int i, j, clock, bpp;
5582 int vcpi, pbn_div, pbn = 0;
5584 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5586 aconnector = to_amdgpu_dm_connector(connector);
5588 if (!aconnector->port)
5591 if (!new_con_state || !new_con_state->crtc)
5594 dm_conn_state = to_dm_connector_state(new_con_state);
5596 for (j = 0; j < dc_state->stream_count; j++) {
5597 stream = dc_state->streams[j];
5601 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5610 if (stream->timing.flags.DSC != 1) {
5611 drm_dp_mst_atomic_enable_dsc(state,
5619 pbn_div = dm_mst_get_pbn_divider(stream->link);
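/*
 * For DSC streams the PBN is recomputed from the compressed bits-per-pixel
 * (note drm_dp_calc_pbn_mode() is told DSC is in use), and the VCPI slot
 * allocation is updated accordingly.
 */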
5620 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5621 clock = stream->timing.pix_clk_100hz / 10;
5622 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5623 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5630 dm_conn_state->pbn = pbn;
5631 dm_conn_state->vcpi_slots = vcpi;
5637 static void dm_drm_plane_reset(struct drm_plane *plane)
5639 struct dm_plane_state *amdgpu_state = NULL;
5642 plane->funcs->atomic_destroy_state(plane, plane->state);
5644 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5645 WARN_ON(amdgpu_state == NULL);
5648 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5651 static struct drm_plane_state *
5652 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5654 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5656 old_dm_plane_state = to_dm_plane_state(plane->state);
5657 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5658 if (!dm_plane_state)
5661 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5663 if (old_dm_plane_state->dc_state) {
5664 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5665 dc_plane_state_retain(dm_plane_state->dc_state);
5668 /* Framebuffer hasn't been updated yet, so retain old flags. */
5669 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5670 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5672 return &dm_plane_state->base;
5675 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5676 struct drm_plane_state *state)
5678 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5680 if (dm_plane_state->dc_state)
5681 dc_plane_state_release(dm_plane_state->dc_state);
5683 drm_atomic_helper_plane_destroy_state(plane, state);
5686 static const struct drm_plane_funcs dm_plane_funcs = {
5687 .update_plane = drm_atomic_helper_update_plane,
5688 .disable_plane = drm_atomic_helper_disable_plane,
5689 .destroy = drm_primary_helper_destroy,
5690 .reset = dm_drm_plane_reset,
5691 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5692 .atomic_destroy_state = dm_drm_plane_destroy_state,
5695 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5696 struct drm_plane_state *new_state)
5698 struct amdgpu_framebuffer *afb;
5699 struct drm_gem_object *obj;
5700 struct amdgpu_device *adev;
5701 struct amdgpu_bo *rbo;
5702 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5703 struct list_head list;
5704 struct ttm_validate_buffer tv;
5705 struct ww_acquire_ctx ticket;
5709 if (!new_state->fb) {
5710 DRM_DEBUG_DRIVER("No FB bound\n");
5714 afb = to_amdgpu_framebuffer(new_state->fb);
5715 obj = new_state->fb->obj[0];
5716 rbo = gem_to_amdgpu_bo(obj);
5717 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
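/*
 * Reserve the BO, pin it into a scanout-capable domain (cursors always go
 * to VRAM), and make sure it has a GART mapping so the display hardware can
 * address it; the GPU offset becomes the framebuffer address used by DC.
 */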
5718 INIT_LIST_HEAD(&list);
5722 list_add(&tv.head, &list);
5724 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5726 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5730 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5731 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5733 domain = AMDGPU_GEM_DOMAIN_VRAM;
5735 r = amdgpu_bo_pin(rbo, domain);
5736 if (unlikely(r != 0)) {
5737 if (r != -ERESTARTSYS)
5738 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5739 ttm_eu_backoff_reservation(&ticket, &list);
5743 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5744 if (unlikely(r != 0)) {
5745 amdgpu_bo_unpin(rbo);
5746 ttm_eu_backoff_reservation(&ticket, &list);
5747 DRM_ERROR("%p bind failed\n", rbo);
5751 ttm_eu_backoff_reservation(&ticket, &list);
5753 afb->address = amdgpu_bo_gpu_offset(rbo);
5758 * We don't do surface updates on planes that have been newly created,
5759 * but we also don't have the afb->address during atomic check.
5761 * Fill in buffer attributes depending on the address here, but only on
5762 * newly created planes since they're not being used by DC yet and this
5763 * won't modify global state.
5765 dm_plane_state_old = to_dm_plane_state(plane->state);
5766 dm_plane_state_new = to_dm_plane_state(new_state);
5768 if (dm_plane_state_new->dc_state &&
5769 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5770 struct dc_plane_state *plane_state =
5771 dm_plane_state_new->dc_state;
5772 bool force_disable_dcc = !plane_state->dcc.enable;
5774 fill_plane_buffer_attributes(
5775 adev, afb, plane_state->format, plane_state->rotation,
5776 dm_plane_state_new->tiling_flags,
5777 &plane_state->tiling_info, &plane_state->plane_size,
5778 &plane_state->dcc, &plane_state->address,
5779 dm_plane_state_new->tmz_surface, force_disable_dcc);
5785 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5786 struct drm_plane_state *old_state)
5788 struct amdgpu_bo *rbo;
5794 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5795 r = amdgpu_bo_reserve(rbo, false);
5797 DRM_ERROR("failed to reserve rbo before unpin\n");
5801 amdgpu_bo_unpin(rbo);
5802 amdgpu_bo_unreserve(rbo);
5803 amdgpu_bo_unref(&rbo);
5806 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5807 struct drm_crtc_state *new_crtc_state)
5809 int max_downscale = 0;
5810 int max_upscale = INT_MAX;
5812 /* TODO: These should be checked against DC plane caps */
5813 return drm_atomic_helper_check_plane_state(
5814 state, new_crtc_state, max_downscale, max_upscale, true, true);
5817 static int dm_plane_atomic_check(struct drm_plane *plane,
5818 struct drm_plane_state *state)
5820 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5821 struct dc *dc = adev->dm.dc;
5822 struct dm_plane_state *dm_plane_state;
5823 struct dc_scaling_info scaling_info;
5824 struct drm_crtc_state *new_crtc_state;
5827 dm_plane_state = to_dm_plane_state(state);
5829 if (!dm_plane_state->dc_state)
5833 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5834 if (!new_crtc_state)
5837 ret = dm_plane_helper_check_state(state, new_crtc_state);
5841 ret = fill_dc_scaling_info(state, &scaling_info);
5845 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5851 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5852 struct drm_plane_state *new_plane_state)
5854 /* Only support async updates on cursor planes. */
5855 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5861 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5862 struct drm_plane_state *new_state)
5864 struct drm_plane_state *old_state =
5865 drm_atomic_get_old_plane_state(new_state->state, plane);
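/*
 * Async (cursor) updates bypass the full atomic commit: copy the new source
 * and destination rectangles into the current plane state, swap in the new
 * framebuffer, and program the cursor immediately.
 */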
5867 swap(plane->state->fb, new_state->fb);
5869 plane->state->src_x = new_state->src_x;
5870 plane->state->src_y = new_state->src_y;
5871 plane->state->src_w = new_state->src_w;
5872 plane->state->src_h = new_state->src_h;
5873 plane->state->crtc_x = new_state->crtc_x;
5874 plane->state->crtc_y = new_state->crtc_y;
5875 plane->state->crtc_w = new_state->crtc_w;
5876 plane->state->crtc_h = new_state->crtc_h;
5878 handle_cursor_update(plane, old_state);
5881 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5882 .prepare_fb = dm_plane_helper_prepare_fb,
5883 .cleanup_fb = dm_plane_helper_cleanup_fb,
5884 .atomic_check = dm_plane_atomic_check,
5885 .atomic_async_check = dm_plane_atomic_async_check,
5886 .atomic_async_update = dm_plane_atomic_async_update
5890 * TODO: these are currently initialized to RGB formats only.
5891 * For future use cases we should either initialize them dynamically based on
5892 * plane capabilities, or initialize this array to all formats, so the internal
5893 * drm check will succeed, and let DC implement the proper check.
5895 static const uint32_t rgb_formats[] = {
5896 DRM_FORMAT_XRGB8888,
5897 DRM_FORMAT_ARGB8888,
5898 DRM_FORMAT_RGBA8888,
5899 DRM_FORMAT_XRGB2101010,
5900 DRM_FORMAT_XBGR2101010,
5901 DRM_FORMAT_ARGB2101010,
5902 DRM_FORMAT_ABGR2101010,
5903 DRM_FORMAT_XBGR8888,
5904 DRM_FORMAT_ABGR8888,
5908 static const uint32_t overlay_formats[] = {
5909 DRM_FORMAT_XRGB8888,
5910 DRM_FORMAT_ARGB8888,
5911 DRM_FORMAT_RGBA8888,
5912 DRM_FORMAT_XBGR8888,
5913 DRM_FORMAT_ABGR8888,
5917 static const u32 cursor_formats[] = {
5921 static int get_plane_formats(const struct drm_plane *plane,
5922 const struct dc_plane_cap *plane_cap,
5923 uint32_t *formats, int max_formats)
5925 int i, num_formats = 0;
5928 * TODO: Query support for each group of formats directly from
5929 * DC plane caps. This will require adding more formats to the
5933 switch (plane->type) {
5934 case DRM_PLANE_TYPE_PRIMARY:
5935 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5936 if (num_formats >= max_formats)
5939 formats[num_formats++] = rgb_formats[i];
5942 if (plane_cap && plane_cap->pixel_format_support.nv12)
5943 formats[num_formats++] = DRM_FORMAT_NV12;
5944 if (plane_cap && plane_cap->pixel_format_support.p010)
5945 formats[num_formats++] = DRM_FORMAT_P010;
5946 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5947 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5948 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5949 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5950 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5954 case DRM_PLANE_TYPE_OVERLAY:
5955 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5956 if (num_formats >= max_formats)
5959 formats[num_formats++] = overlay_formats[i];
5963 case DRM_PLANE_TYPE_CURSOR:
5964 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5965 if (num_formats >= max_formats)
5968 formats[num_formats++] = cursor_formats[i];
5976 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5977 struct drm_plane *plane,
5978 unsigned long possible_crtcs,
5979 const struct dc_plane_cap *plane_cap)
5981 uint32_t formats[32];
5984 unsigned int supported_rotations;
5986 num_formats = get_plane_formats(plane, plane_cap, formats,
5987 ARRAY_SIZE(formats));
5989 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
5990 &dm_plane_funcs, formats, num_formats,
5991 NULL, plane->type, NULL);
5995 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5996 plane_cap && plane_cap->per_pixel_alpha) {
5997 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5998 BIT(DRM_MODE_BLEND_PREMULTI);
6000 drm_plane_create_alpha_property(plane);
6001 drm_plane_create_blend_mode_property(plane, blend_caps);
6004 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6006 (plane_cap->pixel_format_support.nv12 ||
6007 plane_cap->pixel_format_support.p010)) {
6008 /* This only affects YUV formats. */
6009 drm_plane_create_color_properties(
6011 BIT(DRM_COLOR_YCBCR_BT601) |
6012 BIT(DRM_COLOR_YCBCR_BT709) |
6013 BIT(DRM_COLOR_YCBCR_BT2020),
6014 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6015 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6016 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6019 supported_rotations =
6020 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6021 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6023 if (dm->adev->asic_type >= CHIP_BONAIRE)
6024 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6025 supported_rotations);
6027 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6029 /* Create (reset) the plane state */
6030 if (plane->funcs->reset)
6031 plane->funcs->reset(plane);
6036 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6037 struct drm_plane *plane,
6038 uint32_t crtc_index)
6040 struct amdgpu_crtc *acrtc = NULL;
6041 struct drm_plane *cursor_plane;
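/*
 * Each CRTC gets a dedicated cursor plane, allocated and initialized here
 * before the CRTC itself is registered with its primary plane.
 */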
6045 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6049 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6050 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6052 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6056 res = drm_crtc_init_with_planes(
6061 &amdgpu_dm_crtc_funcs, NULL);
6066 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6068 /* Create (reset) the CRTC state */
6069 if (acrtc->base.funcs->reset)
6070 acrtc->base.funcs->reset(&acrtc->base);
6072 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6073 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6075 acrtc->crtc_id = crtc_index;
6076 acrtc->base.enabled = false;
6077 acrtc->otg_inst = -1;
6079 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6080 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6081 true, MAX_COLOR_LUT_ENTRIES);
6082 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6088 kfree(cursor_plane);
6093 static int to_drm_connector_type(enum signal_type st)
6096 case SIGNAL_TYPE_HDMI_TYPE_A:
6097 return DRM_MODE_CONNECTOR_HDMIA;
6098 case SIGNAL_TYPE_EDP:
6099 return DRM_MODE_CONNECTOR_eDP;
6100 case SIGNAL_TYPE_LVDS:
6101 return DRM_MODE_CONNECTOR_LVDS;
6102 case SIGNAL_TYPE_RGB:
6103 return DRM_MODE_CONNECTOR_VGA;
6104 case SIGNAL_TYPE_DISPLAY_PORT:
6105 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6106 return DRM_MODE_CONNECTOR_DisplayPort;
6107 case SIGNAL_TYPE_DVI_DUAL_LINK:
6108 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6109 return DRM_MODE_CONNECTOR_DVID;
6110 case SIGNAL_TYPE_VIRTUAL:
6111 return DRM_MODE_CONNECTOR_VIRTUAL;
6114 return DRM_MODE_CONNECTOR_Unknown;
6118 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6120 struct drm_encoder *encoder;
6122 /* There is only one encoder per connector */
6123 drm_connector_for_each_possible_encoder(connector, encoder)
6129 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6131 struct drm_encoder *encoder;
6132 struct amdgpu_encoder *amdgpu_encoder;
6134 encoder = amdgpu_dm_connector_to_encoder(connector);
6136 if (encoder == NULL)
6139 amdgpu_encoder = to_amdgpu_encoder(encoder);
6141 amdgpu_encoder->native_mode.clock = 0;
6143 if (!list_empty(&connector->probed_modes)) {
6144 struct drm_display_mode *preferred_mode = NULL;
6146 list_for_each_entry(preferred_mode,
6147 &connector->probed_modes,
6149 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6150 amdgpu_encoder->native_mode = *preferred_mode;
6158 static struct drm_display_mode *
6159 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6161 int hdisplay, int vdisplay)
6163 struct drm_device *dev = encoder->dev;
6164 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6165 struct drm_display_mode *mode = NULL;
6166 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6168 mode = drm_mode_duplicate(dev, native_mode);
6173 mode->hdisplay = hdisplay;
6174 mode->vdisplay = vdisplay;
6175 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6176 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6182 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6183 struct drm_connector *connector)
6185 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6186 struct drm_display_mode *mode = NULL;
6187 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6188 struct amdgpu_dm_connector *amdgpu_dm_connector =
6189 to_amdgpu_dm_connector(connector);
6193 char name[DRM_DISPLAY_MODE_LEN];
6196 } common_modes[] = {
6197 { "640x480", 640, 480},
6198 { "800x600", 800, 600},
6199 { "1024x768", 1024, 768},
6200 { "1280x720", 1280, 720},
6201 { "1280x800", 1280, 800},
6202 {"1280x1024", 1280, 1024},
6203 { "1440x900", 1440, 900},
6204 {"1680x1050", 1680, 1050},
6205 {"1600x1200", 1600, 1200},
6206 {"1920x1080", 1920, 1080},
6207 {"1920x1200", 1920, 1200}
6210 n = ARRAY_SIZE(common_modes);
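/*
 * Only add common modes that fit within the native resolution, are not
 * identical to it, and are not already in the probed mode list.
 */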
6212 for (i = 0; i < n; i++) {
6213 struct drm_display_mode *curmode = NULL;
6214 bool mode_existed = false;
6216 if (common_modes[i].w > native_mode->hdisplay ||
6217 common_modes[i].h > native_mode->vdisplay ||
6218 (common_modes[i].w == native_mode->hdisplay &&
6219 common_modes[i].h == native_mode->vdisplay))
6222 list_for_each_entry(curmode, &connector->probed_modes, head) {
6223 if (common_modes[i].w == curmode->hdisplay &&
6224 common_modes[i].h == curmode->vdisplay) {
6225 mode_existed = true;
6233 mode = amdgpu_dm_create_common_mode(encoder,
6234 common_modes[i].name, common_modes[i].w,
6236 drm_mode_probed_add(connector, mode);
6237 amdgpu_dm_connector->num_modes++;
6241 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6244 struct amdgpu_dm_connector *amdgpu_dm_connector =
6245 to_amdgpu_dm_connector(connector);
6248 /* empty probed_modes */
6249 INIT_LIST_HEAD(&connector->probed_modes);
6250 amdgpu_dm_connector->num_modes =
6251 drm_add_edid_modes(connector, edid);
6253 /* Sort the probed modes before calling
6254 * amdgpu_dm_get_native_mode(), since an EDID can have
6255 * more than one preferred mode. Modes later in the
6256 * probed mode list could be of a higher, preferred
6257 * resolution: for example, a 3840x2160 base EDID
6258 * preferred timing and a 4096x2160 preferred
6259 * resolution in a DID extension block later.
6261 drm_mode_sort(&connector->probed_modes);
6262 amdgpu_dm_get_native_mode(connector);
6264 amdgpu_dm_connector->num_modes = 0;
6268 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6270 struct amdgpu_dm_connector *amdgpu_dm_connector =
6271 to_amdgpu_dm_connector(connector);
6272 struct drm_encoder *encoder;
6273 struct edid *edid = amdgpu_dm_connector->edid;
6275 encoder = amdgpu_dm_connector_to_encoder(connector);
6277 if (!edid || !drm_edid_is_valid(edid)) {
6278 amdgpu_dm_connector->num_modes =
6279 drm_add_modes_noedid(connector, 640, 480);
6281 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6282 amdgpu_dm_connector_add_common_modes(encoder, connector);
6284 amdgpu_dm_fbc_init(connector);
6286 return amdgpu_dm_connector->num_modes;
6289 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6290 struct amdgpu_dm_connector *aconnector,
6292 struct dc_link *link,
6295 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6298 * Some of the properties below require access to state, like bpc.
6299 * Allocate some default initial connector state with our reset helper.
6301 if (aconnector->base.funcs->reset)
6302 aconnector->base.funcs->reset(&aconnector->base);
6304 aconnector->connector_id = link_index;
6305 aconnector->dc_link = link;
6306 aconnector->base.interlace_allowed = false;
6307 aconnector->base.doublescan_allowed = false;
6308 aconnector->base.stereo_allowed = false;
6309 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6310 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6311 aconnector->audio_inst = -1;
6312 mutex_init(&aconnector->hpd_lock);
6315 * Configure HPD hot-plug support: connector->polled defaults to 0,
6316 * which means HPD hot plug is not supported
6318 switch (connector_type) {
6319 case DRM_MODE_CONNECTOR_HDMIA:
6320 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6321 aconnector->base.ycbcr_420_allowed =
6322 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6324 case DRM_MODE_CONNECTOR_DisplayPort:
6325 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6326 aconnector->base.ycbcr_420_allowed =
6327 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6329 case DRM_MODE_CONNECTOR_DVID:
6330 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6336 drm_object_attach_property(&aconnector->base.base,
6337 dm->ddev->mode_config.scaling_mode_property,
6338 DRM_MODE_SCALE_NONE);
6340 drm_object_attach_property(&aconnector->base.base,
6341 adev->mode_info.underscan_property,
6343 drm_object_attach_property(&aconnector->base.base,
6344 adev->mode_info.underscan_hborder_property,
6346 drm_object_attach_property(&aconnector->base.base,
6347 adev->mode_info.underscan_vborder_property,
6350 if (!aconnector->mst_port)
6351 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6353 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6354 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6355 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6357 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6358 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6359 drm_object_attach_property(&aconnector->base.base,
6360 adev->mode_info.abm_level_property, 0);
6363 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6364 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6365 connector_type == DRM_MODE_CONNECTOR_eDP) {
6366 drm_object_attach_property(
6367 &aconnector->base.base,
6368 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6370 if (!aconnector->mst_port)
6371 drm_connector_attach_vrr_capable_property(&aconnector->base);
6373 #ifdef CONFIG_DRM_AMD_DC_HDCP
6374 if (adev->dm.hdcp_workqueue)
6375 drm_connector_attach_content_protection_property(&aconnector->base, true);
6380 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6381 struct i2c_msg *msgs, int num)
6383 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6384 struct ddc_service *ddc_service = i2c->ddc_service;
6385 struct i2c_command cmd;
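/*
 * Translate each i2c_msg into a DC i2c_payload; the whole transfer is then
 * submitted as a single command over this connector's DDC channel.
 */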
6389 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6394 cmd.number_of_payloads = num;
6395 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6398 for (i = 0; i < num; i++) {
6399 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6400 cmd.payloads[i].address = msgs[i].addr;
6401 cmd.payloads[i].length = msgs[i].len;
6402 cmd.payloads[i].data = msgs[i].buf;
6406 ddc_service->ctx->dc,
6407 ddc_service->ddc_pin->hw_info.ddc_channel,
6411 kfree(cmd.payloads);
6415 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6417 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6420 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6421 .master_xfer = amdgpu_dm_i2c_xfer,
6422 .functionality = amdgpu_dm_i2c_func,
6425 static struct amdgpu_i2c_adapter *
6426 create_i2c(struct ddc_service *ddc_service,
6430 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6431 struct amdgpu_i2c_adapter *i2c;
6433 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6436 i2c->base.owner = THIS_MODULE;
6437 i2c->base.class = I2C_CLASS_DDC;
6438 i2c->base.dev.parent = &adev->pdev->dev;
6439 i2c->base.algo = &amdgpu_dm_i2c_algo;
6440 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6441 i2c_set_adapdata(&i2c->base, i2c);
6442 i2c->ddc_service = ddc_service;
6443 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6450 * Note: this function assumes that dc_link_detect() was called for the
6451 * dc_link which will be represented by this aconnector.
6453 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6454 struct amdgpu_dm_connector *aconnector,
6455 uint32_t link_index,
6456 struct amdgpu_encoder *aencoder)
6460 struct dc *dc = dm->dc;
6461 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6462 struct amdgpu_i2c_adapter *i2c;
6464 link->priv = aconnector;
6466 DRM_DEBUG_DRIVER("%s()\n", __func__);
6468 i2c = create_i2c(link->ddc, link->link_index, &res);
6470 DRM_ERROR("Failed to create i2c adapter data\n");
6474 aconnector->i2c = i2c;
6475 res = i2c_add_adapter(&i2c->base);
6478 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6482 connector_type = to_drm_connector_type(link->connector_signal);
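/* Register the DRM connector, backed by the i2c/DDC adapter created above. */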
6484 res = drm_connector_init_with_ddc(
6487 &amdgpu_dm_connector_funcs,
6492 DRM_ERROR("connector_init failed\n");
6493 aconnector->connector_id = -1;
6497 drm_connector_helper_add(
6499 &amdgpu_dm_connector_helper_funcs);
6501 amdgpu_dm_connector_init_helper(
6508 drm_connector_attach_encoder(
6509 &aconnector->base, &aencoder->base);
6511 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6512 || connector_type == DRM_MODE_CONNECTOR_eDP)
6513 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6518 aconnector->i2c = NULL;
6523 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6525 switch (adev->mode_info.num_crtc) {
6542 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6543 struct amdgpu_encoder *aencoder,
6544 uint32_t link_index)
6546 struct amdgpu_device *adev = drm_to_adev(dev);
6548 int res = drm_encoder_init(dev,
6550 &amdgpu_dm_encoder_funcs,
6551 DRM_MODE_ENCODER_TMDS,
6554 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6557 aencoder->encoder_id = link_index;
6559 aencoder->encoder_id = -1;
6561 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6566 static void manage_dm_interrupts(struct amdgpu_device *adev,
6567 struct amdgpu_crtc *acrtc,
6571 * We have no guarantee that the frontend index maps to the same
6572 * backend index - some even map to more than one.
6574 * TODO: Use a different interrupt or check DC itself for the mapping.
6577 amdgpu_display_crtc_idx_to_irq_type(
6582 drm_crtc_vblank_on(&acrtc->base);
6585 &adev->pageflip_irq,
6591 &adev->pageflip_irq,
6593 drm_crtc_vblank_off(&acrtc->base);
6597 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6598 struct amdgpu_crtc *acrtc)
6601 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6604 * This reads the current state for the IRQ and forcibly reapplies
6605 * the setting to hardware.
6607 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
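/*
 * Returns true when the scaling mode changes, when underscan is toggled
 * with non-zero borders, or when the underscan border sizes themselves change.
 */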
6611 is_scaling_state_different(const struct dm_connector_state *dm_state,
6612 const struct dm_connector_state *old_dm_state)
6614 if (dm_state->scaling != old_dm_state->scaling)
6616 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6617 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6619 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6620 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6622 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6623 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6628 #ifdef CONFIG_DRM_AMD_DC_HDCP
6629 static bool is_content_protection_different(struct drm_connector_state *state,
6630 const struct drm_connector_state *old_state,
6631 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6633 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
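/*
 * A change of the requested HDCP content type while protection is still
 * requested drops the state back to DESIRED so the new type can be negotiated.
 */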
6635 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6636 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6637 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6641 /* CP is being re-enabled, ignore this. */
6642 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6643 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6644 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6648 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6649 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6650 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6651 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6653 /* Check that something is actually connected and enabled; otherwise we would
6654 * start HDCP with nothing driving it (hot-plug, headless S3, DPMS).
6656 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6657 aconnector->dc_sink != NULL)
6660 if (old_state->content_protection == state->content_protection)
6663 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6670 static void remove_stream(struct amdgpu_device *adev,
6671 struct amdgpu_crtc *acrtc,
6672 struct dc_stream_state *stream)
6674 /* this is the update mode case */
6676 acrtc->otg_inst = -1;
6677 acrtc->enabled = false;
6680 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6681 struct dc_cursor_position *position)
6683 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6685 int xorigin = 0, yorigin = 0;
6687 position->enable = false;
6691 if (!crtc || !plane->state->fb)
6694 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6695 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6696 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6698 plane->state->crtc_w,
6699 plane->state->crtc_h);
6703 x = plane->state->crtc_x;
6704 y = plane->state->crtc_y;
6706 if (x <= -amdgpu_crtc->max_cursor_width ||
6707 y <= -amdgpu_crtc->max_cursor_height)
6711 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6715 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6718 position->enable = true;
6719 position->translate_by_source = true;
6722 position->x_hotspot = xorigin;
6723 position->y_hotspot = yorigin;
6728 static void handle_cursor_update(struct drm_plane *plane,
6729 struct drm_plane_state *old_plane_state)
6731 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6732 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6733 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6734 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6735 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6736 uint64_t address = afb ? afb->address : 0;
6737 struct dc_cursor_position position;
6738 struct dc_cursor_attributes attributes;
6741 if (!plane->state->fb && !old_plane_state->fb)
6744 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6746 amdgpu_crtc->crtc_id,
6747 plane->state->crtc_w,
6748 plane->state->crtc_h);
6750 ret = get_cursor_position(plane, crtc, &position);
6754 if (!position.enable) {
6755 /* turn off cursor */
6756 if (crtc_state && crtc_state->stream) {
6757 mutex_lock(&adev->dm.dc_lock);
6758 dc_stream_set_cursor_position(crtc_state->stream,
6760 mutex_unlock(&adev->dm.dc_lock);
6765 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6766 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6768 memset(&attributes, 0, sizeof(attributes));
6769 attributes.address.high_part = upper_32_bits(address);
6770 attributes.address.low_part = lower_32_bits(address);
6771 attributes.width = plane->state->crtc_w;
6772 attributes.height = plane->state->crtc_h;
6773 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6774 attributes.rotation_angle = 0;
6775 attributes.attribute_flags.value = 0;
6777 attributes.pitch = attributes.width;
6779 if (crtc_state->stream) {
6780 mutex_lock(&adev->dm.dc_lock);
6781 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6783 DRM_ERROR("DC failed to set cursor attributes\n");
6785 if (!dc_stream_set_cursor_position(crtc_state->stream,
6787 DRM_ERROR("DC failed to set cursor position\n");
6788 mutex_unlock(&adev->dm.dc_lock);
6792 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6795 assert_spin_locked(&acrtc->base.dev->event_lock);
6796 WARN_ON(acrtc->event);
6798 acrtc->event = acrtc->base.state->event;
6800 /* Set the flip status */
6801 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6803 /* Mark this event as consumed */
6804 acrtc->base.state->event = NULL;
6806 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6810 static void update_freesync_state_on_stream(
6811 struct amdgpu_display_manager *dm,
6812 struct dm_crtc_state *new_crtc_state,
6813 struct dc_stream_state *new_stream,
6814 struct dc_plane_state *surface,
6815 u32 flip_timestamp_in_us)
6817 struct mod_vrr_params vrr_params;
6818 struct dc_info_packet vrr_infopacket = {0};
6819 struct amdgpu_device *adev = dm->adev;
6820 unsigned long flags;
6826 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6827 * For now it's sufficient to just guard against these conditions.
6830 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6833 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6834 vrr_params = new_crtc_state->vrr_params;
6837 mod_freesync_handle_preflip(
6838 dm->freesync_module,
6841 flip_timestamp_in_us,
6844 if (adev->family < AMDGPU_FAMILY_AI &&
6845 amdgpu_dm_vrr_active(new_crtc_state)) {
6846 mod_freesync_handle_v_update(dm->freesync_module,
6847 new_stream, &vrr_params);
6849 /* Need to call this before the frame ends. */
6850 dc_stream_adjust_vmin_vmax(dm->dc,
6851 new_crtc_state->stream,
6852 &vrr_params.adjust);
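/*
 * Build the VRR infopacket for this flip and track whether the timing
 * adjustment or the infopacket contents actually changed.
 */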
6856 mod_freesync_build_vrr_infopacket(
6857 dm->freesync_module,
6861 TRANSFER_FUNC_UNKNOWN,
6864 new_crtc_state->freesync_timing_changed |=
6865 (memcmp(&new_crtc_state->vrr_params.adjust,
6867 sizeof(vrr_params.adjust)) != 0);
6869 new_crtc_state->freesync_vrr_info_changed |=
6870 (memcmp(&new_crtc_state->vrr_infopacket,
6872 sizeof(vrr_infopacket)) != 0);
6874 new_crtc_state->vrr_params = vrr_params;
6875 new_crtc_state->vrr_infopacket = vrr_infopacket;
6877 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6878 new_stream->vrr_infopacket = vrr_infopacket;
6880 if (new_crtc_state->freesync_vrr_info_changed)
6881 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6882 new_crtc_state->base.crtc->base.id,
6883 (int)new_crtc_state->base.vrr_enabled,
6884 (int)vrr_params.state);
6886 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6889 static void pre_update_freesync_state_on_stream(
6890 struct amdgpu_display_manager *dm,
6891 struct dm_crtc_state *new_crtc_state)
6893 struct dc_stream_state *new_stream = new_crtc_state->stream;
6894 struct mod_vrr_params vrr_params;
6895 struct mod_freesync_config config = new_crtc_state->freesync_config;
6896 struct amdgpu_device *adev = dm->adev;
6897 unsigned long flags;
6903 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6904 * For now it's sufficient to just guard against these conditions.
6906 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6909 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6910 vrr_params = new_crtc_state->vrr_params;
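/*
 * Pick the VRR state from the connector's reported refresh range and from
 * whether userspace enabled VRR on this CRTC; otherwise mark it unsupported.
 */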
6912 if (new_crtc_state->vrr_supported &&
6913 config.min_refresh_in_uhz &&
6914 config.max_refresh_in_uhz) {
6915 config.state = new_crtc_state->base.vrr_enabled ?
6916 VRR_STATE_ACTIVE_VARIABLE :
6919 config.state = VRR_STATE_UNSUPPORTED;
6922 mod_freesync_build_vrr_params(dm->freesync_module,
6924 &config, &vrr_params);
6926 new_crtc_state->freesync_timing_changed |=
6927 (memcmp(&new_crtc_state->vrr_params.adjust,
6929 sizeof(vrr_params.adjust)) != 0);
6931 new_crtc_state->vrr_params = vrr_params;
6932 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6935 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6936 struct dm_crtc_state *new_state)
6938 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6939 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6941 if (!old_vrr_active && new_vrr_active) {
6942 /* Transition VRR inactive -> active:
6943 * While VRR is active, we must not disable vblank irq, as a
6944 * reenable after disable would compute bogus vblank/pflip
6945 * timestamps if the reenable happened inside the display front porch.
6947 * We also need vupdate irq for the actual core vblank handling
6950 dm_set_vupdate_irq(new_state->base.crtc, true);
6951 drm_crtc_vblank_get(new_state->base.crtc);
6952 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6953 __func__, new_state->base.crtc->base.id);
6954 } else if (old_vrr_active && !new_vrr_active) {
6955 /* Transition VRR active -> inactive:
6956 * Allow vblank irq disable again for fixed refresh rate.
6958 dm_set_vupdate_irq(new_state->base.crtc, false);
6959 drm_crtc_vblank_put(new_state->base.crtc);
6960 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6961 __func__, new_state->base.crtc->base.id);
6965 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6967 struct drm_plane *plane;
6968 struct drm_plane_state *old_plane_state, *new_plane_state;
6972 * TODO: Make this per-stream so we don't issue redundant updates for
6973 * commits with multiple streams.
6975 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6977 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6978 handle_cursor_update(plane, old_plane_state);
6981 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6982 struct dc_state *dc_state,
6983 struct drm_device *dev,
6984 struct amdgpu_display_manager *dm,
6985 struct drm_crtc *pcrtc,
6986 bool wait_for_vblank)
6989 uint64_t timestamp_ns;
6990 struct drm_plane *plane;
6991 struct drm_plane_state *old_plane_state, *new_plane_state;
6992 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6993 struct drm_crtc_state *new_pcrtc_state =
6994 drm_atomic_get_new_crtc_state(state, pcrtc);
6995 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6996 struct dm_crtc_state *dm_old_crtc_state =
6997 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6998 int planes_count = 0, vpos, hpos;
7000 unsigned long flags;
7001 struct amdgpu_bo *abo;
7002 uint32_t target_vblank, last_flip_vblank;
7003 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7004 bool pflip_present = false;
7006 struct dc_surface_update surface_updates[MAX_SURFACES];
7007 struct dc_plane_info plane_infos[MAX_SURFACES];
7008 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7009 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7010 struct dc_stream_update stream_update;
7013 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7016 dm_error("Failed to allocate update bundle\n");
7021 * Disable the cursor first if we're disabling all the planes.
7022 * It'll remain on the screen after the planes are re-enabled if we don't.
7025 if (acrtc_state->active_planes == 0)
7026 amdgpu_dm_commit_cursors(state);
7028 /* update planes when needed */
7029 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7030 struct drm_crtc *crtc = new_plane_state->crtc;
7031 struct drm_crtc_state *new_crtc_state;
7032 struct drm_framebuffer *fb = new_plane_state->fb;
7033 bool plane_needs_flip;
7034 struct dc_plane_state *dc_plane;
7035 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7037 /* Cursor plane is handled after stream updates */
7038 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7041 if (!fb || !crtc || pcrtc != crtc)
7044 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7045 if (!new_crtc_state->active)
7048 dc_plane = dm_new_plane_state->dc_state;
7050 bundle->surface_updates[planes_count].surface = dc_plane;
7051 if (new_pcrtc_state->color_mgmt_changed) {
7052 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7053 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7054 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7057 fill_dc_scaling_info(new_plane_state,
7058 &bundle->scaling_infos[planes_count]);
7060 bundle->surface_updates[planes_count].scaling_info =
7061 &bundle->scaling_infos[planes_count];
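/* A page flip is only needed when the plane had a framebuffer before and still has one now. */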
7063 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7065 pflip_present = pflip_present || plane_needs_flip;
7067 if (!plane_needs_flip) {
7072 abo = gem_to_amdgpu_bo(fb->obj[0]);
7075 * Wait for all fences on this FB. Do limited wait to avoid
7076 * deadlock during GPU reset when this fence will not signal
7077 * but we hold reservation lock for the BO.
7079 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7081 msecs_to_jiffies(5000));
7082 if (unlikely(r <= 0))
7083 DRM_ERROR("Waiting for fences timed out!");
7085 fill_dc_plane_info_and_addr(
7086 dm->adev, new_plane_state,
7087 dm_new_plane_state->tiling_flags,
7088 &bundle->plane_infos[planes_count],
7089 &bundle->flip_addrs[planes_count].address,
7090 dm_new_plane_state->tmz_surface, false);
7092 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7093 new_plane_state->plane->index,
7094 bundle->plane_infos[planes_count].dcc.enable);
7096 bundle->surface_updates[planes_count].plane_info =
7097 &bundle->plane_infos[planes_count];
7100 * Only allow immediate flips for fast updates that don't
7101 * change FB pitch, DCC state, rotation or mirroring.
7103 bundle->flip_addrs[planes_count].flip_immediate =
7104 crtc->state->async_flip &&
7105 acrtc_state->update_type == UPDATE_TYPE_FAST;
7107 timestamp_ns = ktime_get_ns();
7108 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7109 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7110 bundle->surface_updates[planes_count].surface = dc_plane;
7112 if (!bundle->surface_updates[planes_count].surface) {
7113 DRM_ERROR("No surface for CRTC: id=%d\n",
7114 acrtc_attach->crtc_id);
7118 if (plane == pcrtc->primary)
7119 update_freesync_state_on_stream(
7122 acrtc_state->stream,
7124 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7126 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7128 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7129 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7135 if (pflip_present) {
7137 /* Use old throttling in non-vrr fixed refresh rate mode
7138 * to keep flip scheduling based on target vblank counts
7139 * working in a backwards compatible way, e.g., for
7140 * clients using the GLX_OML_sync_control extension or
7141 * DRI3/Present extension with defined target_msc.
7143 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7146 /* For variable refresh rate mode only:
7147 * Get vblank of last completed flip to avoid > 1 vrr
7148 * flips per video frame by use of throttling, but allow
7149 * flip programming anywhere in the possibly large
7150 * variable vrr vblank interval for fine-grained flip
7151 * timing control and more opportunity to avoid stutter
7152 * on late submission of flips.
7154 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7155 last_flip_vblank = acrtc_attach->last_flip_vblank;
7156 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
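/*
 * Target the vblank after the last completed flip when we must wait for
 * vblank, otherwise allow flipping within the current interval.
 */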
7159 target_vblank = last_flip_vblank + wait_for_vblank;
7162 * Wait until we're out of the vertical blank period before the one
7163 * targeted by the flip
7165 while ((acrtc_attach->enabled &&
7166 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7167 0, &vpos, &hpos, NULL,
7168 NULL, &pcrtc->hwmode)
7169 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7170 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7171 (int)(target_vblank -
7172 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7173 usleep_range(1000, 1100);
7177 * Prepare the flip event for the pageflip interrupt to handle.
7179 * This only works in the case where we've already turned on the
7180 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7181 * from 0 -> n planes we have to skip a hardware generated event
7182 * and rely on sending it from software.
7184 if (acrtc_attach->base.state->event &&
7185 acrtc_state->active_planes > 0) {
7186 drm_crtc_vblank_get(pcrtc);
7188 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7190 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7191 prepare_flip_isr(acrtc_attach);
7193 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7196 if (acrtc_state->stream) {
7197 if (acrtc_state->freesync_vrr_info_changed)
7198 bundle->stream_update.vrr_infopacket =
7199 &acrtc_state->stream->vrr_infopacket;
7203 /* Update the planes if changed or disable if we don't have any. */
7204 if ((planes_count || acrtc_state->active_planes == 0) &&
7205 acrtc_state->stream) {
7206 bundle->stream_update.stream = acrtc_state->stream;
7207 if (new_pcrtc_state->mode_changed) {
7208 bundle->stream_update.src = acrtc_state->stream->src;
7209 bundle->stream_update.dst = acrtc_state->stream->dst;
7212 if (new_pcrtc_state->color_mgmt_changed) {
7214 * TODO: This isn't fully correct since we've actually
7215 * already modified the stream in place.
7217 bundle->stream_update.gamut_remap =
7218 &acrtc_state->stream->gamut_remap_matrix;
7219 bundle->stream_update.output_csc_transform =
7220 &acrtc_state->stream->csc_color_matrix;
7221 bundle->stream_update.out_transfer_func =
7222 acrtc_state->stream->out_transfer_func;
7225 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7226 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7227 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7230 * If FreeSync state on the stream has changed then we need to
7231 * re-adjust the min/max bounds now that DC doesn't handle this
7232 * as part of commit.
7234 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7235 amdgpu_dm_vrr_active(acrtc_state)) {
7236 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7237 dc_stream_adjust_vmin_vmax(
7238 dm->dc, acrtc_state->stream,
7239 &acrtc_state->vrr_params.adjust);
7240 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7242 mutex_lock(&dm->dc_lock);
7243 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7244 acrtc_state->stream->link->psr_settings.psr_allow_active)
7245 amdgpu_dm_psr_disable(acrtc_state->stream);
7247 dc_commit_updates_for_stream(dm->dc,
7248 bundle->surface_updates,
7250 acrtc_state->stream,
7251 &bundle->stream_update,
7255 * Enable or disable the interrupts on the backend.
7257 * Most pipes are put into power gating when unused.
7259 * When power gating is enabled on a pipe we lose the
7260 * interrupt enablement state when power gating is disabled.
7262 * So we need to update the IRQ control state in hardware
7263 * whenever the pipe turns on (since it could be previously
7264 * power gated) or off (since some pipes can't be power gated on some ASICs).
7267 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7268 dm_update_pflip_irq_state(drm_to_adev(dev),
7271 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7272 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7273 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7274 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7275 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7276 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7277 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7278 amdgpu_dm_psr_enable(acrtc_state->stream);
7281 mutex_unlock(&dm->dc_lock);
7285 * Update cursor state *after* programming all the planes.
7286 * This avoids redundant programming in the case where we're going
7287 * to be disabling a single plane - those pipes are being disabled.
7289 if (acrtc_state->active_planes)
7290 amdgpu_dm_commit_cursors(state);
7296 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7297 struct drm_atomic_state *state)
7299 struct amdgpu_device *adev = drm_to_adev(dev);
7300 struct amdgpu_dm_connector *aconnector;
7301 struct drm_connector *connector;
7302 struct drm_connector_state *old_con_state, *new_con_state;
7303 struct drm_crtc_state *new_crtc_state;
7304 struct dm_crtc_state *new_dm_crtc_state;
7305 const struct dc_stream_status *status;
7308 /* Notify device removals. */
7309 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7310 if (old_con_state->crtc != new_con_state->crtc) {
7311 /* CRTC changes require notification. */
7315 if (!new_con_state->crtc)
7318 new_crtc_state = drm_atomic_get_new_crtc_state(
7319 state, new_con_state->crtc);
7321 if (!new_crtc_state)
7324 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7328 aconnector = to_amdgpu_dm_connector(connector);
7330 mutex_lock(&adev->dm.audio_lock);
7331 inst = aconnector->audio_inst;
7332 aconnector->audio_inst = -1;
7333 mutex_unlock(&adev->dm.audio_lock);
7335 amdgpu_dm_audio_eld_notify(adev, inst);
7338 /* Notify audio device additions. */
7339 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7340 if (!new_con_state->crtc)
7343 new_crtc_state = drm_atomic_get_new_crtc_state(
7344 state, new_con_state->crtc);
7346 if (!new_crtc_state)
7349 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7352 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7353 if (!new_dm_crtc_state->stream)
7356 status = dc_stream_get_status(new_dm_crtc_state->stream);
7360 aconnector = to_amdgpu_dm_connector(connector);
7362 mutex_lock(&adev->dm.audio_lock);
7363 inst = status->audio_inst;
7364 aconnector->audio_inst = inst;
7365 mutex_unlock(&adev->dm.audio_lock);
7367 amdgpu_dm_audio_eld_notify(adev, inst);
7372 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7373 * @crtc_state: the DRM CRTC state
7374 * @stream_state: the DC stream state.
7376 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7377 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7379 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7380 struct dc_stream_state *stream_state)
7382 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7385 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7386 struct drm_atomic_state *state,
7389 struct drm_crtc *crtc;
7390 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7391 struct amdgpu_device *adev = drm_to_adev(dev);
7395 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7396 * a modeset, being disabled, or have no active planes.
7398 * It's done in atomic commit rather than commit tail for now since
7399 * some of these interrupt handlers access the current CRTC state and
7400 * potentially the stream pointer itself.
7402 * Since the atomic state is swapped within atomic commit and not within
7403 * commit tail, this would lead to the new state (which hasn't been
7404 * committed yet) being accessed from within the handlers.
7406 * TODO: Fix this so we can do this in commit tail and not have to block
7409 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7410 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7412 if (old_crtc_state->active &&
7413 (!new_crtc_state->active ||
7414 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7415 manage_dm_interrupts(adev, acrtc, false);
7418 * Add check here for SoC's that support hardware cursor plane, to
7419 * unset legacy_cursor_update
7422 return drm_atomic_helper_commit(dev, state, nonblock);
7424 /*TODO Handle EINTR, reenable IRQ*/
7428 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7429 * @state: The atomic state to commit
7431 * This will tell DC to commit the constructed DC state from atomic_check,
7432 * programming the hardware. Any failure here implies a hardware failure, since
7433 * atomic check should have filtered anything non-kosher.
7435 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7437 struct drm_device *dev = state->dev;
7438 struct amdgpu_device *adev = drm_to_adev(dev);
7439 struct amdgpu_display_manager *dm = &adev->dm;
7440 struct dm_atomic_state *dm_state;
7441 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7443 struct drm_crtc *crtc;
7444 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7445 unsigned long flags;
7446 bool wait_for_vblank = true;
7447 struct drm_connector *connector;
7448 struct drm_connector_state *old_con_state, *new_con_state;
7449 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7450 int crtc_disable_count = 0;
7451 bool mode_set_reset_required = false;
7453 drm_atomic_helper_update_legacy_modeset_state(dev, state);
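/*
 * Use the DC context built during atomic check; if there is none, clone the
 * current DC state so there is still something to commit.
 */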
7455 dm_state = dm_atomic_get_new_state(state);
7456 if (dm_state && dm_state->context) {
7457 dc_state = dm_state->context;
7459 /* No state changes, retain current state. */
7460 dc_state_temp = dc_create_state(dm->dc);
7461 ASSERT(dc_state_temp);
7462 dc_state = dc_state_temp;
7463 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7466 /* update changed items */
7467 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7468 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7470 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7471 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7474 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7475 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7476 "connectors_changed:%d\n",
7478 new_crtc_state->enable,
7479 new_crtc_state->active,
7480 new_crtc_state->planes_changed,
7481 new_crtc_state->mode_changed,
7482 new_crtc_state->active_changed,
7483 new_crtc_state->connectors_changed);
7485 /* Copy all transient state flags into dc state */
7486 if (dm_new_crtc_state->stream) {
7487 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7488 dm_new_crtc_state->stream);
7491 /* handles headless hotplug case, updating new_state and
7492 * aconnector as needed
7495 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7497 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7499 if (!dm_new_crtc_state->stream) {
7501 * this could happen because of issues with
7502 * userspace notifications delivery.
7503 * In this case userspace tries to set mode on
7504 * display which is disconnected in fact.
7505 * dc_sink is NULL in this case on aconnector.
7506 * We expect reset mode will come soon.
7508 * This can also happen when an unplug occurs
7509 * while the resume sequence is finishing.
7511 * In this case, we want to pretend we still
7512 * have a sink to keep the pipe running so that
7513 * hw state is consistent with the sw state
7515 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7516 __func__, acrtc->base.base.id);
7520 if (dm_old_crtc_state->stream)
7521 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7523 pm_runtime_get_noresume(dev->dev);
7525 acrtc->enabled = true;
7526 acrtc->hw_mode = new_crtc_state->mode;
7527 crtc->hwmode = new_crtc_state->mode;
7528 mode_set_reset_required = true;
7529 } else if (modereset_required(new_crtc_state)) {
7530 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7531 /* i.e. reset mode */
7532 if (dm_old_crtc_state->stream)
7533 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7534 mode_set_reset_required = true;
7536 } /* for_each_crtc_in_state() */
7539 /* If there is a mode set or reset, disable eDP PSR. */
7540 if (mode_set_reset_required)
7541 amdgpu_dm_psr_disable_all(dm);
7543 dm_enable_per_frame_crtc_master_sync(dc_state);
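/* Hand the constructed DC state to DC for programming, under the DC lock. */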
7544 mutex_lock(&dm->dc_lock);
7545 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7546 mutex_unlock(&dm->dc_lock);
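/* Cache the OTG instance that DC assigned to each stream on its amdgpu_crtc. */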
7549 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7550 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7552 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7554 if (dm_new_crtc_state->stream != NULL) {
7555 const struct dc_stream_status *status =
7556 dc_stream_get_status(dm_new_crtc_state->stream);
7559 status = dc_stream_get_status_from_state(dc_state,
7560 dm_new_crtc_state->stream);
7563 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7565 acrtc->otg_inst = status->primary_otg_inst;
7568 #ifdef CONFIG_DRM_AMD_DC_HDCP
7569 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7570 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7571 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7572 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7574 new_crtc_state = NULL;
7577 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7579 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7581 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7582 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7583 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7584 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7588 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7589 hdcp_update_display(
7590 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7591 new_con_state->hdcp_content_type,
7592 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7597 /* Handle connector state changes */
7598 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7599 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7600 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7601 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7602 struct dc_surface_update dummy_updates[MAX_SURFACES];
7603 struct dc_stream_update stream_update;
7604 struct dc_info_packet hdr_packet;
7605 struct dc_stream_status *status = NULL;
7606 bool abm_changed, hdr_changed, scaling_changed;
7608 memset(&dummy_updates, 0, sizeof(dummy_updates));
7609 memset(&stream_update, 0, sizeof(stream_update));
7612 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7613 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7616 /* Skip any modesets/resets */
7617 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7620 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7621 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7623 scaling_changed = is_scaling_state_different(dm_new_con_state,
7626 abm_changed = dm_new_crtc_state->abm_level !=
7627 dm_old_crtc_state->abm_level;
7630 is_hdr_metadata_different(old_con_state, new_con_state);
7632 if (!scaling_changed && !abm_changed && !hdr_changed)
7635 stream_update.stream = dm_new_crtc_state->stream;
7636 if (scaling_changed) {
7637 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7638 dm_new_con_state, dm_new_crtc_state->stream);
7640 stream_update.src = dm_new_crtc_state->stream->src;
7641 stream_update.dst = dm_new_crtc_state->stream->dst;
7645 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7647 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7651 fill_hdr_info_packet(new_con_state, &hdr_packet);
7652 stream_update.hdr_static_metadata = &hdr_packet;
7655 status = dc_stream_get_status(dm_new_crtc_state->stream);
7657 WARN_ON(!status->plane_count);
7660 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7661 * Here we create an empty update on each plane.
7662 * To fix this, DC should permit updating only stream properties.
7664 for (j = 0; j < status->plane_count; j++)
7665 dummy_updates[j].surface = status->plane_states[0];
7668 mutex_lock(&dm->dc_lock);
7669 dc_commit_updates_for_stream(dm->dc,
7671 status->plane_count,
7672 dm_new_crtc_state->stream,
7675 mutex_unlock(&dm->dc_lock);
7678 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7679 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7680 new_crtc_state, i) {
7681 if (old_crtc_state->active && !new_crtc_state->active)
7682 crtc_disable_count++;
7684 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7685 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7687 /* Update freesync active state. */
7688 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7690 /* Handle vrr on->off / off->on transitions */
7691 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7696 * Enable interrupts for CRTCs that are newly enabled or went through
7697 * a modeset. It was intentionally deferred until after the front end
7698 * state was modified to wait until the OTG was on and so the IRQ
7699 * handlers didn't access stale or invalid state.
7701 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7702 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7704 if (new_crtc_state->active &&
7705 (!old_crtc_state->active ||
7706 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7707 manage_dm_interrupts(adev, acrtc, true);
7708 #ifdef CONFIG_DEBUG_FS
7710 * Frontend may have changed so reapply the CRC capture
7711 * settings for the stream.
7713 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7715 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7716 amdgpu_dm_crtc_configure_crc_source(
7717 crtc, dm_new_crtc_state,
7718 dm_new_crtc_state->crc_src);
7724 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7725 if (new_crtc_state->async_flip)
7726 wait_for_vblank = false;
7728 /* update planes when needed per crtc*/
7729 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7730 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7732 if (dm_new_crtc_state->stream)
7733 amdgpu_dm_commit_planes(state, dc_state, dev,
7734 dm, crtc, wait_for_vblank);
7737 /* Update audio instances for each connector. */
7738 amdgpu_dm_commit_audio(dev, state);
7741 * send vblank event on all events not handled in flip and
7742 * mark consumed event for drm_atomic_helper_commit_hw_done
7744 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7745 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7747 if (new_crtc_state->event)
7748 drm_send_event_locked(dev, &new_crtc_state->event->base);
7750 new_crtc_state->event = NULL;
7752 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7754 /* Signal HW programming completion */
7755 drm_atomic_helper_commit_hw_done(state);
7757 if (wait_for_vblank)
7758 drm_atomic_helper_wait_for_flip_done(dev, state);
7760 drm_atomic_helper_cleanup_planes(dev, state);
7763 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7764 * so we can put the GPU into runtime suspend if we're not driving any
7767 for (i = 0; i < crtc_disable_count; i++)
7768 pm_runtime_put_autosuspend(dev->dev);
7769 pm_runtime_mark_last_busy(dev->dev);
7772 dc_release_state(dc_state_temp);
7776 static int dm_force_atomic_commit(struct drm_connector *connector)
7779 struct drm_device *ddev = connector->dev;
7780 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7781 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7782 struct drm_plane *plane = disconnected_acrtc->base.primary;
7783 struct drm_connector_state *conn_state;
7784 struct drm_crtc_state *crtc_state;
7785 struct drm_plane_state *plane_state;
7790 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7792 /* Construct an atomic state to restore previous display setting */
7795 * Attach connectors to drm_atomic_state
7797 conn_state = drm_atomic_get_connector_state(state, connector);
7799 ret = PTR_ERR_OR_ZERO(conn_state);
7803 /* Attach crtc to drm_atomic_state*/
7804 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7806 ret = PTR_ERR_OR_ZERO(crtc_state);
7810 /* force a restore */
7811 crtc_state->mode_changed = true;
7813 /* Attach plane to drm_atomic_state */
7814 plane_state = drm_atomic_get_plane_state(state, plane);
7816 ret = PTR_ERR_OR_ZERO(plane_state);
7821 /* Call commit internally with the state we just constructed */
7822 ret = drm_atomic_commit(state);
7827 DRM_ERROR("Restoring old state failed with %i\n", ret);
7828 drm_atomic_state_put(state);
7834 * This function handles all cases when set mode does not come upon hotplug.
7835 * This includes when a display is unplugged and then plugged back into the
7836 * same port, and when running without usermode desktop manager support.
7838 void dm_restore_drm_connector_state(struct drm_device *dev,
7839 struct drm_connector *connector)
7841 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7842 struct amdgpu_crtc *disconnected_acrtc;
7843 struct dm_crtc_state *acrtc_state;
7845 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7848 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7849 if (!disconnected_acrtc)
7852 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7853 if (!acrtc_state->stream)
7857 * If the previous sink has not been released and differs from the current
7858 * one, we deduce that we cannot rely on a usermode call to turn on the
7859 * display, so we do it here.
7861 if (acrtc_state->stream->sink != aconnector->dc_sink)
7862 dm_force_atomic_commit(&aconnector->base);
7866 * Grabs all modesetting locks to serialize against any blocking commits,
7867 * and waits for completion of all non-blocking commits.
7869 static int do_aquire_global_lock(struct drm_device *dev,
7870 struct drm_atomic_state *state)
7872 struct drm_crtc *crtc;
7873 struct drm_crtc_commit *commit;
7877 * Adding all modeset locks to the acquire_ctx will
7878 * ensure that when the framework releases it, the
7879 * extra locks we are taking here also get released.
7881 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7885 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7886 spin_lock(&crtc->commit_lock);
7887 commit = list_first_entry_or_null(&crtc->commit_list,
7888 struct drm_crtc_commit, commit_entry);
7890 drm_crtc_commit_get(commit);
7891 spin_unlock(&crtc->commit_lock);
7897 * Make sure all pending HW programming has completed and all page flips are done.
7900 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7903 ret = wait_for_completion_interruptible_timeout(
7904 &commit->flip_done, 10*HZ);
7907 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7908 "timed out\n", crtc->base.id, crtc->name);
7910 drm_crtc_commit_put(commit);
7913 return ret < 0 ? ret : 0;
7916 static void get_freesync_config_for_crtc(
7917 struct dm_crtc_state *new_crtc_state,
7918 struct dm_connector_state *new_con_state)
7920 struct mod_freesync_config config = {0};
7921 struct amdgpu_dm_connector *aconnector =
7922 to_amdgpu_dm_connector(new_con_state->base.connector);
7923 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7924 int vrefresh = drm_mode_vrefresh(mode);
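/*
 * VRR is only supported when the connector reports FreeSync capability and
 * the mode's refresh rate lies within the panel's min/max range.
 */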
7926 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7927 vrefresh >= aconnector->min_vfreq &&
7928 vrefresh <= aconnector->max_vfreq;
7930 if (new_crtc_state->vrr_supported) {
7931 new_crtc_state->stream->ignore_msa_timing_param = true;
7932 config.state = new_crtc_state->base.vrr_enabled ?
7933 VRR_STATE_ACTIVE_VARIABLE :
7935 config.min_refresh_in_uhz =
7936 aconnector->min_vfreq * 1000000;
7937 config.max_refresh_in_uhz =
7938 aconnector->max_vfreq * 1000000;
7939 config.vsif_supported = true;
7943 new_crtc_state->freesync_config = config;
7946 static void reset_freesync_config_for_crtc(
7947 struct dm_crtc_state *new_crtc_state)
7949 new_crtc_state->vrr_supported = false;
7951 memset(&new_crtc_state->vrr_params, 0,
7952 sizeof(new_crtc_state->vrr_params));
7953 memset(&new_crtc_state->vrr_infopacket, 0,
7954 sizeof(new_crtc_state->vrr_infopacket));
7957 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7958 struct drm_atomic_state *state,
7959 struct drm_crtc *crtc,
7960 struct drm_crtc_state *old_crtc_state,
7961 struct drm_crtc_state *new_crtc_state,
7963 bool *lock_and_validation_needed)
7965 struct dm_atomic_state *dm_state = NULL;
7966 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7967 struct dc_stream_state *new_stream;
7971 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7972 * update changed items
7974 struct amdgpu_crtc *acrtc = NULL;
7975 struct amdgpu_dm_connector *aconnector = NULL;
7976 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7977 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7981 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7982 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7983 acrtc = to_amdgpu_crtc(crtc);
7984 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7986 /* TODO This hack should go away */
7987 if (aconnector && enable) {
7988 /* Make sure fake sink is created in plug-in scenario */
7989 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7991 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7994 if (IS_ERR(drm_new_conn_state)) {
7995 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7999 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8000 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8002 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8005 new_stream = create_validate_stream_for_sink(aconnector,
8006 &new_crtc_state->mode,
8008 dm_old_crtc_state->stream);
8011 * we can have no stream on ACTION_SET if a display
8012 * was disconnected during S3, in this case it is not an
8013 * error, the OS will be updated after detection, and
8014 * will do the right thing on next atomic commit
8018 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8019 __func__, acrtc->base.base.id);
8025 * TODO: Check VSDB bits to decide whether this should
8026 * be enabled or not.
8028 new_stream->triggered_crtc_reset.enabled =
8029 dm->force_timing_sync;
8031 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8033 ret = fill_hdr_info_packet(drm_new_conn_state,
8034 &new_stream->hdr_static_metadata);
8039 * If we already removed the old stream from the context
8040 * (and set the new stream to NULL) then we can't reuse
8041 * the old stream even if the stream and scaling are unchanged.
8042 * We'll hit the BUG_ON and black screen.
8044 * TODO: Refactor this function to allow this check to work
8045 * in all conditions.
8047 if (dm_new_crtc_state->stream &&
8048 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8049 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8050 new_crtc_state->mode_changed = false;
8051 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8052 new_crtc_state->mode_changed);
8056 /* mode_changed flag may get updated above, need to check again */
8057 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8061 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8062 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8063 "connectors_changed:%d\n",
8065 new_crtc_state->enable,
8066 new_crtc_state->active,
8067 new_crtc_state->planes_changed,
8068 new_crtc_state->mode_changed,
8069 new_crtc_state->active_changed,
8070 new_crtc_state->connectors_changed);
8072 /* Remove stream for any changed/disabled CRTC */
8075 if (!dm_old_crtc_state->stream)
8078 ret = dm_atomic_get_state(state, &dm_state);
8082 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8085 /* i.e. reset mode */
8086 if (dc_remove_stream_from_ctx(
8089 dm_old_crtc_state->stream) != DC_OK) {
8094 dc_stream_release(dm_old_crtc_state->stream);
8095 dm_new_crtc_state->stream = NULL;
8097 reset_freesync_config_for_crtc(dm_new_crtc_state);
8099 *lock_and_validation_needed = true;
8101 } else {/* Add stream for any updated/enabled CRTC */
8103 * Quick fix to prevent NULL pointer on new_stream when
8104 * newly added MST connectors are not found in the existing crtc_state in chained mode.
8105 * TODO: need to dig out the root cause of that
8107 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8110 if (modereset_required(new_crtc_state))
8113 if (modeset_required(new_crtc_state, new_stream,
8114 dm_old_crtc_state->stream)) {
8116 WARN_ON(dm_new_crtc_state->stream);
8118 ret = dm_atomic_get_state(state, &dm_state);
8122 dm_new_crtc_state->stream = new_stream;
8124 dc_stream_retain(new_stream);
8126 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8129 if (dc_add_stream_to_ctx(
8132 dm_new_crtc_state->stream) != DC_OK) {
8137 *lock_and_validation_needed = true;
8142 /* Release extra reference */
8144 dc_stream_release(new_stream);
8147 * We want to do dc stream updates that do not require a
8148 * full modeset below.
8150 if (!(enable && aconnector && new_crtc_state->active))
8153 * Given above conditions, the dc state cannot be NULL because:
8154 * 1. We're in the process of enabling CRTCs (just been added
8155 * to the dc context, or already is on the context)
8156 * 2. Has a valid connector attached, and
8157 * 3. Is currently active and enabled.
8158 * => The dc stream state currently exists.
8160 BUG_ON(dm_new_crtc_state->stream == NULL);
8162 /* Scaling or underscan settings */
8163 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8164 update_stream_scaling_settings(
8165 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8168 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8171 * Color management settings. We also update color properties
8172 * when a modeset is needed, to ensure it gets reprogrammed.
8174 if (dm_new_crtc_state->base.color_mgmt_changed ||
8175 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8176 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8181 /* Update Freesync settings. */
8182 get_freesync_config_for_crtc(dm_new_crtc_state,
8189 dc_stream_release(new_stream);
8193 static bool should_reset_plane(struct drm_atomic_state *state,
8194 struct drm_plane *plane,
8195 struct drm_plane_state *old_plane_state,
8196 struct drm_plane_state *new_plane_state)
8198 struct drm_plane *other;
8199 struct drm_plane_state *old_other_state, *new_other_state;
8200 struct drm_crtc_state *new_crtc_state;
8204 * TODO: Remove this hack once the checks below are sufficient
8205 * to determine when we need to reset all the planes on
8208 if (state->allow_modeset)
8211 /* Exit early if we know that we're adding or removing the plane. */
8212 if (old_plane_state->crtc != new_plane_state->crtc)
8215 /* old crtc == new_crtc == NULL, plane not in context. */
8216 if (!new_plane_state->crtc)
8220 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8222 if (!new_crtc_state)
8225 /* CRTC Degamma changes currently require us to recreate planes. */
8226 if (new_crtc_state->color_mgmt_changed)
8229 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8233 * If there are any new primary or overlay planes being added or
8234 * removed then the z-order can potentially change. To ensure
8235 * correct z-order and pipe acquisition the current DC architecture
8236 * requires us to remove and recreate all existing planes.
8238 * TODO: Come up with a more elegant solution for this.
8240 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8241 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8243 if (other->type == DRM_PLANE_TYPE_CURSOR)
8246 if (old_other_state->crtc != new_plane_state->crtc &&
8247 new_other_state->crtc != new_plane_state->crtc)
8250 if (old_other_state->crtc != new_other_state->crtc)
8253 /* Src/dst size and scaling updates. */
8254 if (old_other_state->src_w != new_other_state->src_w ||
8255 old_other_state->src_h != new_other_state->src_h ||
8256 old_other_state->crtc_w != new_other_state->crtc_w ||
8257 old_other_state->crtc_h != new_other_state->crtc_h)
8260 /* Rotation / mirroring updates. */
8261 if (old_other_state->rotation != new_other_state->rotation)
8264 /* Blending updates. */
8265 if (old_other_state->pixel_blend_mode !=
8266 new_other_state->pixel_blend_mode)
8269 /* Alpha updates. */
8270 if (old_other_state->alpha != new_other_state->alpha)
8273 /* Colorspace changes. */
8274 if (old_other_state->color_range != new_other_state->color_range ||
8275 old_other_state->color_encoding != new_other_state->color_encoding)
8278 /* Framebuffer checks fall at the end. */
8279 if (!old_other_state->fb || !new_other_state->fb)
8282 /* Pixel format changes can require bandwidth updates. */
8283 if (old_other_state->fb->format != new_other_state->fb->format)
8286 old_dm_plane_state = to_dm_plane_state(old_other_state);
8287 new_dm_plane_state = to_dm_plane_state(new_other_state);
8289 /* Tiling and DCC changes also require bandwidth updates. */
8290 if (old_dm_plane_state->tiling_flags !=
8291 new_dm_plane_state->tiling_flags)
8298 static int dm_update_plane_state(struct dc *dc,
8299 struct drm_atomic_state *state,
8300 struct drm_plane *plane,
8301 struct drm_plane_state *old_plane_state,
8302 struct drm_plane_state *new_plane_state,
8304 bool *lock_and_validation_needed)
8307 struct dm_atomic_state *dm_state = NULL;
8308 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8309 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8310 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8311 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8312 struct amdgpu_crtc *new_acrtc;
8317 new_plane_crtc = new_plane_state->crtc;
8318 old_plane_crtc = old_plane_state->crtc;
8319 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8320 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8322 /*TODO Implement better atomic check for cursor plane */
8323 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8324 if (!enable || !new_plane_crtc ||
8325 drm_atomic_plane_disabling(plane->state, new_plane_state))
8328 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
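/* Reject cursor dimensions that exceed the hardware cursor limits for this CRTC. */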
8330 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8331 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8332 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8333 new_plane_state->crtc_w, new_plane_state->crtc_h);
8340 needs_reset = should_reset_plane(state, plane, old_plane_state,
8343 /* Remove any changed/removed planes */
8348 if (!old_plane_crtc)
8351 old_crtc_state = drm_atomic_get_old_crtc_state(
8352 state, old_plane_crtc);
8353 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8355 if (!dm_old_crtc_state->stream)
8358 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8359 plane->base.id, old_plane_crtc->base.id);
8361 ret = dm_atomic_get_state(state, &dm_state);
8365 if (!dc_remove_plane_from_context(
8367 dm_old_crtc_state->stream,
8368 dm_old_plane_state->dc_state,
8369 dm_state->context)) {
8375 dc_plane_state_release(dm_old_plane_state->dc_state);
8376 dm_new_plane_state->dc_state = NULL;
8378 *lock_and_validation_needed = true;
8380 } else { /* Add new planes */
8381 struct dc_plane_state *dc_new_plane_state;
8383 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8386 if (!new_plane_crtc)
8389 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8390 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8392 if (!dm_new_crtc_state->stream)
8398 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8402 WARN_ON(dm_new_plane_state->dc_state);
8404 dc_new_plane_state = dc_create_plane_state(dc);
8405 if (!dc_new_plane_state)
8408 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8409 plane->base.id, new_plane_crtc->base.id);
8411 ret = fill_dc_plane_attributes(
8412 drm_to_adev(new_plane_crtc->dev),
8417 dc_plane_state_release(dc_new_plane_state);
8421 ret = dm_atomic_get_state(state, &dm_state);
8423 dc_plane_state_release(dc_new_plane_state);
8428 * Any atomic check errors that occur after this will
8429 * not need a release. The plane state will be attached
8430 * to the stream, and therefore part of the atomic
8431 * state. It'll be released when the atomic state is
8434 if (!dc_add_plane_to_context(
8436 dm_new_crtc_state->stream,
8438 dm_state->context)) {
8440 dc_plane_state_release(dc_new_plane_state);
8444 dm_new_plane_state->dc_state = dc_new_plane_state;
8446 /* Tell DC to do a full surface update every time there
8447 * is a plane change. Inefficient, but works for now.
8449 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8451 *lock_and_validation_needed = true;
8458 #if defined(CONFIG_DRM_AMD_DC_DCN)
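/*
 * DSC bandwidth on an MST link is shared by every stream on that link. When a
 * CRTC driving one MST connector needs a modeset, pull the CRTCs of the other
 * connectors on the same MST topology into the atomic state so their DSC
 * configuration can be revalidated as well.
 */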
8459 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8461 struct drm_connector *connector;
8462 struct drm_connector_state *conn_state;
8463 struct amdgpu_dm_connector *aconnector = NULL;
8465 for_each_new_connector_in_state(state, connector, conn_state, i) {
8466 if (conn_state->crtc != crtc)
8469 aconnector = to_amdgpu_dm_connector(connector);
8470 if (!aconnector->port || !aconnector->mst_port)
8479 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8484 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8485 * @dev: The DRM device
8486 * @state: The atomic state to commit
8488 * Validate that the given atomic state is programmable by DC into hardware.
8489 * This involves constructing a &struct dc_state reflecting the new hardware
8490 * state we wish to commit, then querying DC to see if it is programmable. It's
8491 * important not to modify the existing DC state. Otherwise, atomic_check
8492 * may unexpectedly commit hardware changes.
8494 * When validating the DC state, it's important that the right locks are
8495 * acquired. For the full-update case, which removes/adds/updates streams on one
8496 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
8497 * that any such full update commit will wait for completion of any outstanding
8498 * flip using DRMs synchronization events.
8500 * Note that DM adds the affected connectors for all CRTCs in state, even when that
8501 * might not seem necessary. This is because DC stream creation requires the
8502 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8503 * be possible but non-trivial - a possible TODO item.
8505 * Return: 0 on success, or a negative error code if validation failed.
8507 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8508 struct drm_atomic_state *state)
8510 struct amdgpu_device *adev = drm_to_adev(dev);
8511 struct dm_atomic_state *dm_state = NULL;
8512 struct dc *dc = adev->dm.dc;
8513 struct drm_connector *connector;
8514 struct drm_connector_state *old_con_state, *new_con_state;
8515 struct drm_crtc *crtc;
8516 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8517 struct drm_plane *plane;
8518 struct drm_plane_state *old_plane_state, *new_plane_state;
8519 enum dc_status status;
8521 bool lock_and_validation_needed = false;
8523 ret = drm_atomic_helper_check_modeset(dev, state);
8527 /* Check connector changes */
8528 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8529 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8530 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8532 /* Skip connectors that are disabled or part of modeset already. */
8533 if (!old_con_state->crtc && !new_con_state->crtc)
8536 if (!new_con_state->crtc)
8539 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8540 if (IS_ERR(new_crtc_state)) {
8541 ret = PTR_ERR(new_crtc_state);
8545 if (dm_old_con_state->abm_level !=
8546 dm_new_con_state->abm_level)
8547 new_crtc_state->connectors_changed = true;
8550 #if defined(CONFIG_DRM_AMD_DC_DCN)
8551 if (adev->asic_type >= CHIP_NAVI10) {
8552 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8553 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8554 ret = add_affected_mst_dsc_crtcs(state, crtc);
8561 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8562 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8563 !new_crtc_state->color_mgmt_changed &&
8564 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8567 if (!new_crtc_state->enable)
8570 ret = drm_atomic_add_affected_connectors(state, crtc);
8574 ret = drm_atomic_add_affected_planes(state, crtc);
8580 * Add all primary and overlay planes on the CRTC to the state
8581 * whenever a plane is enabled to maintain correct z-ordering
8582 * and to enable fast surface updates.
8584 drm_for_each_crtc(crtc, dev) {
8585 bool modified = false;
8587 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8588 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8591 if (new_plane_state->crtc == crtc ||
8592 old_plane_state->crtc == crtc) {
8601 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8602 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8606 drm_atomic_get_plane_state(state, plane);
8608 if (IS_ERR(new_plane_state)) {
8609 ret = PTR_ERR(new_plane_state);
8615 /* Prepass for updating tiling flags on new planes. */
8616 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8617 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8618 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8620 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8621 &new_dm_plane_state->tmz_surface);
8626 /* Remove existing planes if they are modified */
8627 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8628 ret = dm_update_plane_state(dc, state, plane,
8632 &lock_and_validation_needed);
8637 /* Disable all crtcs which require disable */
8638 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8639 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8643 &lock_and_validation_needed);
8648 /* Enable all crtcs which require enable */
8649 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8650 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8654 &lock_and_validation_needed);
8659 /* Add new/modified planes */
8660 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8661 ret = dm_update_plane_state(dc, state, plane,
8665 &lock_and_validation_needed);
8670 /* Run this here since we want to validate the streams we created */
8671 ret = drm_atomic_helper_check_planes(dev, state);
8675 if (state->legacy_cursor_update) {
8677 * This is a fast cursor update coming from the plane update
8678 * helper; check if it can be done asynchronously for better performance.
8681 state->async_update =
8682 !drm_atomic_helper_async_check(dev, state);
8685 * Skip the remaining global validation if this is an async
8686 * update. Cursor updates can be done without affecting
8687 * state or bandwidth calcs and this avoids the performance
8688 * penalty of locking the private state object and
8689 * allocating a new dc_state.
8691 if (state->async_update)
8695 /* Check scaling and underscan changes */
8696 /* TODO: Scaling-change validation was removed due to the inability to commit a
8697  * new stream into the context w/o causing a full reset. Need to
8698  * decide how to handle this.
8700 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8701 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8702 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8703 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8705 /* Skip any modesets/resets */
8706 if (!acrtc || drm_atomic_crtc_needs_modeset(
8707 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8710 /* Skip anything that is not a scaling or underscan change */
8711 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8714 lock_and_validation_needed = true;
8718 * Streams and planes are reset when there are changes that affect
8719 * bandwidth. Anything that affects bandwidth needs to go through
8720 * DC global validation to ensure that the configuration can be applied to hardware.
8723 * We have to currently stall out here in atomic_check for outstanding
8724 * commits to finish in this case because our IRQ handlers reference
8725 * DRM state directly - we can end up disabling interrupts too early otherwise.
8728 * TODO: Remove this stall and drop DM state private objects.
8730 if (lock_and_validation_needed) {
8731 ret = dm_atomic_get_state(state, &dm_state);
8735 ret = do_aquire_global_lock(dev, state);
8739 #if defined(CONFIG_DRM_AMD_DC_DCN)
8740 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8743 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8749 * Perform validation of MST topology in the state:
8750 * We need to perform MST atomic check before calling
8751 * dc_validate_global_state(); otherwise there is a chance
8752 * of getting stuck in an infinite loop and eventually hanging.
8754 ret = drm_dp_mst_atomic_check(state);
8757 status = dc_validate_global_state(dc, dm_state->context, false);
8758 if (status != DC_OK) {
8759 DC_LOG_WARNING("DC global validation failure: %s (%d)",
8760 dc_status_to_str(status), status);
8766 * The commit is a fast update. Fast updates shouldn't change
8767 * the DC context or affect global validation, and their commit
8768 * work can be done in parallel with other commits not touching
8769 * the same resource. If we have a new DC context as part of
8770 * the DM atomic state from validation we need to free it and
8771 * retain the existing one instead.
8773 * Furthermore, since the DM atomic state only contains the DC
8774 * context and can safely be annulled, we can free the state
8775 * and clear the associated private object now to free
8776 * some memory and avoid a possible use-after-free later.
8779 for (i = 0; i < state->num_private_objs; i++) {
8780 struct drm_private_obj *obj = state->private_objs[i].ptr;
8782 if (obj->funcs == adev->dm.atomic_obj.funcs) {
8783 int j = state->num_private_objs-1;
8785 dm_atomic_destroy_state(obj,
8786 state->private_objs[i].state);
8788 /* If i is not at the end of the array then the
8789 * last element needs to be moved to where i was
8790 * before the array can safely be truncated.
8793 state->private_objs[i] =
8794 state->private_objs[j];
8796 state->private_objs[j].ptr = NULL;
8797 state->private_objs[j].state = NULL;
8798 state->private_objs[j].old_state = NULL;
8799 state->private_objs[j].new_state = NULL;
8801 state->num_private_objs = j;
8807 /* Store the overall update type for use later in atomic check. */
8808 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8809 struct dm_crtc_state *dm_new_crtc_state =
8810 to_dm_crtc_state(new_crtc_state);
8812 dm_new_crtc_state->update_type = lock_and_validation_needed ?
8817 /* Must be success */
8822 if (ret == -EDEADLK)
8823 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8824 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8825 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8827 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8832 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8833 struct amdgpu_dm_connector *amdgpu_dm_connector)
8836 bool capable = false;
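/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD: the
 * DP_MSA_TIMING_PAR_IGNORED bit indicates the sink can drive the display from
 * the video stream timing alone, without the MSA timing parameters, which is
 * a prerequisite for variable-refresh (FreeSync) operation over DP.
 */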
8838 if (amdgpu_dm_connector->dc_link &&
8839 dm_helpers_dp_read_dpcd(
8841 amdgpu_dm_connector->dc_link,
8842 DP_DOWN_STREAM_PORT_COUNT,
8844 sizeof(dpcd_data))) {
8845 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8850 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8854 bool edid_check_required;
8855 struct detailed_timing *timing;
8856 struct detailed_non_pixel *data;
8857 struct detailed_data_monitor_range *range;
8858 struct amdgpu_dm_connector *amdgpu_dm_connector =
8859 to_amdgpu_dm_connector(connector);
8860 struct dm_connector_state *dm_con_state = NULL;
8862 struct drm_device *dev = connector->dev;
8863 struct amdgpu_device *adev = drm_to_adev(dev);
8864 bool freesync_capable = false;
8866 if (!connector->state) {
8867 DRM_ERROR("%s - Connector has no state", __func__);
8872 dm_con_state = to_dm_connector_state(connector->state);
8874 amdgpu_dm_connector->min_vfreq = 0;
8875 amdgpu_dm_connector->max_vfreq = 0;
8876 amdgpu_dm_connector->pixel_clock_mhz = 0;
8881 dm_con_state = to_dm_connector_state(connector->state);
8883 edid_check_required = false;
8884 if (!amdgpu_dm_connector->dc_sink) {
8885 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8888 if (!adev->dm.freesync_module)
8891 * If an EDID is present, restrict FreeSync support to DP and eDP sinks only.
8894 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8895 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8896 edid_check_required = is_dp_capable_without_timing_msa(
8898 amdgpu_dm_connector);
8901 if (edid_check_required == true && (edid->version > 1 ||
8902 (edid->version == 1 && edid->revision > 1))) {
8903 for (i = 0; i < 4; i++) {
8905 timing = &edid->detailed_timings[i];
8906 data = &timing->data.other_data;
8907 range = &data->data.range;
8909 * Check if monitor has continuous frequency mode
8911 if (data->type != EDID_DETAIL_MONITOR_RANGE)
8914  * Check for flag range limits only. If flag == 1 then
8915  * no additional timing information is provided.
8916  * Default GTF, GTF Secondary curve and CVT are not supported.
8919 if (range->flags != 1)
8922 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8923 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8924 amdgpu_dm_connector->pixel_clock_mhz =
8925 range->pixel_clock_mhz * 10;
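/*
 * Only report the panel as FreeSync capable when the advertised refresh
 * range spans more than 10 Hz.
 */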
8929 if (amdgpu_dm_connector->max_vfreq -
8930 amdgpu_dm_connector->min_vfreq > 10) {
8932 freesync_capable = true;
8938 dm_con_state->freesync_capable = freesync_capable;
8940 if (connector->vrr_capable_property)
8941 drm_connector_set_vrr_capable_property(connector,
8945 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8947 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
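/*
 * Read the eDP sink's PSR capability (DP_PSR_SUPPORT) from DPCD and cache the
 * reported PSR version; a version of 0 means the panel does not support PSR.
 */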
8949 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8951 if (link->type == dc_connection_none)
8953 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8954 dpcd_data, sizeof(dpcd_data))) {
8955 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8957 if (dpcd_data[0] == 0) {
8958 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8959 link->psr_settings.psr_feature_enabled = false;
8961 link->psr_settings.psr_version = DC_PSR_VERSION_1;
8962 link->psr_settings.psr_feature_enabled = true;
8965 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8970 * amdgpu_dm_link_setup_psr() - configure psr link
8971 * @stream: stream state
8973 * Return: true on success
8975 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8977 struct dc_link *link = NULL;
8978 struct psr_config psr_config = {0};
8979 struct psr_context psr_context = {0};
8985 link = stream->link;
8987 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
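/*
 * A non-zero PSR version means the sink advertised PSR support, so fill out
 * the PSR configuration and let DC program the link accordingly.
 */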
8989 if (psr_config.psr_version > 0) {
8990 psr_config.psr_exit_link_training_required = 0x1;
8991 psr_config.psr_frame_capture_indication_req = 0;
8992 psr_config.psr_rfb_setup_time = 0x37;
8993 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8994 psr_config.allow_smu_optimizations = 0x0;
8996 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8999 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9005 * amdgpu_dm_psr_enable() - enable psr f/w
9006 * @stream: stream state
9008 * Return: true on success
9010 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9012 struct dc_link *link = stream->link;
9013 unsigned int vsync_rate_hz = 0;
9014 struct dc_static_screen_params params = {0};
9015 /* Calculate number of static frames before generating an interrupt to the controller. */
9018 /* Initialize a fail-safe of 2 static frames */
9019 unsigned int num_frames_static = 2;
9021 DRM_DEBUG_DRIVER("Enabling psr...\n");
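/*
 * Derive the refresh rate from the stream timing:
 * vsync_rate_hz = pixel clock in Hz / (h_total * v_total).
 * pix_clk_100hz is in units of 100 Hz, hence the multiplication by 100.
 */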
9023 vsync_rate_hz = div64_u64(div64_u64((
9024 stream->timing.pix_clk_100hz * 100),
9025 stream->timing.v_total),
9026 stream->timing.h_total);
9029 * Calculate the number of frames such that at least 30 ms of time has passed.
9032 if (vsync_rate_hz != 0) {
9033 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9034 num_frames_static = (30000 / frame_time_microsec) + 1;
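/*
 * Cursor, overlay and surface updates count as screen activity; the display
 * must stay static for num_frames consecutive frames after the last such
 * update before PSR can be entered.
 */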
9037 params.triggers.cursor_update = true;
9038 params.triggers.overlay_update = true;
9039 params.triggers.surface_update = true;
9040 params.num_frames = num_frames_static;
9042 dc_stream_set_static_screen_params(link->ctx->dc,
9046 return dc_link_set_psr_allow_active(link, true, false);
9050 * amdgpu_dm_psr_disable() - disable psr f/w
9051 * @stream: stream state
9053 * Return: true on success
9055 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9058 DRM_DEBUG_DRIVER("Disabling psr...\n");
9060 return dc_link_set_psr_allow_active(stream->link, false, true);
9064 * amdgpu_dm_psr_disable_all() - disable psr f/w
9065 * if psr is enabled on any stream
9067 * Return: true on success
9069 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9071 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9072 return dc_set_psr_allow_active(dm->dc, false);
9075 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9077 struct amdgpu_device *adev = drm_to_adev(dev);
9078 struct dc *dc = adev->dm.dc;
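/*
 * Under the DC lock, propagate the current force_timing_sync setting to every
 * stream in the active DC state and re-trigger CRTC synchronization so the
 * change takes effect immediately.
 */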
9081 mutex_lock(&adev->dm.dc_lock);
9082 if (dc->current_state) {
9083 for (i = 0; i < dc->current_state->stream_count; ++i)
9084 dc->current_state->streams[i]
9085 ->triggered_crtc_reset.enabled =
9086 adev->dm.force_timing_sync;
9088 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9089 dc_trigger_sync(dc, dc->current_state);
9091 mutex_unlock(&adev->dm.dc_lock);