/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "amdgpu_display.h"
#include "amdgpu_ucode.h"

#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>

#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
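
/*
 * Illustrative note (not from the original source): given the offset math in
 * dm_dmub_hw_init() below, the DMUB ucode payload appears to be laid out as
 *
 *   ucode_array_offset_bytes
 *   +-> [ PSP header (0x100) | inst_const | PSP footer (0x100) ][ bss/data ]
 *       \___________ hdr->inst_const_bytes ____________________/
 *
 * so fw_inst_const skips PSP_HEADER_BYTES, fw_inst_const_size subtracts both
 * the header and the footer, and fw_bss_data starts right after the
 * inst_const region.
 */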
/**
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);

/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
/*
 * dm_vblank_get_counter
 *
 * Get counter for number of vertical blanks
 *
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
	if (crtc >= adev->mode_info.num_crtc)

		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",

		return dc_stream_get_vblank_counter(acrtc_state->stream);
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))

		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
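
		/*
		 * Worked example (illustrative values, not from the original
		 * code): with v_position = 500 (0x1F4) and h_position = 1000
		 * (0x3E8), *position = 500 | (1000 << 16) = 0x03E801F4, i.e.
		 * the vertical position sits in the low 16 bits and the
		 * horizontal position in the high 16 bits; *vbl packs
		 * v_blank_start/v_blank_end the same way.
		 */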
static bool dm_is_idle(void *handle)

static int dm_wait_for_idle(void *handle)

static bool dm_check_soft_reset(void *handle)

static int dm_soft_reset(void *handle)

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);

		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
static void dm_vupdate_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * now that scanout is past the front-porch. This will also
		 * deliver page-flip completion events that have been queued
		 * to us if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
					&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 */
static void dm_crtc_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);

			drm_crtc_vblank_put(&acrtc->base);

		acrtc->pflip_status = AMDGPU_FLIP_NONE;

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)

	if (compressor->bo_ptr)

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;

		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

			DRM_ERROR("DM: Failed to initialize FBC\n");

			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
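
	/*
	 * Sizing note (illustrative numbers, not from the original source):
	 * the FBC buffer is max_size * 4 bytes, i.e. 4 bytes per pixel of the
	 * largest mode's full frame including blanking. A 1920x1080 mode with
	 * htotal = 2200 and vtotal = 1125, for example, gives
	 * max_size = 2,475,000 pixels, so roughly a 9.9 MB GTT allocation.
	 */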
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)

		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;

	adev->dm.audio_component = acomp;

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	adev->dm.audio_component = NULL;

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);

	adev->dm.audio_registered = true;

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
	if (!adev->mode_info.audio.enabled)

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
static int dm_dmub_hw_init(struct amdgpu_device *adev)
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;

		/* DMUB isn't supported on the ASIC. */

		DRM_ERROR("No framebuffer info for DMUB service.\n");

		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);
static int amdgpu_dm_init(struct amdgpu_device *adev)
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
		init_data.flags.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);

		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
			"amdgpu: failed to initialize freesync_module.\n");

		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");

			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);

	if (amdgpu_dm_initialize_drm_device(adev)) {
			"amdgpu: failed to initialize sw for display support.\n");

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
			"amdgpu: failed to initialize sw for display support.\n");

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	amdgpu_dm_fini(adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev)
	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;

		dc_deinit_callbacks(adev->dm.dc);

	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;

	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
static int load_dmcu_fw(struct amdgpu_device *adev)
	const char *fw_name_dmcu = NULL;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;

		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
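
	/*
	 * Sizing example (hypothetical header values, for illustration only):
	 * with ucode_size_bytes = 0x10000 and intv_size_bytes = 0x400, the
	 * ERAM piece contributes ALIGN(0xFC00, PAGE_SIZE) = 0x10000 bytes and
	 * the interrupt-vector piece ALIGN(0x400, PAGE_SIZE) = 0x1000 bytes
	 * to adev->firmware.fw_size.
	 */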
	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
static int dm_dmub_sw_init(struct amdgpu_device *adev)
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;

	switch (adev->asic_type) {
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;

		/* ASIC doesn't support DMUB. */

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;
		DRM_ERROR("Failed to allocate DMUB service!\n");

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;
		"Failed to allocate framebuffer info for DMUB service!\n");

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
static int dm_sw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = dm_dmub_sw_init(adev);

	return load_dmcu_fw(adev);

static int dm_sw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;

	drm_connector_list_iter_end(&iter);
static int dm_late_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];

	struct dmcu *dmcu = NULL;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev_to_drm(adev));

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
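
	/*
	 * Illustrative check (not from the original source): this builds a
	 * linear 16-entry LUT over the 16-bit backlight range, e.g.
	 * linear_lut[0] = 0, linear_lut[7] = 0xFFFF * 7 / 15 = 0x7777, and
	 * linear_lut[15] = 0xFFFF.
	 */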
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
static void s3_handle_mst(struct drm_device *dev, bool suspend)
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)

		mgr = &aconnector->mst_mgr;

			drm_dp_mst_topology_mgr_suspend(mgr);

			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;

	drm_connector_list_iter_end(&iter);

		drm_kms_helper_hotplug_event(dev);
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
	struct smu_context *smu = &adev->smu;

	if (!is_support_sw_smu(adev))

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 *
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {

	ret = smu_write_watermarks_table(smu);
		DRM_ERROR("Failed to update WMTABLE!\n");
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

				rc = dm_enable_vblank(&acrtc->base);
					DRM_WARN("Failed to enable vblank interrupts\n");

				dm_disable_vblank(&acrtc->base);
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);

	res = dc_validate_global_state(dc, context, false);
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);

	res = dc_commit_state(dc, context);

	dc_release_state(context);
static int dm_suspend(void *handle)
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
static void emulated_link_detect(struct dc_link *link)
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;

		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
		DC_ERROR("Failed to create sink!\n");

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
		dm_error("Failed to allocate update bundle\n");

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =

		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
static int dm_resume(void *handle)
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		r = dm_dmub_hw_init(adev);
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
				dc_state->stream_status->plane_states[j]->update_flags.raw

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);

			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);

	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);
/**
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,

const struct amdgpu_ip_block_version dm_ip_block =
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.funcs = &amdgpu_dm_funcs,
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * For calculating this expression we may need floating-point
	 * precision; to avoid this complexity level, we take advantage of the
	 * fact that CV is divided by a constant. From Euclid's division
	 * algorithm, we know that CV can be written as: CV = 32*q + r. Next,
	 * we replace CV in the Luminance expression and get
	 * 50*(2**q)*(2**(r/32)), hence we just need to pre-compute the value
	 * of r/32. For pre-computing the values we just used the following
	 * Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified at
	 * pre_computed_values.
	 */
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
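
	/*
	 * Worked example (illustrative CV value, not from the original code):
	 * for max_cll = 70, q = 70 / 32 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which
	 * matches the exact 50 * 2^(70/32) ~= 227.8 nits.
	 */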
2093 void amdgpu_dm_update_connector_after_detect(
2094 struct amdgpu_dm_connector *aconnector)
2096 struct drm_connector *connector = &aconnector->base;
2097 struct drm_device *dev = connector->dev;
2098 struct dc_sink *sink;
2100 /* MST handled by drm_mst framework */
2101 if (aconnector->mst_mgr.mst_state == true)
2105 sink = aconnector->dc_link->local_sink;
2107 dc_sink_retain(sink);
2110 * Edid mgmt connector gets first update only in mode_valid hook and then
2111 * the connector sink is set to either fake or physical sink depends on link status.
2112 * Skip if already done during boot.
2114 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2115 && aconnector->dc_em_sink) {
2118 * For S3 resume with headless use eml_sink to fake stream
2119 * because on resume connector->sink is set to NULL
2121 mutex_lock(&dev->mode_config.mutex);
2124 if (aconnector->dc_sink) {
2125 amdgpu_dm_update_freesync_caps(connector, NULL);
2127 * retain and release below are used to
2128 * bump up the refcount for the sink because the link doesn't point
2129 * to it anymore after disconnect, so on the next crtc-to-connector
2130 * reshuffle by the UMD we would otherwise get an unwanted dc_sink release
2132 dc_sink_release(aconnector->dc_sink);
2134 aconnector->dc_sink = sink;
2135 dc_sink_retain(aconnector->dc_sink);
2136 amdgpu_dm_update_freesync_caps(connector,
2139 amdgpu_dm_update_freesync_caps(connector, NULL);
2140 if (!aconnector->dc_sink) {
2141 aconnector->dc_sink = aconnector->dc_em_sink;
2142 dc_sink_retain(aconnector->dc_sink);
2146 mutex_unlock(&dev->mode_config.mutex);
2149 dc_sink_release(sink);
2154 * TODO: temporary guard to look for proper fix
2155 * If this sink is an MST sink, we should not do anything here.
2157 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2158 dc_sink_release(sink);
2162 if (aconnector->dc_sink == sink) {
2164 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2167 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168 aconnector->connector_id);
2170 dc_sink_release(sink);
2174 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175 aconnector->connector_id, aconnector->dc_sink, sink);
2177 mutex_lock(&dev->mode_config.mutex);
2180 * 1. Update status of the drm connector
2181 * 2. Send an event and let userspace tell us what to do
2185 * TODO: check if we still need the S3 mode update workaround.
2186 * If yes, put it here.
2188 if (aconnector->dc_sink)
2189 amdgpu_dm_update_freesync_caps(connector, NULL);
2191 aconnector->dc_sink = sink;
2192 dc_sink_retain(aconnector->dc_sink);
2193 if (sink->dc_edid.length == 0) {
2194 aconnector->edid = NULL;
2195 if (aconnector->dc_link->aux_mode) {
2196 drm_dp_cec_unset_edid(
2197 &aconnector->dm_dp_aux.aux);
2201 (struct edid *)sink->dc_edid.raw_edid;
2203 drm_connector_update_edid_property(connector,
2205 drm_add_edid_modes(connector, aconnector->edid);
2207 if (aconnector->dc_link->aux_mode)
2208 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2212 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2213 update_connector_ext_caps(aconnector);
2215 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2216 amdgpu_dm_update_freesync_caps(connector, NULL);
2217 drm_connector_update_edid_property(connector, NULL);
2218 aconnector->num_modes = 0;
2219 dc_sink_release(aconnector->dc_sink);
2220 aconnector->dc_sink = NULL;
2221 aconnector->edid = NULL;
2222 #ifdef CONFIG_DRM_AMD_DC_HDCP
2223 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2224 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2225 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2229 mutex_unlock(&dev->mode_config.mutex);
2232 dc_sink_release(sink);
2235 static void handle_hpd_irq(void *param)
2237 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2238 struct drm_connector *connector = &aconnector->base;
2239 struct drm_device *dev = connector->dev;
2240 enum dc_connection_type new_connection_type = dc_connection_none;
2241 #ifdef CONFIG_DRM_AMD_DC_HDCP
2242 struct amdgpu_device *adev = drm_to_adev(dev);
2246 * In case of failure or MST there is no need to update the connector status
2247 * or notify the OS since (in the MST case) MST does this in its own context.
2249 mutex_lock(&aconnector->hpd_lock);
2251 #ifdef CONFIG_DRM_AMD_DC_HDCP
2252 if (adev->dm.hdcp_workqueue)
2253 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2255 if (aconnector->fake_enable)
2256 aconnector->fake_enable = false;
2258 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2259 DRM_ERROR("KMS: Failed to detect connector\n");
2261 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2262 emulated_link_detect(aconnector->dc_link);
2265 drm_modeset_lock_all(dev);
2266 dm_restore_drm_connector_state(dev, connector);
2267 drm_modeset_unlock_all(dev);
2269 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2270 drm_kms_helper_hotplug_event(dev);
2272 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2273 amdgpu_dm_update_connector_after_detect(aconnector);
2276 drm_modeset_lock_all(dev);
2277 dm_restore_drm_connector_state(dev, connector);
2278 drm_modeset_unlock_all(dev);
2280 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2281 drm_kms_helper_hotplug_event(dev);
2283 mutex_unlock(&aconnector->hpd_lock);
2287 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2289 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2291 bool new_irq_handled = false;
2293 int dpcd_bytes_to_read;
2295 const int max_process_count = 30;
2296 int process_count = 0;
2298 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2300 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2301 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2302 /* DPCD 0x200 - 0x201 for downstream IRQ */
2303 dpcd_addr = DP_SINK_COUNT;
2305 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2306 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2307 dpcd_addr = DP_SINK_COUNT_ESI;
2310 dret = drm_dp_dpcd_read(
2311 &aconnector->dm_dp_aux.aux,
2314 dpcd_bytes_to_read);
2316 while (dret == dpcd_bytes_to_read &&
2317 process_count < max_process_count) {
2323 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2324 /* handle HPD short pulse irq */
2325 if (aconnector->mst_mgr.mst_state)
2327 &aconnector->mst_mgr,
2331 if (new_irq_handled) {
2332 /* ACK at DPCD to notify the downstream device */
2333 const int ack_dpcd_bytes_to_write =
2334 dpcd_bytes_to_read - 1;
2336 for (retry = 0; retry < 3; retry++) {
2339 wret = drm_dp_dpcd_write(
2340 &aconnector->dm_dp_aux.aux,
2343 ack_dpcd_bytes_to_write);
2344 if (wret == ack_dpcd_bytes_to_write)
2348 /* check if there is new irq to be handled */
2349 dret = drm_dp_dpcd_read(
2350 &aconnector->dm_dp_aux.aux,
2353 dpcd_bytes_to_read);
2355 new_irq_handled = false;
2361 if (process_count == max_process_count)
2362 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2365 static void handle_hpd_rx_irq(void *param)
2367 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2368 struct drm_connector *connector = &aconnector->base;
2369 struct drm_device *dev = connector->dev;
2370 struct dc_link *dc_link = aconnector->dc_link;
2371 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2372 enum dc_connection_type new_connection_type = dc_connection_none;
2373 #ifdef CONFIG_DRM_AMD_DC_HDCP
2374 union hpd_irq_data hpd_irq_data;
2375 struct amdgpu_device *adev = drm_to_adev(dev);
2377 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2381 * TODO: Temporarily add a mutex to keep the hpd interrupt from hitting a
2382 * gpio conflict; once the i2c helper is implemented, this mutex should be
2383 * retired.
2385 if (dc_link->type != dc_connection_mst_branch)
2386 mutex_lock(&aconnector->hpd_lock);
2389 #ifdef CONFIG_DRM_AMD_DC_HDCP
2390 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2392 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2394 !is_mst_root_connector) {
2395 /* Downstream Port status changed. */
2396 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2397 DRM_ERROR("KMS: Failed to detect connector\n");
2399 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2400 emulated_link_detect(dc_link);
2402 if (aconnector->fake_enable)
2403 aconnector->fake_enable = false;
2405 amdgpu_dm_update_connector_after_detect(aconnector);
2408 drm_modeset_lock_all(dev);
2409 dm_restore_drm_connector_state(dev, connector);
2410 drm_modeset_unlock_all(dev);
2412 drm_kms_helper_hotplug_event(dev);
2413 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2415 if (aconnector->fake_enable)
2416 aconnector->fake_enable = false;
2418 amdgpu_dm_update_connector_after_detect(aconnector);
2421 drm_modeset_lock_all(dev);
2422 dm_restore_drm_connector_state(dev, connector);
2423 drm_modeset_unlock_all(dev);
2425 drm_kms_helper_hotplug_event(dev);
2428 #ifdef CONFIG_DRM_AMD_DC_HDCP
2429 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2430 if (adev->dm.hdcp_workqueue)
2431 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2434 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2435 (dc_link->type == dc_connection_mst_branch))
2436 dm_handle_hpd_rx_irq(aconnector);
2438 if (dc_link->type != dc_connection_mst_branch) {
2439 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2440 mutex_unlock(&aconnector->hpd_lock);
2444 static void register_hpd_handlers(struct amdgpu_device *adev)
2446 struct drm_device *dev = adev_to_drm(adev);
2447 struct drm_connector *connector;
2448 struct amdgpu_dm_connector *aconnector;
2449 const struct dc_link *dc_link;
2450 struct dc_interrupt_params int_params = {0};
2452 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2453 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2455 list_for_each_entry(connector,
2456 &dev->mode_config.connector_list, head) {
2458 aconnector = to_amdgpu_dm_connector(connector);
2459 dc_link = aconnector->dc_link;
2461 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2462 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2463 int_params.irq_source = dc_link->irq_source_hpd;
2465 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2467 (void *) aconnector);
2470 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2472 /* Also register for DP short pulse (hpd_rx). */
2473 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2474 int_params.irq_source = dc_link->irq_source_hpd_rx;
2476 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2478 (void *) aconnector);
2483 #if defined(CONFIG_DRM_AMD_DC_SI)
2484 /* Register IRQ sources and initialize IRQ callbacks */
2485 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2487 struct dc *dc = adev->dm.dc;
2488 struct common_irq_params *c_irq_params;
2489 struct dc_interrupt_params int_params = {0};
2492 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2494 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2495 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2498 * Actions of amdgpu_irq_add_id():
2499 * 1. Register a set() function with base driver.
2500 * Base driver will call set() function to enable/disable an
2501 * interrupt in DC hardware.
2502 * 2. Register amdgpu_dm_irq_handler().
2503 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2504 * coming from DC hardware.
2505 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2506 * for acknowledging and handling. */
2508 /* Use VBLANK interrupt */
2509 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2510 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2512 DRM_ERROR("Failed to add crtc irq id!\n");
2516 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2517 int_params.irq_source =
2518 dc_interrupt_to_irq_source(dc, i + 1, 0);
2520 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2522 c_irq_params->adev = adev;
2523 c_irq_params->irq_src = int_params.irq_source;
2525 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2526 dm_crtc_high_irq, c_irq_params);
2529 /* Use GRPH_PFLIP interrupt */
2530 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2531 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2532 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2534 DRM_ERROR("Failed to add page flip irq id!\n");
2538 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2539 int_params.irq_source =
2540 dc_interrupt_to_irq_source(dc, i, 0);
2542 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2544 c_irq_params->adev = adev;
2545 c_irq_params->irq_src = int_params.irq_source;
2547 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2548 dm_pflip_high_irq, c_irq_params);
2553 r = amdgpu_irq_add_id(adev, client_id,
2554 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2556 DRM_ERROR("Failed to add hpd irq id!\n");
2560 register_hpd_handlers(adev);
2566 /* Register IRQ sources and initialize IRQ callbacks */
2567 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2569 struct dc *dc = adev->dm.dc;
2570 struct common_irq_params *c_irq_params;
2571 struct dc_interrupt_params int_params = {0};
2574 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2576 if (adev->asic_type >= CHIP_VEGA10)
2577 client_id = SOC15_IH_CLIENTID_DCE;
2579 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2580 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2583 * Actions of amdgpu_irq_add_id():
2584 * 1. Register a set() function with base driver.
2585 * Base driver will call set() function to enable/disable an
2586 * interrupt in DC hardware.
2587 * 2. Register amdgpu_dm_irq_handler().
2588 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2589 * coming from DC hardware.
2590 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2591 * for acknowledging and handling. */
2593 /* Use VBLANK interrupt */
2594 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2595 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2597 DRM_ERROR("Failed to add crtc irq id!\n");
2601 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2602 int_params.irq_source =
2603 dc_interrupt_to_irq_source(dc, i, 0);
2605 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2607 c_irq_params->adev = adev;
2608 c_irq_params->irq_src = int_params.irq_source;
2610 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2611 dm_crtc_high_irq, c_irq_params);
2614 /* Use VUPDATE interrupt */
2615 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2616 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2618 DRM_ERROR("Failed to add vupdate irq id!\n");
2622 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2623 int_params.irq_source =
2624 dc_interrupt_to_irq_source(dc, i, 0);
2626 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2628 c_irq_params->adev = adev;
2629 c_irq_params->irq_src = int_params.irq_source;
2631 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2632 dm_vupdate_high_irq, c_irq_params);
2635 /* Use GRPH_PFLIP interrupt */
2636 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2637 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2638 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2640 DRM_ERROR("Failed to add page flip irq id!\n");
2644 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2645 int_params.irq_source =
2646 dc_interrupt_to_irq_source(dc, i, 0);
2648 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2650 c_irq_params->adev = adev;
2651 c_irq_params->irq_src = int_params.irq_source;
2653 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2654 dm_pflip_high_irq, c_irq_params);
2659 r = amdgpu_irq_add_id(adev, client_id,
2660 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2662 DRM_ERROR("Failed to add hpd irq id!\n");
2666 register_hpd_handlers(adev);
2671 #if defined(CONFIG_DRM_AMD_DC_DCN)
2672 /* Register IRQ sources and initialize IRQ callbacks */
2673 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2675 struct dc *dc = adev->dm.dc;
2676 struct common_irq_params *c_irq_params;
2677 struct dc_interrupt_params int_params = {0};
2681 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2682 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2685 * Actions of amdgpu_irq_add_id():
2686 * 1. Register a set() function with base driver.
2687 * Base driver will call set() function to enable/disable an
2688 * interrupt in DC hardware.
2689 * 2. Register amdgpu_dm_irq_handler().
2690 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2691 * coming from DC hardware.
2692 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2693 * for acknowledging and handling.
2696 /* Use VSTARTUP interrupt */
2697 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2698 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2700 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2703 DRM_ERROR("Failed to add crtc irq id!\n");
2707 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2708 int_params.irq_source =
2709 dc_interrupt_to_irq_source(dc, i, 0);
2711 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2713 c_irq_params->adev = adev;
2714 c_irq_params->irq_src = int_params.irq_source;
2716 amdgpu_dm_irq_register_interrupt(
2717 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2720 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2721 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2722 * to trigger at end of each vblank, regardless of state of the lock,
2723 * matching DCE behaviour.
2725 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2726 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2728 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2731 DRM_ERROR("Failed to add vupdate irq id!\n");
2735 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2736 int_params.irq_source =
2737 dc_interrupt_to_irq_source(dc, i, 0);
2739 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2741 c_irq_params->adev = adev;
2742 c_irq_params->irq_src = int_params.irq_source;
2744 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2745 dm_vupdate_high_irq, c_irq_params);
2748 /* Use GRPH_PFLIP interrupt */
2749 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2750 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2752 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2754 DRM_ERROR("Failed to add page flip irq id!\n");
2758 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2759 int_params.irq_source =
2760 dc_interrupt_to_irq_source(dc, i, 0);
2762 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2764 c_irq_params->adev = adev;
2765 c_irq_params->irq_src = int_params.irq_source;
2767 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2768 dm_pflip_high_irq, c_irq_params);
2773 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2776 DRM_ERROR("Failed to add hpd irq id!\n");
2780 register_hpd_handlers(adev);
2787 * Acquires the lock for the atomic state object and returns
2788 * the new atomic state.
2790 * This should only be called during atomic check.
2792 static int dm_atomic_get_state(struct drm_atomic_state *state,
2793 struct dm_atomic_state **dm_state)
2795 struct drm_device *dev = state->dev;
2796 struct amdgpu_device *adev = drm_to_adev(dev);
2797 struct amdgpu_display_manager *dm = &adev->dm;
2798 struct drm_private_state *priv_state;
2803 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2804 if (IS_ERR(priv_state))
2805 return PTR_ERR(priv_state);
2807 *dm_state = to_dm_atomic_state(priv_state);
2812 static struct dm_atomic_state *
2813 dm_atomic_get_new_state(struct drm_atomic_state *state)
2815 struct drm_device *dev = state->dev;
2816 struct amdgpu_device *adev = drm_to_adev(dev);
2817 struct amdgpu_display_manager *dm = &adev->dm;
2818 struct drm_private_obj *obj;
2819 struct drm_private_state *new_obj_state;
2822 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2823 if (obj->funcs == dm->atomic_obj.funcs)
2824 return to_dm_atomic_state(new_obj_state);
2830 static struct drm_private_state *
2831 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2833 struct dm_atomic_state *old_state, *new_state;
2835 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2839 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2841 old_state = to_dm_atomic_state(obj->state);
2843 if (old_state && old_state->context)
2844 new_state->context = dc_copy_state(old_state->context);
2846 if (!new_state->context) {
2851 return &new_state->base;
2854 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2855 struct drm_private_state *state)
2857 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2859 if (dm_state && dm_state->context)
2860 dc_release_state(dm_state->context);
2865 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2866 .atomic_duplicate_state = dm_atomic_duplicate_state,
2867 .atomic_destroy_state = dm_atomic_destroy_state,
2870 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2872 struct dm_atomic_state *state;
2875 adev->mode_info.mode_config_initialized = true;
2877 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2878 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2880 adev_to_drm(adev)->mode_config.max_width = 16384;
2881 adev_to_drm(adev)->mode_config.max_height = 16384;
2883 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2884 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2885 /* indicates support for immediate flip */
2886 adev_to_drm(adev)->mode_config.async_page_flip = true;
2888 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2890 state = kzalloc(sizeof(*state), GFP_KERNEL);
2894 state->context = dc_create_state(adev->dm.dc);
2895 if (!state->context) {
2900 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2902 drm_atomic_private_obj_init(adev_to_drm(adev),
2903 &adev->dm.atomic_obj,
2905 &dm_atomic_state_funcs);
2907 r = amdgpu_display_modeset_create_props(adev);
2909 dc_release_state(state->context);
2914 r = amdgpu_dm_audio_init(adev);
2916 dc_release_state(state->context);
2924 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2925 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2926 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2928 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2929 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2931 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2933 #if defined(CONFIG_ACPI)
2934 struct amdgpu_dm_backlight_caps caps;
2936 memset(&caps, 0, sizeof(caps));
2938 if (dm->backlight_caps.caps_valid)
2941 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2942 if (caps.caps_valid) {
2943 dm->backlight_caps.caps_valid = true;
2944 if (caps.aux_support)
2946 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2947 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2949 dm->backlight_caps.min_input_signal =
2950 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2951 dm->backlight_caps.max_input_signal =
2952 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2955 if (dm->backlight_caps.aux_support)
2958 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2959 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2963 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2970 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2971 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2976 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
2977 unsigned *min, unsigned *max)
2982 if (caps->aux_support) {
2983 // Firmware limits are in nits, DC API wants millinits.
2984 *max = 1000 * caps->aux_max_input_signal;
2985 *min = 1000 * caps->aux_min_input_signal;
2987 // Firmware limits are 8-bit, PWM control is 16-bit.
2988 *max = 0x101 * caps->max_input_signal;
2989 *min = 0x101 * caps->min_input_signal;
2994 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
2995 uint32_t brightness)
2999 if (!get_brightness_range(caps, &min, &max))
3002 // Rescale 0..255 to min..max
3003 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3004 AMDGPU_MAX_BL_LEVEL);
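/* Worked example with the PWM defaults above (min_input_signal = 12,
 * max_input_signal = 255): min = 0x101 * 12 = 3084 and max = 0x101 *
 * 255 = 65535, so a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */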
3007 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3008 uint32_t brightness)
3012 if (!get_brightness_range(caps, &min, &max))
3015 if (brightness < min)
3017 // Rescale min..max to 0..255
3018 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3022 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3024 struct amdgpu_display_manager *dm = bl_get_data(bd);
3025 struct amdgpu_dm_backlight_caps caps;
3026 struct dc_link *link = NULL;
3030 amdgpu_dm_update_backlight_caps(dm);
3031 caps = dm->backlight_caps;
3033 link = (struct dc_link *)dm->backlight_link;
3035 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3036 // Change brightness based on AUX property
3037 if (caps.aux_support)
3038 return set_backlight_via_aux(link, brightness);
3040 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3045 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3047 struct amdgpu_display_manager *dm = bl_get_data(bd);
3048 int ret = dc_link_get_backlight_level(dm->backlight_link);
3050 if (ret == DC_ERROR_UNEXPECTED)
3051 return bd->props.brightness;
3052 return convert_brightness_to_user(&dm->backlight_caps, ret);
3055 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3056 .options = BL_CORE_SUSPENDRESUME,
3057 .get_brightness = amdgpu_dm_backlight_get_brightness,
3058 .update_status = amdgpu_dm_backlight_update_status,
3062 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3065 struct backlight_properties props = { 0 };
3067 amdgpu_dm_update_backlight_caps(dm);
3069 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3070 props.brightness = AMDGPU_MAX_BL_LEVEL;
3071 props.type = BACKLIGHT_RAW;
3073 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3074 adev_to_drm(dm->adev)->primary->index);
3076 dm->backlight_dev = backlight_device_register(bl_name,
3077 adev_to_drm(dm->adev)->dev,
3079 &amdgpu_dm_backlight_ops,
3082 if (IS_ERR(dm->backlight_dev))
3083 DRM_ERROR("DM: Backlight registration failed!\n");
3085 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3090 static int initialize_plane(struct amdgpu_display_manager *dm,
3091 struct amdgpu_mode_info *mode_info, int plane_id,
3092 enum drm_plane_type plane_type,
3093 const struct dc_plane_cap *plane_cap)
3095 struct drm_plane *plane;
3096 unsigned long possible_crtcs;
3099 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3101 DRM_ERROR("KMS: Failed to allocate plane\n");
3104 plane->type = plane_type;
3107 * HACK: IGT tests expect that the primary plane for a CRTC
3108 * can only have one possible CRTC. Only expose support for
3109 * any CRTC for planes that are not going to be used as a
3110 * primary plane for a CRTC - i.e. overlay or underlay planes.
3112 possible_crtcs = 1 << plane_id;
3113 if (plane_id >= dm->dc->caps.max_streams)
3114 possible_crtcs = 0xff;
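/* e.g. with max_streams == 4, plane 0 is tied to CRTC 0 only
 * (possible_crtcs = 0x1) while plane 5, used as an overlay, may be
 * placed on any CRTC (possible_crtcs = 0xff).
 */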
3116 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3119 DRM_ERROR("KMS: Failed to initialize plane\n");
3125 mode_info->planes[plane_id] = plane;
3131 static void register_backlight_device(struct amdgpu_display_manager *dm,
3132 struct dc_link *link)
3134 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3135 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3137 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3138 link->type != dc_connection_none) {
3140 * Even if registration fails, we should continue with
3141 * DM initialization because not having a backlight control
3142 * is better than a black screen.
3144 amdgpu_dm_register_backlight_device(dm);
3146 if (dm->backlight_dev)
3147 dm->backlight_link = link;
3154 * In this architecture, the association
3155 * connector -> encoder -> crtc
3156 * is not really required. The crtc and connector will hold the
3157 * display_index as an abstraction to use with the DAL component
3159 * Returns 0 on success
3161 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3163 struct amdgpu_display_manager *dm = &adev->dm;
3165 struct amdgpu_dm_connector *aconnector = NULL;
3166 struct amdgpu_encoder *aencoder = NULL;
3167 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3169 int32_t primary_planes;
3170 enum dc_connection_type new_connection_type = dc_connection_none;
3171 const struct dc_plane_cap *plane;
3173 link_cnt = dm->dc->caps.max_links;
3174 if (amdgpu_dm_mode_config_init(dm->adev)) {
3175 DRM_ERROR("DM: Failed to initialize mode config\n");
3179 /* There is one primary plane per CRTC */
3180 primary_planes = dm->dc->caps.max_streams;
3181 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3184 * Initialize primary planes, implicit planes for legacy IOCTLS.
3185 * Order is reversed to match iteration order in atomic check.
3187 for (i = (primary_planes - 1); i >= 0; i--) {
3188 plane = &dm->dc->caps.planes[i];
3190 if (initialize_plane(dm, mode_info, i,
3191 DRM_PLANE_TYPE_PRIMARY, plane)) {
3192 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3198 * Initialize overlay planes, index starting after primary planes.
3199 * These planes have a higher DRM index than the primary planes since
3200 * they should be considered as having a higher z-order.
3201 * Order is reversed to match iteration order in atomic check.
3203 * Only support DCN for now, and only expose one so we don't encourage
3204 * userspace to use up all the pipes.
3206 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3207 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3209 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3212 if (!plane->blends_with_above || !plane->blends_with_below)
3215 if (!plane->pixel_format_support.argb8888)
3218 if (initialize_plane(dm, NULL, primary_planes + i,
3219 DRM_PLANE_TYPE_OVERLAY, plane)) {
3220 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3224 /* Only create one overlay plane. */
3228 for (i = 0; i < dm->dc->caps.max_streams; i++)
3229 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3230 DRM_ERROR("KMS: Failed to initialize crtc\n");
3234 dm->display_indexes_num = dm->dc->caps.max_streams;
3236 /* loops over all connectors on the board */
3237 for (i = 0; i < link_cnt; i++) {
3238 struct dc_link *link = NULL;
3240 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3242 "KMS: Cannot support more than %d display indexes\n",
3243 AMDGPU_DM_MAX_DISPLAY_INDEX);
3247 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3251 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3255 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3256 DRM_ERROR("KMS: Failed to initialize encoder\n");
3260 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3261 DRM_ERROR("KMS: Failed to initialize connector\n");
3265 link = dc_get_link_at_index(dm->dc, i);
3267 if (!dc_link_detect_sink(link, &new_connection_type))
3268 DRM_ERROR("KMS: Failed to detect connector\n");
3270 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3271 emulated_link_detect(link);
3272 amdgpu_dm_update_connector_after_detect(aconnector);
3274 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3275 amdgpu_dm_update_connector_after_detect(aconnector);
3276 register_backlight_device(dm, link);
3277 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3278 amdgpu_dm_set_psr_caps(link);
3284 /* Software is initialized. Now we can register interrupt handlers. */
3285 switch (adev->asic_type) {
3286 #if defined(CONFIG_DRM_AMD_DC_SI)
3291 if (dce60_register_irq_handlers(dm->adev)) {
3292 DRM_ERROR("DM: Failed to initialize IRQ\n");
3306 case CHIP_POLARIS11:
3307 case CHIP_POLARIS10:
3308 case CHIP_POLARIS12:
3313 if (dce110_register_irq_handlers(dm->adev)) {
3314 DRM_ERROR("DM: Failed to initialize IRQ\n");
3318 #if defined(CONFIG_DRM_AMD_DC_DCN)
3324 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3325 case CHIP_SIENNA_CICHLID:
3326 case CHIP_NAVY_FLOUNDER:
3328 if (dcn10_register_irq_handlers(dm->adev)) {
3329 DRM_ERROR("DM: Failed to initialize IRQ\n");
3335 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3339 /* No userspace support. */
3340 dm->dc->debug.disable_tri_buf = true;
3350 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3352 drm_mode_config_cleanup(dm->ddev);
3353 drm_atomic_private_obj_fini(&dm->atomic_obj);
3357 /******************************************************************************
3358 * amdgpu_display_funcs functions
3359 *****************************************************************************/
3362 * dm_bandwidth_update - program display watermarks
3364 * @adev: amdgpu_device pointer
3366 * Calculate and program the display watermarks and line buffer allocation.
3368 static void dm_bandwidth_update(struct amdgpu_device *adev)
3370 /* TODO: implement later */
3373 static const struct amdgpu_display_funcs dm_display_funcs = {
3374 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3375 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3376 .backlight_set_level = NULL, /* never called for DC */
3377 .backlight_get_level = NULL, /* never called for DC */
3378 .hpd_sense = NULL,/* called unconditionally */
3379 .hpd_set_polarity = NULL, /* called unconditionally */
3380 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3381 .page_flip_get_scanoutpos =
3382 dm_crtc_get_scanoutpos,/* called unconditionally */
3383 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3384 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3387 #if defined(CONFIG_DEBUG_KERNEL_DC)
3389 static ssize_t s3_debug_store(struct device *device,
3390 struct device_attribute *attr,
3396 struct drm_device *drm_dev = dev_get_drvdata(device);
3397 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3399 ret = kstrtoint(buf, 0, &s3_state);
3404 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3409 return ret == 0 ? count : 0;
3412 DEVICE_ATTR_WO(s3_debug);
3416 static int dm_early_init(void *handle)
3418 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3420 switch (adev->asic_type) {
3421 #if defined(CONFIG_DRM_AMD_DC_SI)
3425 adev->mode_info.num_crtc = 6;
3426 adev->mode_info.num_hpd = 6;
3427 adev->mode_info.num_dig = 6;
3430 adev->mode_info.num_crtc = 2;
3431 adev->mode_info.num_hpd = 2;
3432 adev->mode_info.num_dig = 2;
3437 adev->mode_info.num_crtc = 6;
3438 adev->mode_info.num_hpd = 6;
3439 adev->mode_info.num_dig = 6;
3442 adev->mode_info.num_crtc = 4;
3443 adev->mode_info.num_hpd = 6;
3444 adev->mode_info.num_dig = 7;
3448 adev->mode_info.num_crtc = 2;
3449 adev->mode_info.num_hpd = 6;
3450 adev->mode_info.num_dig = 6;
3454 adev->mode_info.num_crtc = 6;
3455 adev->mode_info.num_hpd = 6;
3456 adev->mode_info.num_dig = 7;
3459 adev->mode_info.num_crtc = 3;
3460 adev->mode_info.num_hpd = 6;
3461 adev->mode_info.num_dig = 9;
3464 adev->mode_info.num_crtc = 2;
3465 adev->mode_info.num_hpd = 6;
3466 adev->mode_info.num_dig = 9;
3468 case CHIP_POLARIS11:
3469 case CHIP_POLARIS12:
3470 adev->mode_info.num_crtc = 5;
3471 adev->mode_info.num_hpd = 5;
3472 adev->mode_info.num_dig = 5;
3474 case CHIP_POLARIS10:
3476 adev->mode_info.num_crtc = 6;
3477 adev->mode_info.num_hpd = 6;
3478 adev->mode_info.num_dig = 6;
3483 adev->mode_info.num_crtc = 6;
3484 adev->mode_info.num_hpd = 6;
3485 adev->mode_info.num_dig = 6;
3487 #if defined(CONFIG_DRM_AMD_DC_DCN)
3489 adev->mode_info.num_crtc = 4;
3490 adev->mode_info.num_hpd = 4;
3491 adev->mode_info.num_dig = 4;
3496 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3497 case CHIP_SIENNA_CICHLID:
3498 case CHIP_NAVY_FLOUNDER:
3500 adev->mode_info.num_crtc = 6;
3501 adev->mode_info.num_hpd = 6;
3502 adev->mode_info.num_dig = 6;
3505 adev->mode_info.num_crtc = 5;
3506 adev->mode_info.num_hpd = 5;
3507 adev->mode_info.num_dig = 5;
3510 adev->mode_info.num_crtc = 4;
3511 adev->mode_info.num_hpd = 4;
3512 adev->mode_info.num_dig = 4;
3515 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3519 amdgpu_dm_set_irq_funcs(adev);
3521 if (adev->mode_info.funcs == NULL)
3522 adev->mode_info.funcs = &dm_display_funcs;
3525 * Note: Do NOT change adev->audio_endpt_rreg and
3526 * adev->audio_endpt_wreg because they are initialised in
3527 * amdgpu_device_init()
3529 #if defined(CONFIG_DEBUG_KERNEL_DC)
3531 adev_to_drm(adev)->dev,
3532 &dev_attr_s3_debug);
3538 static bool modeset_required(struct drm_crtc_state *crtc_state,
3539 struct dc_stream_state *new_stream,
3540 struct dc_stream_state *old_stream)
3542 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3545 static bool modereset_required(struct drm_crtc_state *crtc_state)
3547 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3550 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3552 drm_encoder_cleanup(encoder);
3556 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3557 .destroy = amdgpu_dm_encoder_destroy,
3561 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3562 struct dc_scaling_info *scaling_info)
3564 int scale_w, scale_h;
3566 memset(scaling_info, 0, sizeof(*scaling_info));
3568 /* Source is fixed 16.16 but we ignore mantissa for now... */
3569 scaling_info->src_rect.x = state->src_x >> 16;
3570 scaling_info->src_rect.y = state->src_y >> 16;
3572 scaling_info->src_rect.width = state->src_w >> 16;
3573 if (scaling_info->src_rect.width == 0)
3576 scaling_info->src_rect.height = state->src_h >> 16;
3577 if (scaling_info->src_rect.height == 0)
3580 scaling_info->dst_rect.x = state->crtc_x;
3581 scaling_info->dst_rect.y = state->crtc_y;
3583 if (state->crtc_w == 0)
3586 scaling_info->dst_rect.width = state->crtc_w;
3588 if (state->crtc_h == 0)
3591 scaling_info->dst_rect.height = state->crtc_h;
3593 /* DRM doesn't specify clipping on destination output. */
3594 scaling_info->clip_rect = scaling_info->dst_rect;
3596 /* TODO: Validate scaling per-format with DC plane caps */
3597 scale_w = scaling_info->dst_rect.width * 1000 /
3598 scaling_info->src_rect.width;
3600 if (scale_w < 250 || scale_w > 16000)
3603 scale_h = scaling_info->dst_rect.height * 1000 /
3604 scaling_info->src_rect.height;
3606 if (scale_h < 250 || scale_h > 16000)
3610 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3611 * assume reasonable defaults based on the format.
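/* Worked example (assumed numbers): scanning a 1920x1080 source rect
 * out to a 960x540 CRTC rect gives scale_w = 960 * 1000 / 1920 = 500
 * and scale_h = 540 * 1000 / 1080 = 500, a 0.5x downscale, well inside
 * the 0.25x..16x (250..16000) range checked above.
 */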
3617 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3618 uint64_t *tiling_flags, bool *tmz_surface)
3620 struct amdgpu_bo *rbo;
3625 *tmz_surface = false;
3629 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3630 r = amdgpu_bo_reserve(rbo, false);
3633 /* Don't show error message when returning -ERESTARTSYS */
3634 if (r != -ERESTARTSYS)
3635 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3640 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3643 *tmz_surface = amdgpu_bo_encrypted(rbo);
3645 amdgpu_bo_unreserve(rbo);
3650 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3652 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3654 return offset ? (address + offset * 256) : 0;
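/* The DCC_OFFSET_256B field is stored in units of 256 bytes, so e.g.
 * an offset field of 16 places the DCC metadata at address + 4096,
 * while an offset of 0 means the surface has no DCC metadata.
 */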
3658 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3659 const struct amdgpu_framebuffer *afb,
3660 const enum surface_pixel_format format,
3661 const enum dc_rotation_angle rotation,
3662 const struct plane_size *plane_size,
3663 const union dc_tiling_info *tiling_info,
3664 const uint64_t info,
3665 struct dc_plane_dcc_param *dcc,
3666 struct dc_plane_address *address,
3667 bool force_disable_dcc)
3669 struct dc *dc = adev->dm.dc;
3670 struct dc_dcc_surface_param input;
3671 struct dc_surface_dcc_cap output;
3672 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3673 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3674 uint64_t dcc_address;
3676 memset(&input, 0, sizeof(input));
3677 memset(&output, 0, sizeof(output));
3679 if (force_disable_dcc)
3685 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3688 if (!dc->cap_funcs.get_dcc_compression_cap)
3691 input.format = format;
3692 input.surface_size.width = plane_size->surface_size.width;
3693 input.surface_size.height = plane_size->surface_size.height;
3694 input.swizzle_mode = tiling_info->gfx9.swizzle;
3696 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3697 input.scan = SCAN_DIRECTION_HORIZONTAL;
3698 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3699 input.scan = SCAN_DIRECTION_VERTICAL;
3701 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3704 if (!output.capable)
3707 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3712 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3713 dcc->independent_64b_blks = i64b;
3715 dcc_address = get_dcc_address(afb->address, info);
3716 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3717 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3723 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3724 const struct amdgpu_framebuffer *afb,
3725 const enum surface_pixel_format format,
3726 const enum dc_rotation_angle rotation,
3727 const uint64_t tiling_flags,
3728 union dc_tiling_info *tiling_info,
3729 struct plane_size *plane_size,
3730 struct dc_plane_dcc_param *dcc,
3731 struct dc_plane_address *address,
3733 bool force_disable_dcc)
3735 const struct drm_framebuffer *fb = &afb->base;
3738 memset(tiling_info, 0, sizeof(*tiling_info));
3739 memset(plane_size, 0, sizeof(*plane_size));
3740 memset(dcc, 0, sizeof(*dcc));
3741 memset(address, 0, sizeof(*address));
3743 address->tmz_surface = tmz_surface;
3745 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3746 plane_size->surface_size.x = 0;
3747 plane_size->surface_size.y = 0;
3748 plane_size->surface_size.width = fb->width;
3749 plane_size->surface_size.height = fb->height;
3750 plane_size->surface_pitch =
3751 fb->pitches[0] / fb->format->cpp[0];
3753 address->type = PLN_ADDR_TYPE_GRAPHICS;
3754 address->grph.addr.low_part = lower_32_bits(afb->address);
3755 address->grph.addr.high_part = upper_32_bits(afb->address);
3756 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3757 uint64_t chroma_addr = afb->address + fb->offsets[1];
3759 plane_size->surface_size.x = 0;
3760 plane_size->surface_size.y = 0;
3761 plane_size->surface_size.width = fb->width;
3762 plane_size->surface_size.height = fb->height;
3763 plane_size->surface_pitch =
3764 fb->pitches[0] / fb->format->cpp[0];
3766 plane_size->chroma_size.x = 0;
3767 plane_size->chroma_size.y = 0;
3768 /* TODO: set these based on surface format */
3769 plane_size->chroma_size.width = fb->width / 2;
3770 plane_size->chroma_size.height = fb->height / 2;
3772 plane_size->chroma_pitch =
3773 fb->pitches[1] / fb->format->cpp[1];
3775 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3776 address->video_progressive.luma_addr.low_part =
3777 lower_32_bits(afb->address);
3778 address->video_progressive.luma_addr.high_part =
3779 upper_32_bits(afb->address);
3780 address->video_progressive.chroma_addr.low_part =
3781 lower_32_bits(chroma_addr);
3782 address->video_progressive.chroma_addr.high_part =
3783 upper_32_bits(chroma_addr);
3786 /* Fill GFX8 params */
3787 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3788 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3790 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3791 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3792 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3793 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3794 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3796 /* XXX fix me for VI */
3797 tiling_info->gfx8.num_banks = num_banks;
3798 tiling_info->gfx8.array_mode =
3799 DC_ARRAY_2D_TILED_THIN1;
3800 tiling_info->gfx8.tile_split = tile_split;
3801 tiling_info->gfx8.bank_width = bankw;
3802 tiling_info->gfx8.bank_height = bankh;
3803 tiling_info->gfx8.tile_aspect = mtaspect;
3804 tiling_info->gfx8.tile_mode =
3805 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3806 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3807 == DC_ARRAY_1D_TILED_THIN1) {
3808 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3811 tiling_info->gfx8.pipe_config =
3812 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3814 if (adev->asic_type == CHIP_VEGA10 ||
3815 adev->asic_type == CHIP_VEGA12 ||
3816 adev->asic_type == CHIP_VEGA20 ||
3817 adev->asic_type == CHIP_NAVI10 ||
3818 adev->asic_type == CHIP_NAVI14 ||
3819 adev->asic_type == CHIP_NAVI12 ||
3820 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3821 adev->asic_type == CHIP_SIENNA_CICHLID ||
3822 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3824 adev->asic_type == CHIP_RENOIR ||
3825 adev->asic_type == CHIP_RAVEN) {
3826 /* Fill GFX9 params */
3827 tiling_info->gfx9.num_pipes =
3828 adev->gfx.config.gb_addr_config_fields.num_pipes;
3829 tiling_info->gfx9.num_banks =
3830 adev->gfx.config.gb_addr_config_fields.num_banks;
3831 tiling_info->gfx9.pipe_interleave =
3832 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3833 tiling_info->gfx9.num_shader_engines =
3834 adev->gfx.config.gb_addr_config_fields.num_se;
3835 tiling_info->gfx9.max_compressed_frags =
3836 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3837 tiling_info->gfx9.num_rb_per_se =
3838 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3839 tiling_info->gfx9.swizzle =
3840 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3841 tiling_info->gfx9.shaderEnable = 1;
3843 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3844 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3845 adev->asic_type == CHIP_NAVY_FLOUNDER)
3846 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3848 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3849 plane_size, tiling_info,
3850 tiling_flags, dcc, address,
3860 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3861 bool *per_pixel_alpha, bool *global_alpha,
3862 int *global_alpha_value)
3864 *per_pixel_alpha = false;
3865 *global_alpha = false;
3866 *global_alpha_value = 0xff;
3868 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3871 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3872 static const uint32_t alpha_formats[] = {
3873 DRM_FORMAT_ARGB8888,
3874 DRM_FORMAT_RGBA8888,
3875 DRM_FORMAT_ABGR8888,
3877 uint32_t format = plane_state->fb->format->format;
3880 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3881 if (format == alpha_formats[i]) {
3882 *per_pixel_alpha = true;
3888 if (plane_state->alpha < 0xffff) {
3889 *global_alpha = true;
3890 *global_alpha_value = plane_state->alpha >> 8;
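/* e.g. an ARGB8888 overlay with plane_state->alpha == 0x8000 ends up
 * with per_pixel_alpha = true, global_alpha = true and
 * global_alpha_value = 0x80.
 */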
3895 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3896 const enum surface_pixel_format format,
3897 enum dc_color_space *color_space)
3901 *color_space = COLOR_SPACE_SRGB;
3903 /* DRM color properties only affect non-RGB formats. */
3904 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3907 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3909 switch (plane_state->color_encoding) {
3910 case DRM_COLOR_YCBCR_BT601:
3912 *color_space = COLOR_SPACE_YCBCR601;
3914 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3917 case DRM_COLOR_YCBCR_BT709:
3919 *color_space = COLOR_SPACE_YCBCR709;
3921 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3924 case DRM_COLOR_YCBCR_BT2020:
3926 *color_space = COLOR_SPACE_2020_YCBCR;
3939 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3940 const struct drm_plane_state *plane_state,
3941 const uint64_t tiling_flags,
3942 struct dc_plane_info *plane_info,
3943 struct dc_plane_address *address,
3945 bool force_disable_dcc)
3947 const struct drm_framebuffer *fb = plane_state->fb;
3948 const struct amdgpu_framebuffer *afb =
3949 to_amdgpu_framebuffer(plane_state->fb);
3950 struct drm_format_name_buf format_name;
3953 memset(plane_info, 0, sizeof(*plane_info));
3955 switch (fb->format->format) {
3957 plane_info->format =
3958 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3960 case DRM_FORMAT_RGB565:
3961 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3963 case DRM_FORMAT_XRGB8888:
3964 case DRM_FORMAT_ARGB8888:
3965 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3967 case DRM_FORMAT_XRGB2101010:
3968 case DRM_FORMAT_ARGB2101010:
3969 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3971 case DRM_FORMAT_XBGR2101010:
3972 case DRM_FORMAT_ABGR2101010:
3973 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3975 case DRM_FORMAT_XBGR8888:
3976 case DRM_FORMAT_ABGR8888:
3977 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3979 case DRM_FORMAT_NV21:
3980 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3982 case DRM_FORMAT_NV12:
3983 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3985 case DRM_FORMAT_P010:
3986 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3988 case DRM_FORMAT_XRGB16161616F:
3989 case DRM_FORMAT_ARGB16161616F:
3990 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3992 case DRM_FORMAT_XBGR16161616F:
3993 case DRM_FORMAT_ABGR16161616F:
3994 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3998 "Unsupported screen format %s\n",
3999 drm_get_format_name(fb->format->format, &format_name));
4003 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4004 case DRM_MODE_ROTATE_0:
4005 plane_info->rotation = ROTATION_ANGLE_0;
4007 case DRM_MODE_ROTATE_90:
4008 plane_info->rotation = ROTATION_ANGLE_90;
4010 case DRM_MODE_ROTATE_180:
4011 plane_info->rotation = ROTATION_ANGLE_180;
4013 case DRM_MODE_ROTATE_270:
4014 plane_info->rotation = ROTATION_ANGLE_270;
4017 plane_info->rotation = ROTATION_ANGLE_0;
4021 plane_info->visible = true;
4022 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4024 plane_info->layer_index = 0;
4026 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4027 &plane_info->color_space);
4031 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4032 plane_info->rotation, tiling_flags,
4033 &plane_info->tiling_info,
4034 &plane_info->plane_size,
4035 &plane_info->dcc, address, tmz_surface,
4040 fill_blending_from_plane_state(
4041 plane_state, &plane_info->per_pixel_alpha,
4042 &plane_info->global_alpha, &plane_info->global_alpha_value);
4047 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4048 struct dc_plane_state *dc_plane_state,
4049 struct drm_plane_state *plane_state,
4050 struct drm_crtc_state *crtc_state)
4052 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4053 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4054 struct dc_scaling_info scaling_info;
4055 struct dc_plane_info plane_info;
4057 bool force_disable_dcc = false;
4059 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4063 dc_plane_state->src_rect = scaling_info.src_rect;
4064 dc_plane_state->dst_rect = scaling_info.dst_rect;
4065 dc_plane_state->clip_rect = scaling_info.clip_rect;
4066 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4068 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4069 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4070 dm_plane_state->tiling_flags,
4072 &dc_plane_state->address,
4073 dm_plane_state->tmz_surface,
4078 dc_plane_state->format = plane_info.format;
4079 dc_plane_state->color_space = plane_info.color_space;
4081 dc_plane_state->plane_size = plane_info.plane_size;
4082 dc_plane_state->rotation = plane_info.rotation;
4083 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4084 dc_plane_state->stereo_format = plane_info.stereo_format;
4085 dc_plane_state->tiling_info = plane_info.tiling_info;
4086 dc_plane_state->visible = plane_info.visible;
4087 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4088 dc_plane_state->global_alpha = plane_info.global_alpha;
4089 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4090 dc_plane_state->dcc = plane_info.dcc;
4091 dc_plane_state->layer_index = plane_info.layer_index; // always 0 for now
4094 * Always set input transfer function, since plane state is refreshed
4095 * per commit
4097 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4104 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4105 const struct dm_connector_state *dm_state,
4106 struct dc_stream_state *stream)
4108 enum amdgpu_rmx_type rmx_type;
4110 struct rect src = { 0 }; /* viewport in composition space */
4111 struct rect dst = { 0 }; /* stream addressable area */
4113 /* no mode. nothing to be done */
4117 /* Full screen scaling by default */
4118 src.width = mode->hdisplay;
4119 src.height = mode->vdisplay;
4120 dst.width = stream->timing.h_addressable;
4121 dst.height = stream->timing.v_addressable;
4124 rmx_type = dm_state->scaling;
4125 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4126 if (src.width * dst.height <
4127 src.height * dst.width) {
4128 /* height needs less upscaling/more downscaling */
4129 dst.width = src.width *
4130 dst.height / src.height;
4132 /* width needs less upscaling/more downscaling */
4133 dst.height = src.height *
4134 dst.width / src.width;
4136 } else if (rmx_type == RMX_CENTER) {
4140 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4141 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4143 if (dm_state->underscan_enable) {
4144 dst.x += dm_state->underscan_hborder / 2;
4145 dst.y += dm_state->underscan_vborder / 2;
4146 dst.width -= dm_state->underscan_hborder;
4147 dst.height -= dm_state->underscan_vborder;
4154 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4155 dst.x, dst.y, dst.width, dst.height);
4159 static enum dc_color_depth
4160 convert_color_depth_from_display_info(const struct drm_connector *connector,
4161 bool is_y420, int requested_bpc)
4168 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4169 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4171 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4173 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4176 bpc = (uint8_t)connector->display_info.bpc;
4177 /* Assume 8 bpc by default if no bpc is specified. */
4178 bpc = bpc ? bpc : 8;
4181 if (requested_bpc > 0) {
4183 * Cap display bpc based on the user requested value.
4185 * The value for state->max_bpc may not be correctly updated
4186 * depending on when the connector gets added to the state
4187 * or if this was called outside of atomic check, so it
4188 * can't be used directly.
4190 bpc = min_t(u8, bpc, requested_bpc);
4192 /* Round down to the nearest even number. */
4193 bpc = bpc - (bpc & 1);
4196 switch (bpc) {
4197 case 0:
4198 /*
4199 * Temporary workaround: DRM doesn't parse color depth for
4200 * EDID revisions before 1.4
4201 * TODO: Fix edid parsing
4202 */
4203 return COLOR_DEPTH_888;
4204 case 6:
4205 return COLOR_DEPTH_666;
4206 case 8:
4207 return COLOR_DEPTH_888;
4208 case 10:
4209 return COLOR_DEPTH_101010;
4210 case 12:
4211 return COLOR_DEPTH_121212;
4212 case 14:
4213 return COLOR_DEPTH_141414;
4214 case 16:
4215 return COLOR_DEPTH_161616;
4216 default:
4217 return COLOR_DEPTH_UNDEFINED;
4218 }
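/* For example, a sink reporting 12 bpc combined with a requested
 * max_bpc of 10 yields min_t(u8, 12, 10) = 10, which is already even,
 * so this returns COLOR_DEPTH_101010.
 */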
4221 static enum dc_aspect_ratio
4222 get_aspect_ratio(const struct drm_display_mode *mode_in)
4224 /* 1-1 mapping, since both enums follow the HDMI spec. */
4225 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4228 static enum dc_color_space
4229 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4231 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4233 switch (dc_crtc_timing->pixel_encoding) {
4234 case PIXEL_ENCODING_YCBCR422:
4235 case PIXEL_ENCODING_YCBCR444:
4236 case PIXEL_ENCODING_YCBCR420:
4239 * 27030 kHz (27.03 MHz) is the separation point between HDTV and SDTV
4240 * according to the HDMI spec; we use YCbCr709 and YCbCr601 respectively
4243 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4244 if (dc_crtc_timing->flags.Y_ONLY)
4246 COLOR_SPACE_YCBCR709_LIMITED;
4248 color_space = COLOR_SPACE_YCBCR709;
4250 if (dc_crtc_timing->flags.Y_ONLY)
4252 COLOR_SPACE_YCBCR601_LIMITED;
4254 color_space = COLOR_SPACE_YCBCR601;
4259 case PIXEL_ENCODING_RGB:
4260 color_space = COLOR_SPACE_SRGB;
4271 static bool adjust_colour_depth_from_display_info(
4272 struct dc_crtc_timing *timing_out,
4273 const struct drm_display_info *info)
4275 enum dc_color_depth depth = timing_out->display_color_depth;
4278 normalized_clk = timing_out->pix_clk_100hz / 10;
4279 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4280 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4281 normalized_clk /= 2;
4282 /* Adjust the pixel clock per the HDMI spec based on colour depth */
4284 case COLOR_DEPTH_888:
4286 case COLOR_DEPTH_101010:
4287 normalized_clk = (normalized_clk * 30) / 24;
4289 case COLOR_DEPTH_121212:
4290 normalized_clk = (normalized_clk * 36) / 24;
4292 case COLOR_DEPTH_161616:
4293 normalized_clk = (normalized_clk * 48) / 24;
4296 /* The above depths are the only ones valid for HDMI. */
4299 if (normalized_clk <= info->max_tmds_clock) {
4300 timing_out->display_color_depth = depth;
4303 } while (--depth > COLOR_DEPTH_666);
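/* Worked example (assumed numbers): a 297000 kHz (4k30) HDMI stream
 * with max_tmds_clock = 340000 kHz: 12-bit needs 297000 * 36 / 24 =
 * 445500 and 10-bit needs 297000 * 30 / 24 = 371250, both too fast,
 * so the loop settles on COLOR_DEPTH_888 at 297000 kHz.
 */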
4307 static void fill_stream_properties_from_drm_display_mode(
4308 struct dc_stream_state *stream,
4309 const struct drm_display_mode *mode_in,
4310 const struct drm_connector *connector,
4311 const struct drm_connector_state *connector_state,
4312 const struct dc_stream_state *old_stream,
4315 struct dc_crtc_timing *timing_out = &stream->timing;
4316 const struct drm_display_info *info = &connector->display_info;
4317 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4318 struct hdmi_vendor_infoframe hv_frame;
4319 struct hdmi_avi_infoframe avi_frame;
4321 memset(&hv_frame, 0, sizeof(hv_frame));
4322 memset(&avi_frame, 0, sizeof(avi_frame));
4324 timing_out->h_border_left = 0;
4325 timing_out->h_border_right = 0;
4326 timing_out->v_border_top = 0;
4327 timing_out->v_border_bottom = 0;
4328 /* TODO: un-hardcode */
4329 if (drm_mode_is_420_only(info, mode_in)
4330 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4331 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4332 else if (drm_mode_is_420_also(info, mode_in)
4333 && aconnector->force_yuv420_output)
4334 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4335 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4336 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4337 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4339 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4341 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4342 timing_out->display_color_depth = convert_color_depth_from_display_info(
4344 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4346 timing_out->scan_type = SCANNING_TYPE_NODATA;
4347 timing_out->hdmi_vic = 0;
4350 timing_out->vic = old_stream->timing.vic;
4351 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4352 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4354 timing_out->vic = drm_match_cea_mode(mode_in);
4355 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4356 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4357 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4358 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4361 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4362 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4363 timing_out->vic = avi_frame.video_code;
4364 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4365 timing_out->hdmi_vic = hv_frame.vic;
4368 timing_out->h_addressable = mode_in->crtc_hdisplay;
4369 timing_out->h_total = mode_in->crtc_htotal;
4370 timing_out->h_sync_width =
4371 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4372 timing_out->h_front_porch =
4373 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4374 timing_out->v_total = mode_in->crtc_vtotal;
4375 timing_out->v_addressable = mode_in->crtc_vdisplay;
4376 timing_out->v_front_porch =
4377 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4378 timing_out->v_sync_width =
4379 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4380 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4381 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4383 stream->output_color_space = get_output_color_space(timing_out);
4385 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4386 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4387 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4388 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4389 drm_mode_is_420_also(info, mode_in) &&
4390 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4391 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4392 adjust_colour_depth_from_display_info(timing_out, info);
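/*
 * Worked example of the DRM -> DC timing mapping above, for the canonical
 * 1920x1080@60 CEA mode (crtc_hdisplay 1920, crtc_hsync_start 2008,
 * crtc_hsync_end 2052, crtc_htotal 2200): h_addressable = 1920,
 * h_front_porch = 2008 - 1920 = 88, h_sync_width = 2052 - 2008 = 44,
 * h_total = 2200; crtc_clock of 148500 kHz becomes pix_clk_100hz 1485000.
 */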
4397 static void fill_audio_info(struct audio_info *audio_info,
4398 const struct drm_connector *drm_connector,
4399 const struct dc_sink *dc_sink)
4402 int cea_revision = 0;
4403 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4405 audio_info->manufacture_id = edid_caps->manufacturer_id;
4406 audio_info->product_id = edid_caps->product_id;
4408 cea_revision = drm_connector->display_info.cea_rev;
4410 strscpy(audio_info->display_name,
4411 edid_caps->display_name,
4412 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4414 if (cea_revision >= 3) {
4415 audio_info->mode_count = edid_caps->audio_mode_count;
4417 for (i = 0; i < audio_info->mode_count; ++i) {
4418 audio_info->modes[i].format_code =
4419 (enum audio_format_code)
4420 (edid_caps->audio_modes[i].format_code);
4421 audio_info->modes[i].channel_count =
4422 edid_caps->audio_modes[i].channel_count;
4423 audio_info->modes[i].sample_rates.all =
4424 edid_caps->audio_modes[i].sample_rate;
4425 audio_info->modes[i].sample_size =
4426 edid_caps->audio_modes[i].sample_size;
4430 audio_info->flags.all = edid_caps->speaker_flags;
4432	/* TODO: We only check progressive mode; check interlaced mode too */
4433 if (drm_connector->latency_present[0]) {
4434 audio_info->video_latency = drm_connector->video_latency[0];
4435 audio_info->audio_latency = drm_connector->audio_latency[0];
4438 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4443 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4444 struct drm_display_mode *dst_mode)
4446 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4447 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4448 dst_mode->crtc_clock = src_mode->crtc_clock;
4449 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4450 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4451 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4452 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4453 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4454 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4455 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4456 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4457 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4458 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4459 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4463 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4464 const struct drm_display_mode *native_mode,
4467 if (scale_enabled) {
4468 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4469 } else if (native_mode->clock == drm_mode->clock &&
4470 native_mode->htotal == drm_mode->htotal &&
4471 native_mode->vtotal == drm_mode->vtotal) {
4472 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4474		/* no scaling and no amdgpu-inserted mode, no need to patch */
4478 static struct dc_sink *
4479 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4481 struct dc_sink_init_data sink_init_data = { 0 };
4482 struct dc_sink *sink = NULL;
4483 sink_init_data.link = aconnector->dc_link;
4484 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4486 sink = dc_sink_create(&sink_init_data);
4488 DRM_ERROR("Failed to create sink!\n");
4491 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4496 static void set_multisync_trigger_params(
4497 struct dc_stream_state *stream)
4499 if (stream->triggered_crtc_reset.enabled) {
4500 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4501 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4505 static void set_master_stream(struct dc_stream_state *stream_set[],
4508 int j, highest_rfr = 0, master_stream = 0;
4510 for (j = 0; j < stream_count; j++) {
4511 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4512 int refresh_rate = 0;
4514 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4515 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4516 if (refresh_rate > highest_rfr) {
4517 highest_rfr = refresh_rate;
4522 for (j = 0; j < stream_count; j++) {
4524 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
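/*
 * Illustrative sketch (not driver code): the nominal refresh-rate estimate
 * used by set_master_stream() above, shown in isolation. For 1080p60 this
 * yields (1485000 * 100) / (2200 * 1125) == 60; the stream with the highest
 * such rate becomes the master for the multisync trigger.
 */
static inline int example_stream_refresh_rate(const struct dc_stream_state *s)
{
	/* pix_clk_100hz is in units of 100 Hz, hence the * 100 factor */
	return (s->timing.pix_clk_100hz * 100) /
	       (s->timing.h_total * s->timing.v_total);
}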
4528 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4532 if (context->stream_count < 2)
4534 for (i = 0; i < context->stream_count ; i++) {
4535 if (!context->streams[i])
4538 * TODO: add a function to read AMD VSDB bits and set
4539 * crtc_sync_master.multi_sync_enabled flag
4540		 * For now it's set to false.
4542 set_multisync_trigger_params(context->streams[i]);
4544 set_master_stream(context->streams, context->stream_count);
4547 static struct dc_stream_state *
4548 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4549 const struct drm_display_mode *drm_mode,
4550 const struct dm_connector_state *dm_state,
4551 const struct dc_stream_state *old_stream,
4554 struct drm_display_mode *preferred_mode = NULL;
4555 struct drm_connector *drm_connector;
4556 const struct drm_connector_state *con_state =
4557 dm_state ? &dm_state->base : NULL;
4558 struct dc_stream_state *stream = NULL;
4559 struct drm_display_mode mode = *drm_mode;
4560 bool native_mode_found = false;
4561 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4563 int preferred_refresh = 0;
4564 #if defined(CONFIG_DRM_AMD_DC_DCN)
4565 struct dsc_dec_dpcd_caps dsc_caps;
4567 uint32_t link_bandwidth_kbps;
4569 struct dc_sink *sink = NULL;
4570 if (aconnector == NULL) {
4571 DRM_ERROR("aconnector is NULL!\n");
4575 drm_connector = &aconnector->base;
4577 if (!aconnector->dc_sink) {
4578 sink = create_fake_sink(aconnector);
4582 sink = aconnector->dc_sink;
4583 dc_sink_retain(sink);
4586 stream = dc_create_stream_for_sink(sink);
4588 if (stream == NULL) {
4589 DRM_ERROR("Failed to create stream for sink!\n");
4593 stream->dm_stream_context = aconnector;
4595 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4596 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4598 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4599 /* Search for preferred mode */
4600 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4601 native_mode_found = true;
4605 if (!native_mode_found)
4606 preferred_mode = list_first_entry_or_null(
4607 &aconnector->base.modes,
4608 struct drm_display_mode,
4611 mode_refresh = drm_mode_vrefresh(&mode);
4613 if (preferred_mode == NULL) {
4615		 * This may not be an error; the use case is when we have no
4616		 * usermode calls to reset and set mode upon hotplug. In this
4617		 * case, we call set mode ourselves to restore the previous mode
4618		 * and the mode list may not have been filled in yet.
4620 DRM_DEBUG_DRIVER("No preferred mode found\n");
4622 decide_crtc_timing_for_drm_display_mode(
4623 &mode, preferred_mode,
4624 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4625 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4629 drm_mode_set_crtcinfo(&mode, 0);
4632	 * If scaling is enabled and the refresh rate didn't change,
4633	 * we copy the VIC and polarities of the old timings
4635 if (!scale || mode_refresh != preferred_refresh)
4636 fill_stream_properties_from_drm_display_mode(stream,
4637 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4639 fill_stream_properties_from_drm_display_mode(stream,
4640 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4642 stream->timing.flags.DSC = 0;
4644 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4645 #if defined(CONFIG_DRM_AMD_DC_DCN)
4646 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4647 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4648 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4651 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4652 dc_link_get_link_cap(aconnector->dc_link));
4654 #if defined(CONFIG_DRM_AMD_DC_DCN)
4655 if (dsc_caps.is_dsc_supported) {
4656 /* Set DSC policy according to dsc_clock_en */
4657 dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
4659 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4661 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4662 link_bandwidth_kbps,
4664 &stream->timing.dsc_cfg))
4665 stream->timing.flags.DSC = 1;
4666 /* Overwrite the stream flag if DSC is enabled through debugfs */
4667 if (aconnector->dsc_settings.dsc_clock_en)
4668 stream->timing.flags.DSC = 1;
4670 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
4671 stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
4672 aconnector->dsc_settings.dsc_slice_width);
4674 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
4675 stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
4676 aconnector->dsc_settings.dsc_slice_height);
4678 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4679 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4684 update_stream_scaling_settings(&mode, dm_state, stream);
4687 &stream->audio_info,
4691 update_stream_signal(stream, sink);
4693 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4694 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4696 if (stream->link->psr_settings.psr_feature_enabled) {
4698		// Decide whether the stream supports VSC SDP colorimetry
4699		// before building the VSC info packet
4701 stream->use_vsc_sdp_for_colorimetry = false;
4702 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4703 stream->use_vsc_sdp_for_colorimetry =
4704 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4706 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4707 stream->use_vsc_sdp_for_colorimetry = true;
4709 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4712 dc_sink_release(sink);
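/*
 * Worked example for the debugfs DSC overrides above: with
 * h_addressable == 3840 and a forced dsc_slice_width of 1920,
 * num_slices_h = DIV_ROUND_UP(3840, 1920) == 2, i.e. two horizontal
 * slices per line.
 */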
4717 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4719 drm_crtc_cleanup(crtc);
4723 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4724 struct drm_crtc_state *state)
4726 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4728	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4730 dc_stream_release(cur->stream);
4733 __drm_atomic_helper_crtc_destroy_state(state);
4739 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4741 struct dm_crtc_state *state;
4744 dm_crtc_destroy_state(crtc, crtc->state);
4746 state = kzalloc(sizeof(*state), GFP_KERNEL);
4747 if (WARN_ON(!state))
4750 crtc->state = &state->base;
4751 crtc->state->crtc = crtc;
4755 static struct drm_crtc_state *
4756 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4758 struct dm_crtc_state *state, *cur;
4760 cur = to_dm_crtc_state(crtc->state);
4762 if (WARN_ON(!crtc->state))
4765 state = kzalloc(sizeof(*state), GFP_KERNEL);
4769 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4772 state->stream = cur->stream;
4773 dc_stream_retain(state->stream);
4776 state->active_planes = cur->active_planes;
4777 state->vrr_params = cur->vrr_params;
4778 state->vrr_infopacket = cur->vrr_infopacket;
4779 state->abm_level = cur->abm_level;
4780 state->vrr_supported = cur->vrr_supported;
4781 state->freesync_config = cur->freesync_config;
4782 state->crc_src = cur->crc_src;
4783 state->cm_has_degamma = cur->cm_has_degamma;
4784 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4786	/* TODO: Duplicate dc_stream after the stream object is flattened */
4788 return &state->base;
4791 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4793 enum dc_irq_source irq_source;
4794 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4795 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4798 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4800 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4802 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4803 acrtc->crtc_id, enable ? "en" : "dis", rc);
4807 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4809 enum dc_irq_source irq_source;
4810 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4811 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4812 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4816		/* vblank irq on -> only need vupdate irq in VRR mode */
4817 if (amdgpu_dm_vrr_active(acrtc_state))
4818 rc = dm_set_vupdate_irq(crtc, true);
4820 /* vblank irq off -> vupdate irq off */
4821 rc = dm_set_vupdate_irq(crtc, false);
4827 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4828 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4831 static int dm_enable_vblank(struct drm_crtc *crtc)
4833 return dm_set_vblank(crtc, true);
4836 static void dm_disable_vblank(struct drm_crtc *crtc)
4838 dm_set_vblank(crtc, false);
4841 /* Implemented only the options currently available for the driver */
4842 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4843 .reset = dm_crtc_reset_state,
4844 .destroy = amdgpu_dm_crtc_destroy,
4845 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4846 .set_config = drm_atomic_helper_set_config,
4847 .page_flip = drm_atomic_helper_page_flip,
4848 .atomic_duplicate_state = dm_crtc_duplicate_state,
4849 .atomic_destroy_state = dm_crtc_destroy_state,
4850 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4851 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4852 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4853 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4854 .enable_vblank = dm_enable_vblank,
4855 .disable_vblank = dm_disable_vblank,
4856 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4859 static enum drm_connector_status
4860 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4863 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4867 * 1. This interface is NOT called in context of HPD irq.
4868	 * 2. This interface *is called* in context of user-mode ioctl, which
4869	 * makes it a bad place for *any* MST-related activity.
4872 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4873 !aconnector->fake_enable)
4874 connected = (aconnector->dc_sink != NULL);
4876 connected = (aconnector->base.force == DRM_FORCE_ON);
4878 return (connected ? connector_status_connected :
4879 connector_status_disconnected);
4882 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4883 struct drm_connector_state *connector_state,
4884 struct drm_property *property,
4887 struct drm_device *dev = connector->dev;
4888 struct amdgpu_device *adev = drm_to_adev(dev);
4889 struct dm_connector_state *dm_old_state =
4890 to_dm_connector_state(connector->state);
4891 struct dm_connector_state *dm_new_state =
4892 to_dm_connector_state(connector_state);
4896 if (property == dev->mode_config.scaling_mode_property) {
4897 enum amdgpu_rmx_type rmx_type;
4900 case DRM_MODE_SCALE_CENTER:
4901 rmx_type = RMX_CENTER;
4903 case DRM_MODE_SCALE_ASPECT:
4904 rmx_type = RMX_ASPECT;
4906 case DRM_MODE_SCALE_FULLSCREEN:
4907 rmx_type = RMX_FULL;
4909 case DRM_MODE_SCALE_NONE:
4915 if (dm_old_state->scaling == rmx_type)
4918 dm_new_state->scaling = rmx_type;
4920 } else if (property == adev->mode_info.underscan_hborder_property) {
4921 dm_new_state->underscan_hborder = val;
4923 } else if (property == adev->mode_info.underscan_vborder_property) {
4924 dm_new_state->underscan_vborder = val;
4926 } else if (property == adev->mode_info.underscan_property) {
4927 dm_new_state->underscan_enable = val;
4929 } else if (property == adev->mode_info.abm_level_property) {
4930 dm_new_state->abm_level = val;
4937 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4938 const struct drm_connector_state *state,
4939 struct drm_property *property,
4942 struct drm_device *dev = connector->dev;
4943 struct amdgpu_device *adev = drm_to_adev(dev);
4944 struct dm_connector_state *dm_state =
4945 to_dm_connector_state(state);
4948 if (property == dev->mode_config.scaling_mode_property) {
4949 switch (dm_state->scaling) {
4951 *val = DRM_MODE_SCALE_CENTER;
4954 *val = DRM_MODE_SCALE_ASPECT;
4957 *val = DRM_MODE_SCALE_FULLSCREEN;
4961 *val = DRM_MODE_SCALE_NONE;
4965 } else if (property == adev->mode_info.underscan_hborder_property) {
4966 *val = dm_state->underscan_hborder;
4968 } else if (property == adev->mode_info.underscan_vborder_property) {
4969 *val = dm_state->underscan_vborder;
4971 } else if (property == adev->mode_info.underscan_property) {
4972 *val = dm_state->underscan_enable;
4974 } else if (property == adev->mode_info.abm_level_property) {
4975 *val = dm_state->abm_level;
4982 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4984 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4986 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4989 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4991 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4992 const struct dc_link *link = aconnector->dc_link;
4993 struct amdgpu_device *adev = drm_to_adev(connector->dev);
4994 struct amdgpu_display_manager *dm = &adev->dm;
4996 drm_atomic_private_obj_fini(&aconnector->mst_mgr.base);
4997 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4998 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5000 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5001 link->type != dc_connection_none &&
5002 dm->backlight_dev) {
5003 backlight_device_unregister(dm->backlight_dev);
5004 dm->backlight_dev = NULL;
5008 if (aconnector->dc_em_sink)
5009 dc_sink_release(aconnector->dc_em_sink);
5010 aconnector->dc_em_sink = NULL;
5011 if (aconnector->dc_sink)
5012 dc_sink_release(aconnector->dc_sink);
5013 aconnector->dc_sink = NULL;
5015 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5016 drm_connector_unregister(connector);
5017 drm_connector_cleanup(connector);
5018 if (aconnector->i2c) {
5019 i2c_del_adapter(&aconnector->i2c->base);
5020 kfree(aconnector->i2c);
5022 kfree(aconnector->dm_dp_aux.aux.name);
5027 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5029 struct dm_connector_state *state =
5030 to_dm_connector_state(connector->state);
5032 if (connector->state)
5033 __drm_atomic_helper_connector_destroy_state(connector->state);
5037 state = kzalloc(sizeof(*state), GFP_KERNEL);
5040 state->scaling = RMX_OFF;
5041 state->underscan_enable = false;
5042 state->underscan_hborder = 0;
5043 state->underscan_vborder = 0;
5044 state->base.max_requested_bpc = 8;
5045 state->vcpi_slots = 0;
5047 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5048 state->abm_level = amdgpu_dm_abm_level;
5050 __drm_atomic_helper_connector_reset(connector, &state->base);
5054 struct drm_connector_state *
5055 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5057 struct dm_connector_state *state =
5058 to_dm_connector_state(connector->state);
5060 struct dm_connector_state *new_state =
5061 kmemdup(state, sizeof(*state), GFP_KERNEL);
5066 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5068 new_state->freesync_capable = state->freesync_capable;
5069 new_state->abm_level = state->abm_level;
5070 new_state->scaling = state->scaling;
5071 new_state->underscan_enable = state->underscan_enable;
5072 new_state->underscan_hborder = state->underscan_hborder;
5073 new_state->underscan_vborder = state->underscan_vborder;
5074 new_state->vcpi_slots = state->vcpi_slots;
5075 new_state->pbn = state->pbn;
5076 return &new_state->base;
5080 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5082 struct amdgpu_dm_connector *amdgpu_dm_connector =
5083 to_amdgpu_dm_connector(connector);
5086 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5087 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5088 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5089 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5094 #if defined(CONFIG_DEBUG_FS)
5095 connector_debugfs_init(amdgpu_dm_connector);
5101 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5102 .reset = amdgpu_dm_connector_funcs_reset,
5103 .detect = amdgpu_dm_connector_detect,
5104 .fill_modes = drm_helper_probe_single_connector_modes,
5105 .destroy = amdgpu_dm_connector_destroy,
5106 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5107 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5108 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5109 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5110 .late_register = amdgpu_dm_connector_late_register,
5111 .early_unregister = amdgpu_dm_connector_unregister
5114 static int get_modes(struct drm_connector *connector)
5116 return amdgpu_dm_connector_get_modes(connector);
5119 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5121 struct dc_sink_init_data init_params = {
5122 .link = aconnector->dc_link,
5123 .sink_signal = SIGNAL_TYPE_VIRTUAL
5127 if (!aconnector->base.edid_blob_ptr) {
5128 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5129 aconnector->base.name);
5131 aconnector->base.force = DRM_FORCE_OFF;
5132 aconnector->base.override_edid = false;
5136 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5138 aconnector->edid = edid;
5140 aconnector->dc_em_sink = dc_link_add_remote_sink(
5141 aconnector->dc_link,
5143 (edid->extensions + 1) * EDID_LENGTH,
5146 if (aconnector->base.force == DRM_FORCE_ON) {
5147 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5148 aconnector->dc_link->local_sink :
5149 aconnector->dc_em_sink;
5150 dc_sink_retain(aconnector->dc_sink);
5154 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5156 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5159	 * In case of headless boot with force-on for a DP managed connector,
5160	 * those settings have to be != 0 to get an initial modeset
5162 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5163 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5164 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5168 aconnector->base.override_edid = true;
5169 create_eml_sink(aconnector);
5172 static struct dc_stream_state *
5173 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5174 const struct drm_display_mode *drm_mode,
5175 const struct dm_connector_state *dm_state,
5176 const struct dc_stream_state *old_stream)
5178 struct drm_connector *connector = &aconnector->base;
5179 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5180 struct dc_stream_state *stream;
5181 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5182 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5183 enum dc_status dc_result = DC_OK;
5186 stream = create_stream_for_sink(aconnector, drm_mode,
5187 dm_state, old_stream,
5189 if (stream == NULL) {
5190 DRM_ERROR("Failed to create stream for sink!\n");
5194 dc_result = dc_validate_stream(adev->dm.dc, stream);
5196 if (dc_result != DC_OK) {
5197 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5202 dc_status_to_str(dc_result));
5204 dc_stream_release(stream);
5206 requested_bpc -= 2; /* lower bpc to retry validation */
5209 } while (stream == NULL && requested_bpc >= 6);
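/*
 * Worked example for the retry loop above: with max_requested_bpc == 10 the
 * stream is validated at 10 bpc first, then 8, then 6; if 6 bpc still fails
 * DC validation, the loop exits and NULL is returned.
 */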
5214 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5215 struct drm_display_mode *mode)
5217 int result = MODE_ERROR;
5218 struct dc_sink *dc_sink;
5219 /* TODO: Unhardcode stream count */
5220 struct dc_stream_state *stream;
5221 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5223 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5224 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5228	 * Only run this the first time mode_valid is called to initialize
5231 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5232 !aconnector->dc_em_sink)
5233 handle_edid_mgmt(aconnector);
5235 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5237 if (dc_sink == NULL) {
5238 DRM_ERROR("dc_sink is NULL!\n");
5242 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5244 dc_stream_release(stream);
5249	/* TODO: error handling */
5253 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5254 struct dc_info_packet *out)
5256 struct hdmi_drm_infoframe frame;
5257 unsigned char buf[30]; /* 26 + 4 */
5261 memset(out, 0, sizeof(*out));
5263 if (!state->hdr_output_metadata)
5266 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5270 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5274 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5278 /* Prepare the infopacket for DC. */
5279 switch (state->connector->connector_type) {
5280 case DRM_MODE_CONNECTOR_HDMIA:
5281 out->hb0 = 0x87; /* type */
5282 out->hb1 = 0x01; /* version */
5283 out->hb2 = 0x1A; /* length */
5284 out->sb[0] = buf[3]; /* checksum */
5288 case DRM_MODE_CONNECTOR_DisplayPort:
5289 case DRM_MODE_CONNECTOR_eDP:
5290 out->hb0 = 0x00; /* sdp id, zero */
5291 out->hb1 = 0x87; /* type */
5292 out->hb2 = 0x1D; /* payload len - 1 */
5293 out->hb3 = (0x13 << 2); /* sdp version */
5294 out->sb[0] = 0x01; /* version */
5295 out->sb[1] = 0x1A; /* length */
5303 memcpy(&out->sb[i], &buf[4], 26);
5306 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5307 sizeof(out->sb), false);
5313 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5314 const struct drm_connector_state *new_state)
5316 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5317 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5319 if (old_blob != new_blob) {
5320 if (old_blob && new_blob &&
5321 old_blob->length == new_blob->length)
5322 return memcmp(old_blob->data, new_blob->data,
5332 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5333 struct drm_atomic_state *state)
5335 struct drm_connector_state *new_con_state =
5336 drm_atomic_get_new_connector_state(state, conn);
5337 struct drm_connector_state *old_con_state =
5338 drm_atomic_get_old_connector_state(state, conn);
5339 struct drm_crtc *crtc = new_con_state->crtc;
5340 struct drm_crtc_state *new_crtc_state;
5346 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5347 struct dc_info_packet hdr_infopacket;
5349 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5353 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5354 if (IS_ERR(new_crtc_state))
5355 return PTR_ERR(new_crtc_state);
5358 * DC considers the stream backends changed if the
5359 * static metadata changes. Forcing the modeset also
5360 * gives a simple way for userspace to switch from
5361 * 8bpc to 10bpc when setting the metadata to enter
5364 * Changing the static metadata after it's been
5365 * set is permissible, however. So only force a
5366 * modeset if we're entering or exiting HDR.
5368 new_crtc_state->mode_changed =
5369 !old_con_state->hdr_output_metadata ||
5370 !new_con_state->hdr_output_metadata;
5376 static const struct drm_connector_helper_funcs
5377 amdgpu_dm_connector_helper_funcs = {
5379	 * If hotplugging a second, bigger display in fbcon mode, the bigger
5380	 * resolution modes will be filtered by drm_mode_validate_size() and
5381	 * end up missing after the user starts lightdm. So we need to renew
5382	 * the mode list in the get_modes callback, not just return the mode count
5384 .get_modes = get_modes,
5385 .mode_valid = amdgpu_dm_connector_mode_valid,
5386 .atomic_check = amdgpu_dm_connector_atomic_check,
5389 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5393 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5395 struct drm_device *dev = new_crtc_state->crtc->dev;
5396 struct drm_plane *plane;
5398 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5399 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5406 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5408 struct drm_atomic_state *state = new_crtc_state->state;
5409 struct drm_plane *plane;
5412 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5413 struct drm_plane_state *new_plane_state;
5415 /* Cursor planes are "fake". */
5416 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5419 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5421 if (!new_plane_state) {
5423			 * The plane is enabled on the CRTC and hasn't changed
5424 * state. This means that it previously passed
5425 * validation and is therefore enabled.
5431 /* We need a framebuffer to be considered enabled. */
5432 num_active += (new_plane_state->fb != NULL);
5438 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5439 struct drm_crtc_state *new_crtc_state)
5441 struct dm_crtc_state *dm_new_crtc_state =
5442 to_dm_crtc_state(new_crtc_state);
5444 dm_new_crtc_state->active_planes = 0;
5446 if (!dm_new_crtc_state->stream)
5449 dm_new_crtc_state->active_planes =
5450 count_crtc_active_planes(new_crtc_state);
5453 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5454 struct drm_crtc_state *state)
5456 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5457 struct dc *dc = adev->dm.dc;
5458 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5461 dm_update_crtc_active_planes(crtc, state);
5463 if (unlikely(!dm_crtc_state->stream &&
5464 modeset_required(state, NULL, dm_crtc_state->stream))) {
5469 /* In some use cases, like reset, no stream is attached */
5470 if (!dm_crtc_state->stream)
5474 * We want at least one hardware plane enabled to use
5475 * the stream with a cursor enabled.
5477 if (state->enable && state->active &&
5478 does_crtc_have_active_cursor(state) &&
5479 dm_crtc_state->active_planes == 0)
5482 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5488 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5489 const struct drm_display_mode *mode,
5490 struct drm_display_mode *adjusted_mode)
5495 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5496 .disable = dm_crtc_helper_disable,
5497 .atomic_check = dm_crtc_helper_atomic_check,
5498 .mode_fixup = dm_crtc_helper_mode_fixup,
5499 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5502 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5507 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5509 switch (display_color_depth) {
5510 case COLOR_DEPTH_666:
5512 case COLOR_DEPTH_888:
5514 case COLOR_DEPTH_101010:
5516 case COLOR_DEPTH_121212:
5518 case COLOR_DEPTH_141414:
5520 case COLOR_DEPTH_161616:
5528 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5529 struct drm_crtc_state *crtc_state,
5530 struct drm_connector_state *conn_state)
5532 struct drm_atomic_state *state = crtc_state->state;
5533 struct drm_connector *connector = conn_state->connector;
5534 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5535 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5536 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5537 struct drm_dp_mst_topology_mgr *mst_mgr;
5538 struct drm_dp_mst_port *mst_port;
5539 enum dc_color_depth color_depth;
5541 bool is_y420 = false;
5543 if (!aconnector->port || !aconnector->dc_sink)
5546 mst_port = aconnector->port;
5547 mst_mgr = &aconnector->mst_port->mst_mgr;
5549 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5552 if (!state->duplicated) {
5553 int max_bpc = conn_state->max_requested_bpc;
5554 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5555 aconnector->force_yuv420_output;
5556 color_depth = convert_color_depth_from_display_info(connector,
5559 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5560 clock = adjusted_mode->clock;
5561 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5563 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5566 dm_new_connector_state->pbn,
5567 dm_mst_get_pbn_divider(aconnector->dc_link));
5568 if (dm_new_connector_state->vcpi_slots < 0) {
5569 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5570 return dm_new_connector_state->vcpi_slots;
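/*
 * Worked example for the computation above: an 8 bpc stream gives
 * bpp = 8 * 3 == 24 bits per pixel. That bpp and the adjusted mode clock
 * feed drm_dp_calc_pbn_mode(), and the resulting PBN is then converted
 * into VCPI time slots by drm_dp_atomic_find_vcpi_slots() using the
 * link's PBN-per-slot divider from dm_mst_get_pbn_divider().
 */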
5575 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5576 .disable = dm_encoder_helper_disable,
5577 .atomic_check = dm_encoder_helper_atomic_check
5580 #if defined(CONFIG_DRM_AMD_DC_DCN)
5581 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5582 struct dc_state *dc_state)
5584 struct dc_stream_state *stream = NULL;
5585 struct drm_connector *connector;
5586 struct drm_connector_state *new_con_state, *old_con_state;
5587 struct amdgpu_dm_connector *aconnector;
5588 struct dm_connector_state *dm_conn_state;
5589 int i, j, clock, bpp;
5590 int vcpi, pbn_div, pbn = 0;
5592 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5594 aconnector = to_amdgpu_dm_connector(connector);
5596 if (!aconnector->port)
5599 if (!new_con_state || !new_con_state->crtc)
5602 dm_conn_state = to_dm_connector_state(new_con_state);
5604 for (j = 0; j < dc_state->stream_count; j++) {
5605 stream = dc_state->streams[j];
5609 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5618 if (stream->timing.flags.DSC != 1) {
5619 drm_dp_mst_atomic_enable_dsc(state,
5627 pbn_div = dm_mst_get_pbn_divider(stream->link);
5628 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5629 clock = stream->timing.pix_clk_100hz / 10;
5630 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5631 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5638 dm_conn_state->pbn = pbn;
5639 dm_conn_state->vcpi_slots = vcpi;
5645 static void dm_drm_plane_reset(struct drm_plane *plane)
5647 struct dm_plane_state *amdgpu_state = NULL;
5650 plane->funcs->atomic_destroy_state(plane, plane->state);
5652 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5653 WARN_ON(amdgpu_state == NULL);
5656 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5659 static struct drm_plane_state *
5660 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5662 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5664 old_dm_plane_state = to_dm_plane_state(plane->state);
5665 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5666 if (!dm_plane_state)
5669 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5671 if (old_dm_plane_state->dc_state) {
5672 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5673 dc_plane_state_retain(dm_plane_state->dc_state);
5676 /* Framebuffer hasn't been updated yet, so retain old flags. */
5677 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5678 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5680 return &dm_plane_state->base;
5683 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5684 struct drm_plane_state *state)
5686 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5688 if (dm_plane_state->dc_state)
5689 dc_plane_state_release(dm_plane_state->dc_state);
5691 drm_atomic_helper_plane_destroy_state(plane, state);
5694 static const struct drm_plane_funcs dm_plane_funcs = {
5695 .update_plane = drm_atomic_helper_update_plane,
5696 .disable_plane = drm_atomic_helper_disable_plane,
5697 .destroy = drm_primary_helper_destroy,
5698 .reset = dm_drm_plane_reset,
5699 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5700 .atomic_destroy_state = dm_drm_plane_destroy_state,
5703 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5704 struct drm_plane_state *new_state)
5706 struct amdgpu_framebuffer *afb;
5707 struct drm_gem_object *obj;
5708 struct amdgpu_device *adev;
5709 struct amdgpu_bo *rbo;
5710 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5711 struct list_head list;
5712 struct ttm_validate_buffer tv;
5713 struct ww_acquire_ctx ticket;
5717 if (!new_state->fb) {
5718 DRM_DEBUG_DRIVER("No FB bound\n");
5722 afb = to_amdgpu_framebuffer(new_state->fb);
5723 obj = new_state->fb->obj[0];
5724 rbo = gem_to_amdgpu_bo(obj);
5725 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5726 INIT_LIST_HEAD(&list);
5730 list_add(&tv.head, &list);
5732 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5734 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5738 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5739 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5741 domain = AMDGPU_GEM_DOMAIN_VRAM;
5743 r = amdgpu_bo_pin(rbo, domain);
5744 if (unlikely(r != 0)) {
5745 if (r != -ERESTARTSYS)
5746 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5747 ttm_eu_backoff_reservation(&ticket, &list);
5751 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5752 if (unlikely(r != 0)) {
5753 amdgpu_bo_unpin(rbo);
5754 ttm_eu_backoff_reservation(&ticket, &list);
5755 DRM_ERROR("%p bind failed\n", rbo);
5759 ttm_eu_backoff_reservation(&ticket, &list);
5761 afb->address = amdgpu_bo_gpu_offset(rbo);
5766 * We don't do surface updates on planes that have been newly created,
5767 * but we also don't have the afb->address during atomic check.
5769 * Fill in buffer attributes depending on the address here, but only on
5770 * newly created planes since they're not being used by DC yet and this
5771 * won't modify global state.
5773 dm_plane_state_old = to_dm_plane_state(plane->state);
5774 dm_plane_state_new = to_dm_plane_state(new_state);
5776 if (dm_plane_state_new->dc_state &&
5777 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5778 struct dc_plane_state *plane_state =
5779 dm_plane_state_new->dc_state;
5780 bool force_disable_dcc = !plane_state->dcc.enable;
5782 fill_plane_buffer_attributes(
5783 adev, afb, plane_state->format, plane_state->rotation,
5784 dm_plane_state_new->tiling_flags,
5785 &plane_state->tiling_info, &plane_state->plane_size,
5786 &plane_state->dcc, &plane_state->address,
5787 dm_plane_state_new->tmz_surface, force_disable_dcc);
5793 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5794 struct drm_plane_state *old_state)
5796 struct amdgpu_bo *rbo;
5802 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5803 r = amdgpu_bo_reserve(rbo, false);
5805 DRM_ERROR("failed to reserve rbo before unpin\n");
5809 amdgpu_bo_unpin(rbo);
5810 amdgpu_bo_unreserve(rbo);
5811 amdgpu_bo_unref(&rbo);
5814 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5815 struct drm_crtc_state *new_crtc_state)
5817 int max_downscale = 0;
5818 int max_upscale = INT_MAX;
5820 /* TODO: These should be checked against DC plane caps */
5821 return drm_atomic_helper_check_plane_state(
5822 state, new_crtc_state, max_downscale, max_upscale, true, true);
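/*
 * Note for the check above: drm_atomic_helper_check_plane_state() takes
 * its scaling limits in 16.16 fixed point, where 1:1 is 1 << 16 == 65536.
 * Passing 0 and INT_MAX therefore permits arbitrary down- and upscaling,
 * which is why the TODO asks for bounds derived from DC plane caps.
 */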
5825 static int dm_plane_atomic_check(struct drm_plane *plane,
5826 struct drm_plane_state *state)
5828 struct amdgpu_device *adev = drm_to_adev(plane->dev);
5829 struct dc *dc = adev->dm.dc;
5830 struct dm_plane_state *dm_plane_state;
5831 struct dc_scaling_info scaling_info;
5832 struct drm_crtc_state *new_crtc_state;
5835 dm_plane_state = to_dm_plane_state(state);
5837 if (!dm_plane_state->dc_state)
5841 drm_atomic_get_new_crtc_state(state->state, state->crtc);
5842 if (!new_crtc_state)
5845 ret = dm_plane_helper_check_state(state, new_crtc_state);
5849 ret = fill_dc_scaling_info(state, &scaling_info);
5853 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5859 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5860 struct drm_plane_state *new_plane_state)
5862 /* Only support async updates on cursor planes. */
5863 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5869 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5870 struct drm_plane_state *new_state)
5872 struct drm_plane_state *old_state =
5873 drm_atomic_get_old_plane_state(new_state->state, plane);
5875 swap(plane->state->fb, new_state->fb);
5877 plane->state->src_x = new_state->src_x;
5878 plane->state->src_y = new_state->src_y;
5879 plane->state->src_w = new_state->src_w;
5880 plane->state->src_h = new_state->src_h;
5881 plane->state->crtc_x = new_state->crtc_x;
5882 plane->state->crtc_y = new_state->crtc_y;
5883 plane->state->crtc_w = new_state->crtc_w;
5884 plane->state->crtc_h = new_state->crtc_h;
5886 handle_cursor_update(plane, old_state);
5889 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5890 .prepare_fb = dm_plane_helper_prepare_fb,
5891 .cleanup_fb = dm_plane_helper_cleanup_fb,
5892 .atomic_check = dm_plane_atomic_check,
5893 .atomic_async_check = dm_plane_atomic_async_check,
5894 .atomic_async_update = dm_plane_atomic_async_update
5898 * TODO: these are currently initialized to RGB formats only.
5899 * For future use cases we should either initialize them dynamically based on
5900 * plane capabilities, or initialize this array to all formats, so the internal
5901 * drm check will succeed, and let DC implement the proper check
5903 static const uint32_t rgb_formats[] = {
5904 DRM_FORMAT_XRGB8888,
5905 DRM_FORMAT_ARGB8888,
5906 DRM_FORMAT_RGBA8888,
5907 DRM_FORMAT_XRGB2101010,
5908 DRM_FORMAT_XBGR2101010,
5909 DRM_FORMAT_ARGB2101010,
5910 DRM_FORMAT_ABGR2101010,
5911 DRM_FORMAT_XBGR8888,
5912 DRM_FORMAT_ABGR8888,
5916 static const uint32_t overlay_formats[] = {
5917 DRM_FORMAT_XRGB8888,
5918 DRM_FORMAT_ARGB8888,
5919 DRM_FORMAT_RGBA8888,
5920 DRM_FORMAT_XBGR8888,
5921 DRM_FORMAT_ABGR8888,
5925 static const u32 cursor_formats[] = {
5929 static int get_plane_formats(const struct drm_plane *plane,
5930 const struct dc_plane_cap *plane_cap,
5931 uint32_t *formats, int max_formats)
5933 int i, num_formats = 0;
5936 * TODO: Query support for each group of formats directly from
5937	 * DC plane caps. This will require adding more formats to the
	 * caps list.
5941 switch (plane->type) {
5942 case DRM_PLANE_TYPE_PRIMARY:
5943 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5944 if (num_formats >= max_formats)
5947 formats[num_formats++] = rgb_formats[i];
5950 if (plane_cap && plane_cap->pixel_format_support.nv12)
5951 formats[num_formats++] = DRM_FORMAT_NV12;
5952 if (plane_cap && plane_cap->pixel_format_support.p010)
5953 formats[num_formats++] = DRM_FORMAT_P010;
5954 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5955 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5956 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5957 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5958 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5962 case DRM_PLANE_TYPE_OVERLAY:
5963 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5964 if (num_formats >= max_formats)
5967 formats[num_formats++] = overlay_formats[i];
5971 case DRM_PLANE_TYPE_CURSOR:
5972 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5973 if (num_formats >= max_formats)
5976 formats[num_formats++] = cursor_formats[i];
5984 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5985 struct drm_plane *plane,
5986 unsigned long possible_crtcs,
5987 const struct dc_plane_cap *plane_cap)
5989 uint32_t formats[32];
5992 unsigned int supported_rotations;
5994 num_formats = get_plane_formats(plane, plane_cap, formats,
5995 ARRAY_SIZE(formats));
5997 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
5998 &dm_plane_funcs, formats, num_formats,
5999 NULL, plane->type, NULL);
6003 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6004 plane_cap && plane_cap->per_pixel_alpha) {
6005 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6006 BIT(DRM_MODE_BLEND_PREMULTI);
6008 drm_plane_create_alpha_property(plane);
6009 drm_plane_create_blend_mode_property(plane, blend_caps);
6012 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6014 (plane_cap->pixel_format_support.nv12 ||
6015 plane_cap->pixel_format_support.p010)) {
6016 /* This only affects YUV formats. */
6017 drm_plane_create_color_properties(
6019 BIT(DRM_COLOR_YCBCR_BT601) |
6020 BIT(DRM_COLOR_YCBCR_BT709) |
6021 BIT(DRM_COLOR_YCBCR_BT2020),
6022 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6023 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6024 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6027 supported_rotations =
6028 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6029 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6031 if (dm->adev->asic_type >= CHIP_BONAIRE)
6032 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6033 supported_rotations);
6035 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6037 /* Create (reset) the plane state */
6038 if (plane->funcs->reset)
6039 plane->funcs->reset(plane);
6044 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6045 struct drm_plane *plane,
6046 uint32_t crtc_index)
6048 struct amdgpu_crtc *acrtc = NULL;
6049 struct drm_plane *cursor_plane;
6053 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6057 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6058 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6060 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6064 res = drm_crtc_init_with_planes(
6069 &amdgpu_dm_crtc_funcs, NULL);
6074 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6076 /* Create (reset) the plane state */
6077 if (acrtc->base.funcs->reset)
6078 acrtc->base.funcs->reset(&acrtc->base);
6080 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6081 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6083 acrtc->crtc_id = crtc_index;
6084 acrtc->base.enabled = false;
6085 acrtc->otg_inst = -1;
6087 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6088 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6089 true, MAX_COLOR_LUT_ENTRIES);
6090 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6096 kfree(cursor_plane);
6101 static int to_drm_connector_type(enum signal_type st)
6104 case SIGNAL_TYPE_HDMI_TYPE_A:
6105 return DRM_MODE_CONNECTOR_HDMIA;
6106 case SIGNAL_TYPE_EDP:
6107 return DRM_MODE_CONNECTOR_eDP;
6108 case SIGNAL_TYPE_LVDS:
6109 return DRM_MODE_CONNECTOR_LVDS;
6110 case SIGNAL_TYPE_RGB:
6111 return DRM_MODE_CONNECTOR_VGA;
6112 case SIGNAL_TYPE_DISPLAY_PORT:
6113 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6114 return DRM_MODE_CONNECTOR_DisplayPort;
6115 case SIGNAL_TYPE_DVI_DUAL_LINK:
6116 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6117 return DRM_MODE_CONNECTOR_DVID;
6118 case SIGNAL_TYPE_VIRTUAL:
6119 return DRM_MODE_CONNECTOR_VIRTUAL;
6122 return DRM_MODE_CONNECTOR_Unknown;
6126 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6128 struct drm_encoder *encoder;
6130 /* There is only one encoder per connector */
6131 drm_connector_for_each_possible_encoder(connector, encoder)
6137 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6139 struct drm_encoder *encoder;
6140 struct amdgpu_encoder *amdgpu_encoder;
6142 encoder = amdgpu_dm_connector_to_encoder(connector);
6144 if (encoder == NULL)
6147 amdgpu_encoder = to_amdgpu_encoder(encoder);
6149 amdgpu_encoder->native_mode.clock = 0;
6151 if (!list_empty(&connector->probed_modes)) {
6152 struct drm_display_mode *preferred_mode = NULL;
6154 list_for_each_entry(preferred_mode,
6155 &connector->probed_modes,
6157 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6158 amdgpu_encoder->native_mode = *preferred_mode;
6166 static struct drm_display_mode *
6167 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6169 int hdisplay, int vdisplay)
6171 struct drm_device *dev = encoder->dev;
6172 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6173 struct drm_display_mode *mode = NULL;
6174 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6176 mode = drm_mode_duplicate(dev, native_mode);
6181 mode->hdisplay = hdisplay;
6182 mode->vdisplay = vdisplay;
6183 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6184 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6190 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6191 struct drm_connector *connector)
6193 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6194 struct drm_display_mode *mode = NULL;
6195 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6196 struct amdgpu_dm_connector *amdgpu_dm_connector =
6197 to_amdgpu_dm_connector(connector);
6201 char name[DRM_DISPLAY_MODE_LEN];
6204 } common_modes[] = {
6205 { "640x480", 640, 480},
6206 { "800x600", 800, 600},
6207 { "1024x768", 1024, 768},
6208 { "1280x720", 1280, 720},
6209 { "1280x800", 1280, 800},
6210 {"1280x1024", 1280, 1024},
6211 { "1440x900", 1440, 900},
6212 {"1680x1050", 1680, 1050},
6213 {"1600x1200", 1600, 1200},
6214 {"1920x1080", 1920, 1080},
6215 {"1920x1200", 1920, 1200}
6218 n = ARRAY_SIZE(common_modes);
6220 for (i = 0; i < n; i++) {
6221 struct drm_display_mode *curmode = NULL;
6222 bool mode_existed = false;
6224 if (common_modes[i].w > native_mode->hdisplay ||
6225 common_modes[i].h > native_mode->vdisplay ||
6226 (common_modes[i].w == native_mode->hdisplay &&
6227 common_modes[i].h == native_mode->vdisplay))
6230 list_for_each_entry(curmode, &connector->probed_modes, head) {
6231 if (common_modes[i].w == curmode->hdisplay &&
6232 common_modes[i].h == curmode->vdisplay) {
6233 mode_existed = true;
6241 mode = amdgpu_dm_create_common_mode(encoder,
6242 common_modes[i].name, common_modes[i].w,
6244 drm_mode_probed_add(connector, mode);
6245 amdgpu_dm_connector->num_modes++;
6249 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6252 struct amdgpu_dm_connector *amdgpu_dm_connector =
6253 to_amdgpu_dm_connector(connector);
6256 /* empty probed_modes */
6257 INIT_LIST_HEAD(&connector->probed_modes);
6258 amdgpu_dm_connector->num_modes =
6259 drm_add_edid_modes(connector, edid);
6261		/* Sort the probed modes before calling
6262		 * amdgpu_dm_get_native_mode(), since an EDID can have
6263		 * more than one preferred mode. Modes that come
6264		 * later in the probed mode list could be of a higher,
6265		 * preferred resolution: for example, 3840x2160 in the
6266		 * base EDID preferred timing and 4096x2160 in a
6267		 * DisplayID extension block later.
6269 drm_mode_sort(&connector->probed_modes);
6270 amdgpu_dm_get_native_mode(connector);
6272 amdgpu_dm_connector->num_modes = 0;
6276 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6278 struct amdgpu_dm_connector *amdgpu_dm_connector =
6279 to_amdgpu_dm_connector(connector);
6280 struct drm_encoder *encoder;
6281 struct edid *edid = amdgpu_dm_connector->edid;
6283 encoder = amdgpu_dm_connector_to_encoder(connector);
6285 if (!edid || !drm_edid_is_valid(edid)) {
6286 amdgpu_dm_connector->num_modes =
6287 drm_add_modes_noedid(connector, 640, 480);
6289 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6290 amdgpu_dm_connector_add_common_modes(encoder, connector);
6292 amdgpu_dm_fbc_init(connector);
6294 return amdgpu_dm_connector->num_modes;
6297 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6298 struct amdgpu_dm_connector *aconnector,
6300 struct dc_link *link,
6303 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6306 * Some of the properties below require access to state, like bpc.
6307 * Allocate some default initial connector state with our reset helper.
6309 if (aconnector->base.funcs->reset)
6310 aconnector->base.funcs->reset(&aconnector->base);
6312 aconnector->connector_id = link_index;
6313 aconnector->dc_link = link;
6314 aconnector->base.interlace_allowed = false;
6315 aconnector->base.doublescan_allowed = false;
6316 aconnector->base.stereo_allowed = false;
6317 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6318 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6319 aconnector->audio_inst = -1;
6320 mutex_init(&aconnector->hpd_lock);
6323	 * Configure HPD hot plug support. connector->polled defaults to 0,
6324	 * which means HPD hot plug is not supported.
6326 switch (connector_type) {
6327 case DRM_MODE_CONNECTOR_HDMIA:
6328 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6329 aconnector->base.ycbcr_420_allowed =
6330 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6332 case DRM_MODE_CONNECTOR_DisplayPort:
6333 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6334 aconnector->base.ycbcr_420_allowed =
6335 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6337 case DRM_MODE_CONNECTOR_DVID:
6338 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6344 drm_object_attach_property(&aconnector->base.base,
6345 dm->ddev->mode_config.scaling_mode_property,
6346 DRM_MODE_SCALE_NONE);
6348 drm_object_attach_property(&aconnector->base.base,
6349 adev->mode_info.underscan_property,
6351 drm_object_attach_property(&aconnector->base.base,
6352 adev->mode_info.underscan_hborder_property,
6354 drm_object_attach_property(&aconnector->base.base,
6355 adev->mode_info.underscan_vborder_property,
6358 if (!aconnector->mst_port)
6359 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6361	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
6362 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6363 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6365 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6366 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6367 drm_object_attach_property(&aconnector->base.base,
6368 adev->mode_info.abm_level_property, 0);
6371 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6372 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6373 connector_type == DRM_MODE_CONNECTOR_eDP) {
6374 drm_object_attach_property(
6375 &aconnector->base.base,
6376 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6378 if (!aconnector->mst_port)
6379 drm_connector_attach_vrr_capable_property(&aconnector->base);
6381 #ifdef CONFIG_DRM_AMD_DC_HDCP
6382 if (adev->dm.hdcp_workqueue)
6383 drm_connector_attach_content_protection_property(&aconnector->base, true);
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
                  struct i2c_msg *msgs, int num)
{
    struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
    struct ddc_service *ddc_service = i2c->ddc_service;
    struct i2c_command cmd;
    int i;
    int result = -EIO;

    cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

    if (!cmd.payloads)
        return result;

    cmd.number_of_payloads = num;
    cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
    cmd.speed = 100;

    for (i = 0; i < num; i++) {
        cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
        cmd.payloads[i].address = msgs[i].addr;
        cmd.payloads[i].length = msgs[i].len;
        cmd.payloads[i].data = msgs[i].buf;
    }

    if (dc_submit_i2c(
            ddc_service->ctx->dc,
            ddc_service->ddc_pin->hw_info.ddc_channel,
            &cmd))
        result = num;

    kfree(cmd.payloads);
    return result;
}
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
    return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
    .master_xfer = amdgpu_dm_i2c_xfer,
    .functionality = amdgpu_dm_i2c_func,
};
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
       int link_index,
       int *res)
{
    struct amdgpu_device *adev = ddc_service->ctx->driver_context;
    struct amdgpu_i2c_adapter *i2c;

    i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
    if (!i2c)
        return NULL;
    i2c->base.owner = THIS_MODULE;
    i2c->base.class = I2C_CLASS_DDC;
    i2c->base.dev.parent = &adev->pdev->dev;
    i2c->base.algo = &amdgpu_dm_i2c_algo;
    snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
    i2c_set_adapdata(&i2c->base, i2c);
    i2c->ddc_service = ddc_service;
    i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

    return i2c;
}
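/*
 * Illustrative sketch (an assumption, not part of the driver): once
 * i2c_add_adapter() has registered the adapter built by create_i2c(), any
 * kernel code can reach amdgpu_dm_i2c_xfer() through plain i2c_transfer(),
 * e.g. to read the first EDID block at the standard DDC address 0x50:
 *
 *  // hypothetical helper, for illustration only
 *  static int example_read_edid_block(struct i2c_adapter *adap, u8 *buf)
 *  {
 *      u8 offset = 0;
 *      struct i2c_msg msgs[] = {
 *          { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
 *          { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
 *      };
 *
 *      return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
 *  }
 *
 * In practice the DRM core performs this transaction via drm_get_edid().
 */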
/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                    struct amdgpu_dm_connector *aconnector,
                    uint32_t link_index,
                    struct amdgpu_encoder *aencoder)
{
    int res = 0;
    int connector_type;
    struct dc *dc = dm->dc;
    struct dc_link *link = dc_get_link_at_index(dc, link_index);
    struct amdgpu_i2c_adapter *i2c;

    link->priv = aconnector;

    DRM_DEBUG_DRIVER("%s()\n", __func__);

    i2c = create_i2c(link->ddc, link->link_index, &res);
    if (!i2c) {
        DRM_ERROR("Failed to create i2c adapter data\n");
        return -ENOMEM;
    }

    aconnector->i2c = i2c;
    res = i2c_add_adapter(&i2c->base);

    if (res) {
        DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
        goto out_free;
    }

    connector_type = to_drm_connector_type(link->connector_signal);

    res = drm_connector_init_with_ddc(
            dm->ddev,
            &aconnector->base,
            &amdgpu_dm_connector_funcs,
            connector_type,
            &i2c->base);

    if (res) {
        DRM_ERROR("connector_init failed\n");
        aconnector->connector_id = -1;
        goto out_free;
    }

    drm_connector_helper_add(
            &aconnector->base,
            &amdgpu_dm_connector_helper_funcs);

    amdgpu_dm_connector_init_helper(
        dm,
        aconnector,
        connector_type,
        link,
        link_index);

    drm_connector_attach_encoder(
        &aconnector->base, &aencoder->base);

    if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
        || connector_type == DRM_MODE_CONNECTOR_eDP)
        amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
    if (res)
        aconnector->i2c = NULL;

    return res;
}
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
    switch (adev->mode_info.num_crtc) {
    case 3:
        return 0x7;
    case 4:
        return 0xf;
    case 5:
        return 0x1f;
    case 6:
    default:
        return 0x3f;
    }
}
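/*
 * Note (explanatory, assuming the mask values above): the returned mask is
 * simply the lowest num_crtc bits set, e.g. num_crtc == 4 yields
 * 0xf == (1 << 4) - 1, meaning the encoder may be routed to any of CRTCs 0-3.
 */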
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                  struct amdgpu_encoder *aencoder,
                  uint32_t link_index)
{
    struct amdgpu_device *adev = drm_to_adev(dev);

    int res = drm_encoder_init(dev,
                   &aencoder->base,
                   &amdgpu_dm_encoder_funcs,
                   DRM_MODE_ENCODER_TMDS,
                   NULL);

    aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

    if (!res)
        aencoder->encoder_id = link_index;
    else
        aencoder->encoder_id = -1;

    drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

    return res;
}
static void manage_dm_interrupts(struct amdgpu_device *adev,
                 struct amdgpu_crtc *acrtc,
                 bool enable)
{
    /*
     * We have no guarantee that the frontend index maps to the same
     * backend index - some even map to more than one.
     *
     * TODO: Use a different interrupt or check DC itself for the mapping.
     */
    int irq_type =
        amdgpu_display_crtc_idx_to_irq_type(
            adev,
            acrtc->crtc_id);

    if (enable) {
        drm_crtc_vblank_on(&acrtc->base);
        amdgpu_irq_get(
            adev,
            &adev->pageflip_irq,
            irq_type);
    } else {
        amdgpu_irq_put(
            adev,
            &adev->pageflip_irq,
            irq_type);
        drm_crtc_vblank_off(&acrtc->base);
    }
}
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
                      struct amdgpu_crtc *acrtc)
{
    int irq_type =
        amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

    /*
     * This reads the current state for the IRQ and forcibly reapplies
     * the setting to hardware.
     */
    amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
               const struct dm_connector_state *old_dm_state)
{
    if (dm_state->scaling != old_dm_state->scaling)
        return true;
    if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
        if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
            return true;
    } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
        if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
            return true;
    } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
           dm_state->underscan_vborder != old_dm_state->underscan_vborder)
        return true;
    return false;
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
                        const struct drm_connector_state *old_state,
                        const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
    struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

    if (old_state->hdcp_content_type != state->hdcp_content_type &&
        state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
        state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
        return true;
    }

    /* CP is being re-enabled, ignore this */
    if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
        state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
        state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
        return false;
    }

    /* S3 resume case, since the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
    if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
        state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
        state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

    /* Check that something is connected and enabled; otherwise we would
     * start HDCP with nothing driving it (hot-plug, headless S3, DPMS).
     */
    if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
        aconnector->dc_sink != NULL)
        return true;

    if (old_state->content_protection == state->content_protection)
        return false;

    if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
        return true;

    return false;
}
#endif
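/*
 * Illustrative sketch (userspace side, an assumption rather than driver
 * code): a compositor requests HDCP by setting the standard "Content
 * Protection" connector property to DESIRED (1) and polling for ENABLED (2),
 * e.g. with libdrm:
 *
 *  drmModeObjectSetProperty(fd, connector_id, DRM_MODE_OBJECT_CONNECTOR,
 *                           content_protection_prop_id,
 *                           1); // 1 = DESIRED, 2 = ENABLED
 *
 * The transitions filtered above (re-enable, S3 restore) are exactly the
 * cases where such a request must not needlessly restart the HDCP state
 * machine.
 */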
static void remove_stream(struct amdgpu_device *adev,
              struct amdgpu_crtc *acrtc,
              struct dc_stream_state *stream)
{
    /* this is the update mode case */

    acrtc->otg_inst = -1;
    acrtc->enabled = false;
}
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
                   struct dc_cursor_position *position)
{
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
    int x, y;
    int xorigin = 0, yorigin = 0;

    position->enable = false;
    position->x = 0;
    position->y = 0;

    if (!crtc || !plane->state->fb)
        return 0;

    if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
        (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
        DRM_ERROR("%s: bad cursor width or height %d x %d\n",
              __func__,
              plane->state->crtc_w,
              plane->state->crtc_h);
        return -EINVAL;
    }

    x = plane->state->crtc_x;
    y = plane->state->crtc_y;

    if (x <= -amdgpu_crtc->max_cursor_width ||
        y <= -amdgpu_crtc->max_cursor_height)
        return 0;

    if (x < 0) {
        xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
        x = 0;
    }
    if (y < 0) {
        yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
        y = 0;
    }
    position->enable = true;
    position->translate_by_source = true;
    position->x = x;
    position->y = y;
    position->x_hotspot = xorigin;
    position->y_hotspot = yorigin;

    return 0;
}
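/*
 * Worked example: a 64x64 cursor at crtc_x = -16, crtc_y = 8 gives
 * xorigin = min(16, max_cursor_width - 1) = 16 and x = 0, so DC is told to
 * display the cursor at (0, 8) with hotspot (16, 0); the 16 columns that
 * hang off the left screen edge are hidden by the hotspot shift.
 */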
static void handle_cursor_update(struct drm_plane *plane,
                 struct drm_plane_state *old_plane_state)
{
    struct amdgpu_device *adev = drm_to_adev(plane->dev);
    struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
    struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
    struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
    uint64_t address = afb ? afb->address : 0;
    struct dc_cursor_position position;
    struct dc_cursor_attributes attributes;
    int ret;

    if (!plane->state->fb && !old_plane_state->fb)
        return;

    DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
             __func__,
             amdgpu_crtc->crtc_id,
             plane->state->crtc_w,
             plane->state->crtc_h);

    ret = get_cursor_position(plane, crtc, &position);
    if (ret)
        return;

    if (!position.enable) {
        /* turn off cursor */
        if (crtc_state && crtc_state->stream) {
            mutex_lock(&adev->dm.dc_lock);
            dc_stream_set_cursor_position(crtc_state->stream,
                              &position);
            mutex_unlock(&adev->dm.dc_lock);
        }
        return;
    }

    amdgpu_crtc->cursor_width = plane->state->crtc_w;
    amdgpu_crtc->cursor_height = plane->state->crtc_h;

    memset(&attributes, 0, sizeof(attributes));
    attributes.address.high_part = upper_32_bits(address);
    attributes.address.low_part  = lower_32_bits(address);
    attributes.width = plane->state->crtc_w;
    attributes.height = plane->state->crtc_h;
    attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
    attributes.rotation_angle = 0;
    attributes.attribute_flags.value = 0;

    attributes.pitch = attributes.width;

    if (crtc_state->stream) {
        mutex_lock(&adev->dm.dc_lock);
        if (!dc_stream_set_cursor_attributes(crtc_state->stream,
                             &attributes))
            DRM_ERROR("DC failed to set cursor attributes\n");

        if (!dc_stream_set_cursor_position(crtc_state->stream,
                           &position))
            DRM_ERROR("DC failed to set cursor position\n");
        mutex_unlock(&adev->dm.dc_lock);
    }
}
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
    assert_spin_locked(&acrtc->base.dev->event_lock);
    WARN_ON(acrtc->event);

    acrtc->event = acrtc->base.state->event;

    /* Set the flip status */
    acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

    /* Mark this event as consumed */
    acrtc->base.state->event = NULL;

    DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
             acrtc->crtc_id);
}
static void update_freesync_state_on_stream(
    struct amdgpu_display_manager *dm,
    struct dm_crtc_state *new_crtc_state,
    struct dc_stream_state *new_stream,
    struct dc_plane_state *surface,
    u32 flip_timestamp_in_us)
{
    struct mod_vrr_params vrr_params;
    struct dc_info_packet vrr_infopacket = {0};
    struct amdgpu_device *adev = dm->adev;
    unsigned long flags;

    if (!new_stream)
        return;

    /*
     * TODO: Determine why min/max totals and vrefresh can be 0 here.
     * For now it's sufficient to just guard against these conditions.
     */

    if (!new_stream->timing.h_total || !new_stream->timing.v_total)
        return;

    spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    vrr_params = new_crtc_state->vrr_params;

    if (surface) {
        mod_freesync_handle_preflip(
            dm->freesync_module,
            surface,
            new_stream,
            flip_timestamp_in_us,
            &vrr_params);

        if (adev->family < AMDGPU_FAMILY_AI &&
            amdgpu_dm_vrr_active(new_crtc_state)) {
            mod_freesync_handle_v_update(dm->freesync_module,
                             new_stream, &vrr_params);

            /* Need to call this before the frame ends. */
            dc_stream_adjust_vmin_vmax(dm->dc,
                           new_crtc_state->stream,
                           &vrr_params.adjust);
        }
    }

    mod_freesync_build_vrr_infopacket(
        dm->freesync_module,
        new_stream,
        &vrr_params,
        PACKET_TYPE_VRR,
        TRANSFER_FUNC_UNKNOWN,
        &vrr_infopacket);

    new_crtc_state->freesync_timing_changed |=
        (memcmp(&new_crtc_state->vrr_params.adjust,
            &vrr_params.adjust,
            sizeof(vrr_params.adjust)) != 0);

    new_crtc_state->freesync_vrr_info_changed |=
        (memcmp(&new_crtc_state->vrr_infopacket,
            &vrr_infopacket,
            sizeof(vrr_infopacket)) != 0);

    new_crtc_state->vrr_params = vrr_params;
    new_crtc_state->vrr_infopacket = vrr_infopacket;

    new_stream->adjust = new_crtc_state->vrr_params.adjust;
    new_stream->vrr_infopacket = vrr_infopacket;

    if (new_crtc_state->freesync_vrr_info_changed)
        DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
                  new_crtc_state->base.crtc->base.id,
                  (int)new_crtc_state->base.vrr_enabled,
                  (int)vrr_params.state);

    spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static void pre_update_freesync_state_on_stream(
    struct amdgpu_display_manager *dm,
    struct dm_crtc_state *new_crtc_state)
{
    struct dc_stream_state *new_stream = new_crtc_state->stream;
    struct mod_vrr_params vrr_params;
    struct mod_freesync_config config = new_crtc_state->freesync_config;
    struct amdgpu_device *adev = dm->adev;
    unsigned long flags;

    if (!new_stream)
        return;

    /*
     * TODO: Determine why min/max totals and vrefresh can be 0 here.
     * For now it's sufficient to just guard against these conditions.
     */
    if (!new_stream->timing.h_total || !new_stream->timing.v_total)
        return;

    spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    vrr_params = new_crtc_state->vrr_params;

    if (new_crtc_state->vrr_supported &&
        config.min_refresh_in_uhz &&
        config.max_refresh_in_uhz) {
        config.state = new_crtc_state->base.vrr_enabled ?
            VRR_STATE_ACTIVE_VARIABLE :
            VRR_STATE_INACTIVE;
    } else {
        config.state = VRR_STATE_UNSUPPORTED;
    }

    mod_freesync_build_vrr_params(dm->freesync_module,
                      new_stream,
                      &config, &vrr_params);

    new_crtc_state->freesync_timing_changed |=
        (memcmp(&new_crtc_state->vrr_params.adjust,
            &vrr_params.adjust,
            sizeof(vrr_params.adjust)) != 0);

    new_crtc_state->vrr_params = vrr_params;
    spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
                        struct dm_crtc_state *new_state)
{
    bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
    bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

    if (!old_vrr_active && new_vrr_active) {
        /* Transition VRR inactive -> active:
         * While VRR is active, we must not disable vblank irq, as a
         * reenable after disable would compute bogus vblank/pflip
         * timestamps if the re-enable happens inside the display
         * front porch.
         *
         * We also need vupdate irq for the actual core vblank handling
         * at end of vblank.
         */
        dm_set_vupdate_irq(new_state->base.crtc, true);
        drm_crtc_vblank_get(new_state->base.crtc);
        DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
                 __func__, new_state->base.crtc->base.id);
    } else if (old_vrr_active && !new_vrr_active) {
        /* Transition VRR active -> inactive:
         * Allow vblank irq disable again for fixed refresh rate.
         */
        dm_set_vupdate_irq(new_state->base.crtc, false);
        drm_crtc_vblank_put(new_state->base.crtc);
        DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
                 __func__, new_state->base.crtc->base.id);
    }
}
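/*
 * Illustrative sketch (userspace side, an assumption rather than driver
 * code): the transitions above are driven by the atomic "VRR_ENABLED" CRTC
 * property, which a compositor sets once the connector reports
 * "vrr_capable", e.g. with libdrm:
 *
 *  drmModeAtomicAddProperty(req, crtc_id, vrr_enabled_prop_id, 1);
 *  drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 */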
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
    struct drm_plane *plane;
    struct drm_plane_state *old_plane_state, *new_plane_state;
    int i;

    /*
     * TODO: Make this per-stream so we don't issue redundant updates for
     * commits with multiple streams.
     */
    for_each_oldnew_plane_in_state(state, plane, old_plane_state,
                       new_plane_state, i)
        if (plane->type == DRM_PLANE_TYPE_CURSOR)
            handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                    struct dc_state *dc_state,
                    struct drm_device *dev,
                    struct amdgpu_display_manager *dm,
                    struct drm_crtc *pcrtc,
                    bool wait_for_vblank)
{
    uint32_t i;
    uint64_t timestamp_ns;
    struct drm_plane *plane;
    struct drm_plane_state *old_plane_state, *new_plane_state;
    struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
    struct drm_crtc_state *new_pcrtc_state =
            drm_atomic_get_new_crtc_state(state, pcrtc);
    struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
    struct dm_crtc_state *dm_old_crtc_state =
            to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
    int planes_count = 0, vpos, hpos;
    long r;
    unsigned long flags;
    struct amdgpu_bo *abo;
    uint32_t target_vblank, last_flip_vblank;
    bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
    bool pflip_present = false;
    struct {
        struct dc_surface_update surface_updates[MAX_SURFACES];
        struct dc_plane_info plane_infos[MAX_SURFACES];
        struct dc_scaling_info scaling_infos[MAX_SURFACES];
        struct dc_flip_addrs flip_addrs[MAX_SURFACES];
        struct dc_stream_update stream_update;
    } *bundle;

    bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

    if (!bundle) {
        dm_error("Failed to allocate update bundle\n");
        goto cleanup;
    }

    /*
     * Disable the cursor first if we're disabling all the planes.
     * It'll remain on the screen after the planes are re-enabled
     * if we don't.
     */
    if (acrtc_state->active_planes == 0)
        amdgpu_dm_commit_cursors(state);
    /* update planes when needed */
    for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
        struct drm_crtc *crtc = new_plane_state->crtc;
        struct drm_crtc_state *new_crtc_state;
        struct drm_framebuffer *fb = new_plane_state->fb;
        bool plane_needs_flip;
        struct dc_plane_state *dc_plane;
        struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

        /* Cursor plane is handled after stream updates */
        if (plane->type == DRM_PLANE_TYPE_CURSOR)
            continue;

        if (!fb || !crtc || pcrtc != crtc)
            continue;

        new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
        if (!new_crtc_state->active)
            continue;

        dc_plane = dm_new_plane_state->dc_state;

        bundle->surface_updates[planes_count].surface = dc_plane;
        if (new_pcrtc_state->color_mgmt_changed) {
            bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
            bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
            bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
        }

        fill_dc_scaling_info(new_plane_state,
                     &bundle->scaling_infos[planes_count]);

        bundle->surface_updates[planes_count].scaling_info =
            &bundle->scaling_infos[planes_count];

        plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

        pflip_present = pflip_present || plane_needs_flip;

        if (!plane_needs_flip) {
            planes_count += 1;
            continue;
        }

        abo = gem_to_amdgpu_bo(fb->obj[0]);

        /*
         * Wait for all fences on this FB. Do limited wait to avoid
         * deadlock during GPU reset when this fence will not signal
         * but we hold reservation lock for the BO.
         */
        r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
                          false,
                          msecs_to_jiffies(5000));
        if (unlikely(r <= 0))
            DRM_ERROR("Waiting for fences timed out!");

        fill_dc_plane_info_and_addr(
            dm->adev, new_plane_state,
            dm_new_plane_state->tiling_flags,
            &bundle->plane_infos[planes_count],
            &bundle->flip_addrs[planes_count].address,
            dm_new_plane_state->tmz_surface, false);

        DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
                 new_plane_state->plane->index,
                 bundle->plane_infos[planes_count].dcc.enable);

        bundle->surface_updates[planes_count].plane_info =
            &bundle->plane_infos[planes_count];

        /*
         * Only allow immediate flips for fast updates that don't
         * change FB pitch, DCC state, rotation or mirroring.
         */
        bundle->flip_addrs[planes_count].flip_immediate =
            crtc->state->async_flip &&
            acrtc_state->update_type == UPDATE_TYPE_FAST;

        timestamp_ns = ktime_get_ns();
        bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
        bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
        bundle->surface_updates[planes_count].surface = dc_plane;

        if (!bundle->surface_updates[planes_count].surface) {
            DRM_ERROR("No surface for CRTC: id=%d\n",
                    acrtc_attach->crtc_id);
            continue;
        }

        if (plane == pcrtc->primary)
            update_freesync_state_on_stream(
                dm,
                acrtc_state,
                acrtc_state->stream,
                dc_plane,
                bundle->flip_addrs[planes_count].flip_timestamp_in_us);

        DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
                 __func__,
                 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
                 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

        planes_count += 1;
    }
    if (pflip_present) {
        if (!vrr_active) {
            /* Use old throttling in non-vrr fixed refresh rate mode
             * to keep flip scheduling based on target vblank counts
             * working in a backwards compatible way, e.g., for
             * clients using the GLX_OML_sync_control extension or
             * DRI3/Present extension with defined target_msc.
             */
            last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
        } else {
            /* For variable refresh rate mode only:
             * Get vblank of last completed flip to avoid > 1 vrr
             * flips per video frame by use of throttling, but allow
             * flip programming anywhere in the possibly large
             * variable vrr vblank interval for fine-grained flip
             * timing control and more opportunity to avoid stutter
             * on late submission of flips.
             */
            spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
            last_flip_vblank = acrtc_attach->last_flip_vblank;
            spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
        }

        target_vblank = last_flip_vblank + wait_for_vblank;

        /*
         * Wait until we're out of the vertical blank period before the one
         * targeted by the flip
         */
        while ((acrtc_attach->enabled &&
            (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
                                0, &vpos, &hpos, NULL,
                                NULL, &pcrtc->hwmode)
             & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
            (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
            (int)(target_vblank -
              amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
            usleep_range(1000, 1100);
        }

        /*
         * Prepare the flip event for the pageflip interrupt to handle.
         *
         * This only works in the case where we've already turned on the
         * appropriate hardware blocks (eg. HUBP) so in the transition case
         * from 0 -> n planes we have to skip a hardware generated event
         * and rely on sending it from software.
         */
        if (acrtc_attach->base.state->event &&
            acrtc_state->active_planes > 0) {
            drm_crtc_vblank_get(pcrtc);

            spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

            WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
            prepare_flip_isr(acrtc_attach);

            spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
        }

        if (acrtc_state->stream) {
            if (acrtc_state->freesync_vrr_info_changed)
                bundle->stream_update.vrr_infopacket =
                    &acrtc_state->stream->vrr_infopacket;
        }
    }
    /* Update the planes if changed or disable if we don't have any. */
    if ((planes_count || acrtc_state->active_planes == 0) &&
        acrtc_state->stream) {
        bundle->stream_update.stream = acrtc_state->stream;
        if (new_pcrtc_state->mode_changed) {
            bundle->stream_update.src = acrtc_state->stream->src;
            bundle->stream_update.dst = acrtc_state->stream->dst;
        }

        if (new_pcrtc_state->color_mgmt_changed) {
            /*
             * TODO: This isn't fully correct since we've actually
             * already modified the stream in place.
             */
            bundle->stream_update.gamut_remap =
                &acrtc_state->stream->gamut_remap_matrix;
            bundle->stream_update.output_csc_transform =
                &acrtc_state->stream->csc_color_matrix;
            bundle->stream_update.out_transfer_func =
                acrtc_state->stream->out_transfer_func;
        }

        acrtc_state->stream->abm_level = acrtc_state->abm_level;
        if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
            bundle->stream_update.abm_level = &acrtc_state->abm_level;

        /*
         * If FreeSync state on the stream has changed then we need to
         * re-adjust the min/max bounds now that DC doesn't handle this
         * as part of commit.
         */
        if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
            amdgpu_dm_vrr_active(acrtc_state)) {
            spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
            dc_stream_adjust_vmin_vmax(
                dm->dc, acrtc_state->stream,
                &acrtc_state->vrr_params.adjust);
            spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
        }
        mutex_lock(&dm->dc_lock);
        if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
            acrtc_state->stream->link->psr_settings.psr_allow_active)
            amdgpu_dm_psr_disable(acrtc_state->stream);

        dc_commit_updates_for_stream(dm->dc,
                         bundle->surface_updates,
                         planes_count,
                         acrtc_state->stream,
                         &bundle->stream_update,
                         dc_state);

        /*
         * Enable or disable the interrupts on the backend.
         *
         * Most pipes are put into power gating when unused.
         *
         * When power gating is enabled on a pipe we lose the
         * interrupt enablement state when power gating is disabled.
         *
         * So we need to update the IRQ control state in hardware
         * whenever the pipe turns on (since it could be previously
         * power gated) or off (since some pipes can't be power gated
         * on some ASICs).
         */
        if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
            dm_update_pflip_irq_state(drm_to_adev(dev),
                          acrtc_attach);

        if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
            acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
            !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
            amdgpu_dm_link_setup_psr(acrtc_state->stream);
        else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
            acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
            !acrtc_state->stream->link->psr_settings.psr_allow_active) {
            amdgpu_dm_psr_enable(acrtc_state->stream);
        }

        mutex_unlock(&dm->dc_lock);
    }

    /*
     * Update cursor state *after* programming all the planes.
     * This avoids redundant programming in the case where we're going
     * to be disabling a single plane - those pipes are being disabled.
     */
    if (acrtc_state->active_planes)
        amdgpu_dm_commit_cursors(state);

cleanup:
    kfree(bundle);
}
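/*
 * Worked example of the flip throttling above: with fixed refresh and
 * wait_for_vblank == true, target_vblank = last_flip_vblank + 1, so the
 * wait loop holds the flip until the vblank counter has advanced past the
 * vblank in which the previous flip was submitted - at most one flip per
 * frame. With VRR active, last_flip_vblank is instead sampled from the last
 * completed flip, which allows programming anywhere inside the long,
 * variable vblank interval while still capping at one flip per refresh
 * cycle.
 */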
static void amdgpu_dm_commit_audio(struct drm_device *dev,
                   struct drm_atomic_state *state)
{
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_dm_connector *aconnector;
    struct drm_connector *connector;
    struct drm_connector_state *old_con_state, *new_con_state;
    struct drm_crtc_state *new_crtc_state;
    struct dm_crtc_state *new_dm_crtc_state;
    const struct dc_stream_status *status;
    int i, inst;

    /* Notify device removals. */
    for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
        if (old_con_state->crtc != new_con_state->crtc) {
            /* CRTC changes require notification. */
            goto notify;
        }

        if (!new_con_state->crtc)
            continue;

        new_crtc_state = drm_atomic_get_new_crtc_state(
            state, new_con_state->crtc);

        if (!new_crtc_state)
            continue;

        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
            continue;

    notify:
        aconnector = to_amdgpu_dm_connector(connector);

        mutex_lock(&adev->dm.audio_lock);
        inst = aconnector->audio_inst;
        aconnector->audio_inst = -1;
        mutex_unlock(&adev->dm.audio_lock);

        amdgpu_dm_audio_eld_notify(adev, inst);
    }

    /* Notify audio device additions. */
    for_each_new_connector_in_state(state, connector, new_con_state, i) {
        if (!new_con_state->crtc)
            continue;

        new_crtc_state = drm_atomic_get_new_crtc_state(
            state, new_con_state->crtc);

        if (!new_crtc_state)
            continue;

        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
            continue;

        new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
        if (!new_dm_crtc_state->stream)
            continue;

        status = dc_stream_get_status(new_dm_crtc_state->stream);
        if (!status)
            continue;

        aconnector = to_amdgpu_dm_connector(connector);

        mutex_lock(&adev->dm.audio_lock);
        inst = status->audio_inst;
        aconnector->audio_inst = inst;
        mutex_unlock(&adev->dm.audio_lock);

        amdgpu_dm_audio_eld_notify(adev, inst);
    }
}
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
                        struct dc_stream_state *stream_state)
{
    stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
                   struct drm_atomic_state *state,
                   bool nonblock)
{
    struct drm_crtc *crtc;
    struct drm_crtc_state *old_crtc_state, *new_crtc_state;
    struct amdgpu_device *adev = drm_to_adev(dev);
    int i;

    /*
     * We evade vblank and pflip interrupts on CRTCs that are undergoing
     * a modeset, being disabled, or have no active planes.
     *
     * It's done in atomic commit rather than commit tail for now since
     * some of these interrupt handlers access the current CRTC state and
     * potentially the stream pointer itself.
     *
     * Since the atomic state is swapped within atomic commit and not within
     * commit tail, this would lead to the new state (that hasn't been
     * committed yet) being accessed from within the handlers.
     *
     * TODO: Fix this so we can do this in commit tail and not have to block
     * in atomic check.
     */
    for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

        if (old_crtc_state->active &&
            (!new_crtc_state->active ||
             drm_atomic_crtc_needs_modeset(new_crtc_state)))
            manage_dm_interrupts(adev, acrtc, false);
    }
    /*
     * Add check here for SoCs that support hardware cursor plane, to
     * unset legacy_cursor_update
     */

    return drm_atomic_helper_commit(dev, state, nonblock);

    /* TODO: Handle EINTR, re-enable IRQ */
}
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
    struct drm_device *dev = state->dev;
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_display_manager *dm = &adev->dm;
    struct dm_atomic_state *dm_state;
    struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
    uint32_t i, j;
    struct drm_crtc *crtc;
    struct drm_crtc_state *old_crtc_state, *new_crtc_state;
    unsigned long flags;
    bool wait_for_vblank = true;
    struct drm_connector *connector;
    struct drm_connector_state *old_con_state, *new_con_state;
    struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
    int crtc_disable_count = 0;
    bool mode_set_reset_required = false;

    drm_atomic_helper_update_legacy_modeset_state(dev, state);

    dm_state = dm_atomic_get_new_state(state);
    if (dm_state && dm_state->context) {
        dc_state = dm_state->context;
    } else {
        /* No state changes, retain current state. */
        dc_state_temp = dc_create_state(dm->dc);
        ASSERT(dc_state_temp);
        dc_state = dc_state_temp;
        dc_resource_state_copy_construct_current(dm->dc, dc_state);
    }
    /* update changed items */
    for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

        DRM_DEBUG_DRIVER(
            "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
            "planes_changed:%d, mode_changed:%d,active_changed:%d,"
            "connectors_changed:%d\n",
            acrtc->crtc_id,
            new_crtc_state->enable,
            new_crtc_state->active,
            new_crtc_state->planes_changed,
            new_crtc_state->mode_changed,
            new_crtc_state->active_changed,
            new_crtc_state->connectors_changed);

        /* Copy all transient state flags into dc state */
        if (dm_new_crtc_state->stream) {
            amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
                                dm_new_crtc_state->stream);
        }

        /* handles headless hotplug case, updating new_state and
         * aconnector as needed
         */

        if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

            DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

            if (!dm_new_crtc_state->stream) {
                /*
                 * This could happen because of issues with the
                 * delivery of userspace notifications.
                 * In this case userspace tries to set a mode on
                 * a display which is in fact disconnected.
                 * dc_sink is NULL in this case on aconnector.
                 * We expect a reset mode will come soon.
                 *
                 * This can also happen when an unplug occurs
                 * during the resume sequence.
                 *
                 * In this case, we want to pretend we still
                 * have a sink to keep the pipe running so that
                 * hw state is consistent with the sw state.
                 */
                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                        __func__, acrtc->base.base.id);
                continue;
            }

            if (dm_old_crtc_state->stream)
                remove_stream(adev, acrtc, dm_old_crtc_state->stream);

            pm_runtime_get_noresume(dev->dev);

            acrtc->enabled = true;
            acrtc->hw_mode = new_crtc_state->mode;
            crtc->hwmode = new_crtc_state->mode;
            mode_set_reset_required = true;
        } else if (modereset_required(new_crtc_state)) {
            DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
            /* i.e. reset mode */
            if (dm_old_crtc_state->stream)
                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
            mode_set_reset_required = true;
        }
    } /* for_each_crtc_in_state() */

    if (dc_state) {
        /* if there was a mode set or reset, disable eDP PSR */
        if (mode_set_reset_required)
            amdgpu_dm_psr_disable_all(dm);

        dm_enable_per_frame_crtc_master_sync(dc_state);
        mutex_lock(&dm->dc_lock);
        WARN_ON(!dc_commit_state(dm->dc, dc_state));
        mutex_unlock(&dm->dc_lock);
    }
    for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

        if (dm_new_crtc_state->stream != NULL) {
            const struct dc_stream_status *status =
                    dc_stream_get_status(dm_new_crtc_state->stream);

            if (!status)
                status = dc_stream_get_status_from_state(dc_state,
                                     dm_new_crtc_state->stream);
            if (!status)
                DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
            else
                acrtc->otg_inst = status->primary_otg_inst;
        }
    }
#ifdef CONFIG_DRM_AMD_DC_HDCP
    for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
        struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

        new_crtc_state = NULL;

        if (acrtc)
            new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

        if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
            connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
            hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
            new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
            continue;
        }

        if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
            hdcp_update_display(
                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
                new_con_state->hdcp_content_type,
                new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
                                                     : false);
    }
#endif
7606 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7607 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7608 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7609 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7610 struct dc_surface_update dummy_updates[MAX_SURFACES];
7611 struct dc_stream_update stream_update;
7612 struct dc_info_packet hdr_packet;
7613 struct dc_stream_status *status = NULL;
7614 bool abm_changed, hdr_changed, scaling_changed;
7616 memset(&dummy_updates, 0, sizeof(dummy_updates));
7617 memset(&stream_update, 0, sizeof(stream_update));
7620 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7621 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7624 /* Skip any modesets/resets */
7625 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7628 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7629 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7631 scaling_changed = is_scaling_state_different(dm_new_con_state,
7634 abm_changed = dm_new_crtc_state->abm_level !=
7635 dm_old_crtc_state->abm_level;
7638 is_hdr_metadata_different(old_con_state, new_con_state);
7640 if (!scaling_changed && !abm_changed && !hdr_changed)
7643 stream_update.stream = dm_new_crtc_state->stream;
7644 if (scaling_changed) {
7645 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7646 dm_new_con_state, dm_new_crtc_state->stream);
7648 stream_update.src = dm_new_crtc_state->stream->src;
7649 stream_update.dst = dm_new_crtc_state->stream->dst;
7653 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7655 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7659 fill_hdr_info_packet(new_con_state, &hdr_packet);
7660 stream_update.hdr_static_metadata = &hdr_packet;
7663 status = dc_stream_get_status(dm_new_crtc_state->stream);
7665 WARN_ON(!status->plane_count);
7668 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7669 * Here we create an empty update on each plane.
7670 * To fix this, DC should permit updating only stream properties.
7672 for (j = 0; j < status->plane_count; j++)
7673 dummy_updates[j].surface = status->plane_states[0];
7676 mutex_lock(&dm->dc_lock);
7677 dc_commit_updates_for_stream(dm->dc,
7679 status->plane_count,
7680 dm_new_crtc_state->stream,
7683 mutex_unlock(&dm->dc_lock);
    /* Count number of newly disabled CRTCs for dropping PM refs later. */
    for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                      new_crtc_state, i) {
        if (old_crtc_state->active && !new_crtc_state->active)
            crtc_disable_count++;

        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

        /* Update freesync active state. */
        pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);

        /* Handle vrr on->off / off->on transitions */
        amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
                        dm_new_crtc_state);
    }

    /*
     * Enable interrupts for CRTCs that are newly enabled or went through
     * a modeset. It was intentionally deferred until after the front end
     * state was modified to wait until the OTG was on and so the IRQ
     * handlers didn't access stale or invalid state.
     */
    for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

        if (new_crtc_state->active &&
            (!old_crtc_state->active ||
             drm_atomic_crtc_needs_modeset(new_crtc_state))) {
            manage_dm_interrupts(adev, acrtc, true);
#ifdef CONFIG_DEBUG_FS
            /*
             * Frontend may have changed so reapply the CRC capture
             * settings for the stream.
             */
            dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

            if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
                amdgpu_dm_crtc_configure_crc_source(
                    crtc, dm_new_crtc_state,
                    dm_new_crtc_state->crc_src);
            }
#endif
        }
    }

    for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
        if (new_crtc_state->async_flip)
            wait_for_vblank = false;

    /* update planes when needed per crtc */
    for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

        if (dm_new_crtc_state->stream)
            amdgpu_dm_commit_planes(state, dc_state, dev,
                        dm, crtc, wait_for_vblank);
    }

    /* Update audio instances for each connector. */
    amdgpu_dm_commit_audio(dev, state);

    /*
     * send vblank event on all events not handled in flip and
     * mark consumed event for drm_atomic_helper_commit_hw_done
     */
    spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
    for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

        if (new_crtc_state->event)
            drm_send_event_locked(dev, &new_crtc_state->event->base);

        new_crtc_state->event = NULL;
    }
    spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

    /* Signal HW programming completion */
    drm_atomic_helper_commit_hw_done(state);

    if (wait_for_vblank)
        drm_atomic_helper_wait_for_flip_done(dev, state);

    drm_atomic_helper_cleanup_planes(dev, state);

    /*
     * Finally, drop a runtime PM reference for each newly disabled CRTC,
     * so we can put the GPU into runtime suspend if we're not driving any
     * displays anymore.
     */
    for (i = 0; i < crtc_disable_count; i++)
        pm_runtime_put_autosuspend(dev->dev);
    pm_runtime_mark_last_busy(dev->dev);

    if (dc_state_temp)
        dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
{
    struct drm_device *ddev = connector->dev;
    struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
    struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
    struct drm_plane *plane = disconnected_acrtc->base.primary;
    struct drm_connector_state *conn_state;
    struct drm_crtc_state *crtc_state;
    struct drm_plane_state *plane_state;
    int ret = 0;

    if (!state)
        return -ENOMEM;

    state->acquire_ctx = ddev->mode_config.acquire_ctx;

    /* Construct an atomic state to restore previous display setting */

    /*
     * Attach connectors to drm_atomic_state
     */
    conn_state = drm_atomic_get_connector_state(state, connector);

    ret = PTR_ERR_OR_ZERO(conn_state);
    if (ret)
        goto err;

    /* Attach crtc to drm_atomic_state */
    crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

    ret = PTR_ERR_OR_ZERO(crtc_state);
    if (ret)
        goto err;

    /* force a restore */
    crtc_state->mode_changed = true;

    /* Attach plane to drm_atomic_state */
    plane_state = drm_atomic_get_plane_state(state, plane);

    ret = PTR_ERR_OR_ZERO(plane_state);
    if (ret)
        goto err;

    /* Call commit internally with the state we just constructed */
    ret = drm_atomic_commit(state);
    if (!ret)
        return 0;

err:
    DRM_ERROR("Restoring old state failed with %i\n", ret);
    drm_atomic_state_put(state);

    return ret;
}
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
                    struct drm_connector *connector)
{
    struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
    struct amdgpu_crtc *disconnected_acrtc;
    struct dm_crtc_state *acrtc_state;

    if (!aconnector->dc_sink || !connector->state || !connector->encoder)
        return;

    disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
    if (!disconnected_acrtc)
        return;

    acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
    if (!acrtc_state->stream)
        return;

    /*
     * If the previous sink is not released and different from the current,
     * we deduce we are in a state where we cannot rely on a usermode call
     * to turn on the display, so we do it here.
     */
    if (acrtc_state->stream->sink != aconnector->dc_sink)
        dm_force_atomic_commit(&aconnector->base);
}
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
                 struct drm_atomic_state *state)
{
    struct drm_crtc *crtc;
    struct drm_crtc_commit *commit;
    long ret;

    /*
     * Adding all modeset locks to acquire_ctx will ensure that when the
     * framework releases it, the extra locks we are taking here will also
     * get released.
     */
    ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
    if (ret)
        return ret;

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        spin_lock(&crtc->commit_lock);
        commit = list_first_entry_or_null(&crtc->commit_list,
                struct drm_crtc_commit, commit_entry);
        if (commit)
            drm_crtc_commit_get(commit);
        spin_unlock(&crtc->commit_lock);

        if (!commit)
            continue;

        /*
         * Make sure all pending HW programming completed and
         * page flips done
         */
        ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

        if (ret > 0)
            ret = wait_for_completion_interruptible_timeout(
                    &commit->flip_done, 10*HZ);

        if (ret == 0)
            DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
                  "timed out\n", crtc->base.id, crtc->name);

        drm_crtc_commit_put(commit);
    }

    return ret < 0 ? ret : 0;
}
static void get_freesync_config_for_crtc(
    struct dm_crtc_state *new_crtc_state,
    struct dm_connector_state *new_con_state)
{
    struct mod_freesync_config config = {0};
    struct amdgpu_dm_connector *aconnector =
            to_amdgpu_dm_connector(new_con_state->base.connector);
    struct drm_display_mode *mode = &new_crtc_state->base.mode;
    int vrefresh = drm_mode_vrefresh(mode);

    new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
                    vrefresh >= aconnector->min_vfreq &&
                    vrefresh <= aconnector->max_vfreq;

    if (new_crtc_state->vrr_supported) {
        new_crtc_state->stream->ignore_msa_timing_param = true;
        config.state = new_crtc_state->base.vrr_enabled ?
            VRR_STATE_ACTIVE_VARIABLE :
            VRR_STATE_INACTIVE;
        config.min_refresh_in_uhz =
                aconnector->min_vfreq * 1000000;
        config.max_refresh_in_uhz =
                aconnector->max_vfreq * 1000000;
        config.vsif_supported = true;
        config.btr = true;
    }

    new_crtc_state->freesync_config = config;
}
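/*
 * Worked example: a FreeSync panel reporting min_vfreq = 48 and
 * max_vfreq = 144 yields config.min_refresh_in_uhz = 48,000,000 and
 * config.max_refresh_in_uhz = 144,000,000 (micro-Hz). A 60 Hz mode on that
 * panel is VRR-capable since 48 <= 60 <= 144, while a 30 Hz mode falls
 * outside the range and leaves vrr_supported == false.
 */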
static void reset_freesync_config_for_crtc(
    struct dm_crtc_state *new_crtc_state)
{
    new_crtc_state->vrr_supported = false;

    memset(&new_crtc_state->vrr_params, 0,
           sizeof(new_crtc_state->vrr_params));
    memset(&new_crtc_state->vrr_infopacket, 0,
           sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                struct drm_atomic_state *state,
                struct drm_crtc *crtc,
                struct drm_crtc_state *old_crtc_state,
                struct drm_crtc_state *new_crtc_state,
                bool enable,
                bool *lock_and_validation_needed)
{
    struct dm_atomic_state *dm_state = NULL;
    struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
    struct dc_stream_state *new_stream;
    int ret = 0;

    /*
     * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
     * update changed items
     */
    struct amdgpu_crtc *acrtc = NULL;
    struct amdgpu_dm_connector *aconnector = NULL;
    struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
    struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

    new_stream = NULL;

    dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
    dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
    acrtc = to_amdgpu_crtc(crtc);
    aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

    /* TODO This hack should go away */
    if (aconnector && enable) {
        /* Make sure fake sink is created in plug-in scenario */
        drm_new_conn_state = drm_atomic_get_new_connector_state(state,
                                &aconnector->base);
        drm_old_conn_state = drm_atomic_get_old_connector_state(state,
                                &aconnector->base);

        if (IS_ERR(drm_new_conn_state)) {
            ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
            goto fail;
        }

        dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
        dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
            goto skip_modeset;

        new_stream = create_validate_stream_for_sink(aconnector,
                                 &new_crtc_state->mode,
                                 dm_new_conn_state,
                                 dm_old_crtc_state->stream);

        /*
         * We can have no stream on ACTION_SET if a display
         * was disconnected during S3; in this case it is not an
         * error: the OS will be updated after detection and
         * will do the right thing on the next atomic commit.
         */

        if (!new_stream) {
            DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                    __func__, acrtc->base.base.id);
            ret = -ENOMEM;
            goto fail;
        }

        /*
         * TODO: Check VSDB bits to decide whether this should
         * be enabled or not.
         */
        new_stream->triggered_crtc_reset.enabled =
            dm->force_timing_sync;

        dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

        ret = fill_hdr_info_packet(drm_new_conn_state,
                       &new_stream->hdr_static_metadata);
        if (ret)
            goto fail;

        /*
         * If we already removed the old stream from the context
         * (and set the new stream to NULL) then we can't reuse
         * the old stream even if the stream and scaling are unchanged.
         * We'll hit the BUG_ON and black screen.
         *
         * TODO: Refactor this function to allow this check to work
         * in all conditions.
         */
        if (dm_new_crtc_state->stream &&
            dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
            dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
            new_crtc_state->mode_changed = false;
            DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                     new_crtc_state->mode_changed);
        }
    }
    /* mode_changed flag may get updated above, need to check again */
    if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
        goto skip_modeset;

    DRM_DEBUG_DRIVER(
        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
        "connectors_changed:%d\n",
        acrtc->crtc_id,
        new_crtc_state->enable,
        new_crtc_state->active,
        new_crtc_state->planes_changed,
        new_crtc_state->mode_changed,
        new_crtc_state->active_changed,
        new_crtc_state->connectors_changed);

    /* Remove stream for any changed/disabled CRTC */
    if (!enable) {

        if (!dm_old_crtc_state->stream)
            goto skip_modeset;

        ret = dm_atomic_get_state(state, &dm_state);
        if (ret)
            goto fail;

        DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                crtc->base.id);

        /* i.e. reset mode */
        if (dc_remove_stream_from_ctx(
                dm->dc,
                dm_state->context,
                dm_old_crtc_state->stream) != DC_OK) {
            ret = -EINVAL;
            goto fail;
        }

        dc_stream_release(dm_old_crtc_state->stream);
        dm_new_crtc_state->stream = NULL;

        reset_freesync_config_for_crtc(dm_new_crtc_state);

        *lock_and_validation_needed = true;

    } else { /* Add stream for any updated/enabled CRTC */
        /*
         * Quick fix to prevent a NULL pointer dereference on
         * new_stream when newly added MST connectors are not found
         * in the existing crtc_state (chained mode).
         * TODO: need to dig out the root cause of that.
         */
        if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
            goto skip_modeset;

        if (modereset_required(new_crtc_state))
            goto skip_modeset;

        if (modeset_required(new_crtc_state, new_stream,
                     dm_old_crtc_state->stream)) {

            WARN_ON(dm_new_crtc_state->stream);

            ret = dm_atomic_get_state(state, &dm_state);
            if (ret)
                goto fail;

            dm_new_crtc_state->stream = new_stream;

            dc_stream_retain(new_stream);

            DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
                        crtc->base.id);

            if (dc_add_stream_to_ctx(
                    dm->dc,
                    dm_state->context,
                    dm_new_crtc_state->stream) != DC_OK) {
                ret = -EINVAL;
                goto fail;
            }

            *lock_and_validation_needed = true;
        }
    }

skip_modeset:
    /* Release extra reference */
    if (new_stream)
        dc_stream_release(new_stream);

    /*
     * We want to do dc stream updates that do not require a
     * full modeset below.
     */
    if (!(enable && aconnector && new_crtc_state->active))
        return 0;
    /*
     * Given above conditions, the dc state cannot be NULL because:
     * 1. We're in the process of enabling CRTCs (just been added
     *    to the dc context, or already is on the context)
     * 2. Has a valid connector attached, and
     * 3. Is currently active and enabled.
     * => The dc stream state currently exists.
     */
    BUG_ON(dm_new_crtc_state->stream == NULL);

    /* Scaling or underscan settings */
    if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
        update_stream_scaling_settings(
            &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

    /* ABM settings */
    dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

    /*
     * Color management settings. We also update color properties
     * when a modeset is needed, to ensure it gets reprogrammed.
     */
    if (dm_new_crtc_state->base.color_mgmt_changed ||
        drm_atomic_crtc_needs_modeset(new_crtc_state)) {
        ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
        if (ret)
            goto fail;
    }

    /* Update Freesync settings. */
    get_freesync_config_for_crtc(dm_new_crtc_state,
                     dm_new_conn_state);

    return ret;

fail:
    if (new_stream)
        dc_stream_release(new_stream);
    return ret;
}
static bool should_reset_plane(struct drm_atomic_state *state,
                   struct drm_plane *plane,
                   struct drm_plane_state *old_plane_state,
                   struct drm_plane_state *new_plane_state)
{
    struct drm_plane *other;
    struct drm_plane_state *old_other_state, *new_other_state;
    struct drm_crtc_state *new_crtc_state;
    int i;

    /*
     * TODO: Remove this hack once the checks below are sufficient
     * to determine when we need to reset all the planes on
     * the stream.
     */
    if (state->allow_modeset)
        return true;

    /* Exit early if we know that we're adding or removing the plane. */
    if (old_plane_state->crtc != new_plane_state->crtc)
        return true;

    /* old crtc == new_crtc == NULL, plane not in context. */
    if (!new_plane_state->crtc)
        return false;

    new_crtc_state =
        drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

    if (!new_crtc_state)
        return true;

    /* CRTC Degamma changes currently require us to recreate planes. */
    if (new_crtc_state->color_mgmt_changed)
        return true;

    if (drm_atomic_crtc_needs_modeset(new_crtc_state))
        return true;

    /*
     * If there are any new primary or overlay planes being added or
     * removed then the z-order can potentially change. To ensure
     * correct z-order and pipe acquisition the current DC architecture
     * requires us to remove and recreate all existing planes.
     *
     * TODO: Come up with a more elegant solution for this.
     */
    for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
        struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;

        if (other->type == DRM_PLANE_TYPE_CURSOR)
            continue;

        if (old_other_state->crtc != new_plane_state->crtc &&
            new_other_state->crtc != new_plane_state->crtc)
            continue;

        if (old_other_state->crtc != new_other_state->crtc)
            return true;

        /* Src/dst size and scaling updates. */
        if (old_other_state->src_w != new_other_state->src_w ||
            old_other_state->src_h != new_other_state->src_h ||
            old_other_state->crtc_w != new_other_state->crtc_w ||
            old_other_state->crtc_h != new_other_state->crtc_h)
            return true;

        /* Rotation / mirroring updates. */
        if (old_other_state->rotation != new_other_state->rotation)
            return true;

        /* Blending updates. */
        if (old_other_state->pixel_blend_mode !=
            new_other_state->pixel_blend_mode)
            return true;

        /* Alpha updates. */
        if (old_other_state->alpha != new_other_state->alpha)
            return true;

        /* Colorspace changes. */
        if (old_other_state->color_range != new_other_state->color_range ||
            old_other_state->color_encoding != new_other_state->color_encoding)
            return true;

        /* Framebuffer checks fall at the end. */
        if (!old_other_state->fb || !new_other_state->fb)
            continue;

        /* Pixel format changes can require bandwidth updates. */
        if (old_other_state->fb->format != new_other_state->fb->format)
            return true;

        old_dm_plane_state = to_dm_plane_state(old_other_state);
        new_dm_plane_state = to_dm_plane_state(new_other_state);

        /* Tiling and DCC changes also require bandwidth updates. */
        if (old_dm_plane_state->tiling_flags !=
            new_dm_plane_state->tiling_flags)
            return true;
    }

    return false;
}
static int dm_update_plane_state(struct dc *dc,
                 struct drm_atomic_state *state,
                 struct drm_plane *plane,
                 struct drm_plane_state *old_plane_state,
                 struct drm_plane_state *new_plane_state,
                 bool enable,
                 bool *lock_and_validation_needed)
{
    struct dm_atomic_state *dm_state = NULL;
    struct drm_crtc *new_plane_crtc, *old_plane_crtc;
    struct drm_crtc_state *old_crtc_state, *new_crtc_state;
    struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
    struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
    struct amdgpu_crtc *new_acrtc;
    bool needs_reset;
    int ret = 0;

    new_plane_crtc = new_plane_state->crtc;
    old_plane_crtc = old_plane_state->crtc;
    dm_new_plane_state = to_dm_plane_state(new_plane_state);
    dm_old_plane_state = to_dm_plane_state(old_plane_state);

    /* TODO: Implement a better atomic check for the cursor plane */
    if (plane->type == DRM_PLANE_TYPE_CURSOR) {
        if (!enable || !new_plane_crtc ||
            drm_atomic_plane_disabling(plane->state, new_plane_state))
            return 0;

        new_acrtc = to_amdgpu_crtc(new_plane_crtc);

        if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
            (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
            DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
                     new_plane_state->crtc_w, new_plane_state->crtc_h);
            return -EINVAL;
        }

        return 0;
    }

    needs_reset = should_reset_plane(state, plane, old_plane_state,
                     new_plane_state);

    /* Remove any changed/removed planes */
    if (!enable) {
        if (!needs_reset)
            return 0;

        if (!old_plane_crtc)
            return 0;

        old_crtc_state = drm_atomic_get_old_crtc_state(
                state, old_plane_crtc);
        dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

        if (!dm_old_crtc_state->stream)
            return 0;

        DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
                 plane->base.id, old_plane_crtc->base.id);

        ret = dm_atomic_get_state(state, &dm_state);
        if (ret)
            return ret;

        if (!dc_remove_plane_from_context(
                dc,
                dm_old_crtc_state->stream,
                dm_old_plane_state->dc_state,
                dm_state->context)) {

            return -EINVAL;
        }

        dc_plane_state_release(dm_old_plane_state->dc_state);
        dm_new_plane_state->dc_state = NULL;

        *lock_and_validation_needed = true;

8388 } else { /* Add new planes */
8389 struct dc_plane_state *dc_new_plane_state;
8391 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8394 if (!new_plane_crtc)
8397 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8398 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8400 if (!dm_new_crtc_state->stream)
8406 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8410 WARN_ON(dm_new_plane_state->dc_state);
8412 dc_new_plane_state = dc_create_plane_state(dc);
8413 if (!dc_new_plane_state)
8416 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8417 plane->base.id, new_plane_crtc->base.id);
8419 ret = fill_dc_plane_attributes(
8420 drm_to_adev(new_plane_crtc->dev),
8425 dc_plane_state_release(dc_new_plane_state);
8429 ret = dm_atomic_get_state(state, &dm_state);
8431 dc_plane_state_release(dc_new_plane_state);
8436 * Any atomic check errors that occur after this will
8437 * not need a release. The plane state will be attached
8438 * to the stream, and therefore part of the atomic
8439 * state. It'll be released when the atomic state is
8442 if (!dc_add_plane_to_context(
8444 dm_new_crtc_state->stream,
8446 dm_state->context)) {
8448 dc_plane_state_release(dc_new_plane_state);
8452 dm_new_plane_state->dc_state = dc_new_plane_state;
8454 /* Tell DC to do a full surface update every time there
8455 * is a plane change. Inefficient, but works for now.
8457 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8459 *lock_and_validation_needed = true;
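
/*
 * Note on usage: dm_update_plane_state() is deliberately called in two
 * passes from amdgpu_dm_atomic_check() below - first with enable == false
 * to remove changed/removed planes from the DC context, then with
 * enable == true to add the new/modified ones back. A condensed sketch of
 * the call pattern (the real loops appear later in this file):
 *
 *	for_each_oldnew_plane_in_state_reverse(state, plane, old, new, i)
 *		dm_update_plane_state(dc, state, plane, old, new,
 *				      false, &lock_and_validation_needed);
 *	...
 *	for_each_oldnew_plane_in_state_reverse(state, plane, old, new, i)
 *		dm_update_plane_state(dc, state, plane, old, new,
 *				      true, &lock_and_validation_needed);
 */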
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->port || !aconnector->mst_port)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
#endif
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full-update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code on validation failure.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		/* An ABM level change requires the CRTC to be reprogrammed. */
		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}
#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}
	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}
	/* Prepass for updating tiling flags on new planes. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);

		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
				  &new_dm_plane_state->tmz_surface);
		if (ret)
			goto fail;
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}
	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes. */
	/* TODO: Scaling-change validation was removed because a new stream
	 * cannot currently be committed into the context without causing a
	 * full reset. Decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}
	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * otherwise.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs-1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
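
/*
 * Illustrative sketch (not driver code): amdgpu_dm_atomic_check() runs for
 * every DRM_IOCTL_MODE_ATOMIC, including TEST_ONLY commits. A userspace
 * client using libdrm can therefore probe whether a configuration would
 * validate, without committing it, along these lines:
 *
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *	drmModeAtomicAddProperty(req, crtc_id, active_prop_id, 1);
 *	// ... add further plane/connector properties ...
 *	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
 *	drmModeAtomicFree(req);
 *
 * A negative ret corresponds to the error paths above (e.g. -EINVAL from a
 * failed DC global validation).
 */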
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data)))
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);

	return capable;
}
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * The EDID is non-NULL here; restrict FreeSync to DP and eDP sinks.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
	    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}

	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
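
/*
 * Worked example (hypothetical EDID values): a monitor range descriptor
 * reporting min_vfreq = 40 and max_vfreq = 75 gives 75 - 40 = 35 > 10, so
 * freesync_capable is set; a fixed-refresh panel advertising 59-61 Hz
 * (a delta of 2) stays below the 10 Hz threshold and is not treated as
 * FreeSync capable. Note the EDID range descriptor encodes the maximum
 * pixel clock in units of 10 MHz, hence the multiplication by 10 above.
 */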
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
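
/*
 * For reference: DP_PSR_SUPPORT (DPCD 0x070) carries the sink's PSR
 * capability version, where 0 means PSR is not supported. The mapping
 * above collapses any non-zero value to DC_PSR_VERSION_1, so a panel
 * reporting a PSR2-capable value (2) is still configured as PSR1 here.
 */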
/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate the number of static frames before generating the
	 * interrupt to enter PSR.
	 */
	/* Init fail-safe of 2 frames static */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
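
/*
 * Worked example (hypothetical CEA 1080p60 timing): pix_clk_100hz = 1485000,
 * v_total = 1125 and h_total = 2200 give vsync_rate_hz =
 * 148500000 / 1125 / 2200 = 60, so frame_time_microsec = 1000000 / 60 = 16666
 * and num_frames_static = (30000 / 16666) + 1 = 2, i.e. roughly 33 ms of
 * static screen before PSR entry is signalled.
 */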
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}

/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w
 * if psr is enabled on any stream
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}