2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
103 #if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
104 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
112 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
116 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
131 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133 * requests into DC requests, and DC responses into DRM responses.
135 * The root control structure is &struct amdgpu_display_manager.
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 switch (link->dpcd_caps.dongle_type) {
145 case DISPLAY_DONGLE_NONE:
146 return DRM_MODE_SUBCONNECTOR_Native;
147 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
148 return DRM_MODE_SUBCONNECTOR_VGA;
149 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
150 case DISPLAY_DONGLE_DP_DVI_DONGLE:
151 return DRM_MODE_SUBCONNECTOR_DVID;
152 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
153 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
154 return DRM_MODE_SUBCONNECTOR_HDMIA;
155 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 return DRM_MODE_SUBCONNECTOR_Unknown;
161 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 struct dc_link *link = aconnector->dc_link;
164 struct drm_connector *connector = &aconnector->base;
165 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 if (aconnector->dc_sink)
171 subconnector = get_subconnector_type(link);
173 drm_object_property_set_value(&connector->base,
174 connector->dev->mode_config.dp_subconnector_property,
179 * initializes drm_device display-related structures, based on the information
180 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
181 * drm_encoder, drm_mode_config
183 * Returns 0 on success
185 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
186 /* removes and deallocates the drm structures, created by the above function */
187 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
190 struct drm_plane *plane,
191 unsigned long possible_crtcs,
192 const struct dc_plane_cap *plane_cap);
193 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
194 struct drm_plane *plane,
195 uint32_t link_index);
196 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
197 struct amdgpu_dm_connector *amdgpu_dm_connector,
199 struct amdgpu_encoder *amdgpu_encoder);
200 static int amdgpu_dm_encoder_init(struct drm_device *dev,
201 struct amdgpu_encoder *aencoder,
202 uint32_t link_index);
204 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
207 struct drm_atomic_state *state,
210 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
212 static int amdgpu_dm_atomic_check(struct drm_device *dev,
213 struct drm_atomic_state *state);
215 static void handle_cursor_update(struct drm_plane *plane,
216 struct drm_plane_state *old_plane_state);
218 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
219 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
220 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
221 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
222 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
225 * dm_vblank_get_counter
228 * Get counter for number of vertical blanks
231 * struct amdgpu_device *adev - [in] desired amdgpu device
232 * int disp_idx - [in] which CRTC to get the counter from
235 * Counter for vertical blanks
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
239 if (crtc >= adev->mode_info.num_crtc)
242 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
244 if (acrtc->dm_irq_params.stream == NULL) {
245 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
250 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255 u32 *vbl, u32 *position)
257 uint32_t v_blank_start, v_blank_end, h_position, v_position;
259 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
264 if (acrtc->dm_irq_params.stream == NULL) {
265 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
271 * TODO rework base driver to use values directly.
272 * for now parse it back into reg-format
274 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
280 *position = v_position | (h_position << 16);
281 *vbl = v_blank_start | (v_blank_end << 16);
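/* Packed in the legacy reg-format noted above: the vertical value sits in
 * the low 16 bits, the horizontal value (or vblank end line) in the high
 * 16 bits.
 */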
287 static bool dm_is_idle(void *handle)
293 static int dm_wait_for_idle(void *handle)
299 static bool dm_check_soft_reset(void *handle)
304 static int dm_soft_reset(void *handle)
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 struct drm_device *dev = adev_to_drm(adev);
315 struct drm_crtc *crtc;
316 struct amdgpu_crtc *amdgpu_crtc;
318 if (otg_inst == -1) {
320 return adev->mode_info.crtcs[0];
323 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324 amdgpu_crtc = to_amdgpu_crtc(crtc);
326 if (amdgpu_crtc->otg_inst == otg_inst)
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
335 return acrtc->dm_irq_params.freesync_config.state ==
336 VRR_STATE_ACTIVE_VARIABLE ||
337 acrtc->dm_irq_params.freesync_config.state ==
338 VRR_STATE_ACTIVE_FIXED;
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
343 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
348 * dm_pflip_high_irq() - Handle pageflip interrupt
349 * @interrupt_params: ignored
351 * Handles the pageflip interrupt by notifying all interested parties
352 * that the pageflip has been completed.
354 static void dm_pflip_high_irq(void *interrupt_params)
356 struct amdgpu_crtc *amdgpu_crtc;
357 struct common_irq_params *irq_params = interrupt_params;
358 struct amdgpu_device *adev = irq_params->adev;
360 struct drm_pending_vblank_event *e;
361 uint32_t vpos, hpos, v_blank_start, v_blank_end;
364 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
366 /* IRQ could occur when in initial stage */
367 /* TODO work and BO cleanup */
368 if (amdgpu_crtc == NULL) {
369 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
373 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
375 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
376 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
377 amdgpu_crtc->pflip_status,
378 AMDGPU_FLIP_SUBMITTED,
379 amdgpu_crtc->crtc_id,
381 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
385 /* page flip completed. */
386 e = amdgpu_crtc->event;
387 amdgpu_crtc->event = NULL;
392 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
394 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
396 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
397 &v_blank_end, &hpos, &vpos) ||
398 (vpos < v_blank_start)) {
399 /* Update to correct count and vblank timestamp if racing with
400 * vblank irq. This also updates to the correct vblank timestamp
401 * even in VRR mode, as scanout is past the front-porch atm.
403 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
405 /* Wake up userspace by sending the pageflip event with proper
406 * count and timestamp of vblank of flip completion.
409 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
411 /* Event sent, so done with vblank for this flip */
412 drm_crtc_vblank_put(&amdgpu_crtc->base);
415 /* VRR active and inside front-porch: vblank count and
416 * timestamp for pageflip event will only be up to date after
417 * drm_crtc_handle_vblank() has been executed from late vblank
418 * irq handler after start of back-porch (vline 0). We queue the
419 * pageflip event for send-out by drm_crtc_handle_vblank() with
420 * updated timestamp and count, once it runs after us.
422 * We need to open-code this instead of using the helper
423 * drm_crtc_arm_vblank_event(), as that helper would
424 * call drm_crtc_accurate_vblank_count(), which we must
425 * not call in VRR mode while we are in front-porch!
428 /* sequence will be replaced by real count during send-out. */
429 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
430 e->pipe = amdgpu_crtc->crtc_id;
432 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
436 /* Keep track of vblank of this flip for flip throttling. We use the
437 * cooked hw counter, as that one is incremented at the start of this vblank
438 * of pageflip completion, so last_flip_vblank is the forbidden count
439 * for queueing new pageflips if vsync + VRR is enabled.
441 amdgpu_crtc->dm_irq_params.last_flip_vblank =
442 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
444 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
445 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
447 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
448 amdgpu_crtc->crtc_id, amdgpu_crtc,
449 vrr_active, (int) !e);
452 static void dm_vupdate_high_irq(void *interrupt_params)
454 struct common_irq_params *irq_params = interrupt_params;
455 struct amdgpu_device *adev = irq_params->adev;
456 struct amdgpu_crtc *acrtc;
460 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
463 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
465 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
469 /* Core vblank handling is done here after end of front-porch in
470 * vrr mode, as vblank timestamping gives valid results now that
471 * it is done after the front-porch. This will also deliver
472 * page-flip completion events that have been queued to us
473 * if a pageflip happened inside front-porch.
476 drm_crtc_handle_vblank(&acrtc->base);
478 /* BTR processing for pre-DCE12 ASICs */
479 if (acrtc->dm_irq_params.stream &&
480 adev->family < AMDGPU_FAMILY_AI) {
481 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
482 mod_freesync_handle_v_update(
483 adev->dm.freesync_module,
484 acrtc->dm_irq_params.stream,
485 &acrtc->dm_irq_params.vrr_params);
487 dc_stream_adjust_vmin_vmax(
489 acrtc->dm_irq_params.stream,
490 &acrtc->dm_irq_params.vrr_params.adjust);
491 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
498 * dm_crtc_high_irq() - Handles CRTC interrupt
499 * @interrupt_params: used for determining the CRTC instance
501 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
504 static void dm_crtc_high_irq(void *interrupt_params)
506 struct common_irq_params *irq_params = interrupt_params;
507 struct amdgpu_device *adev = irq_params->adev;
508 struct amdgpu_crtc *acrtc;
512 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
516 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
518 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
519 vrr_active, acrtc->dm_irq_params.active_planes);
522 * Core vblank handling at start of front-porch is only possible
523 * in non-vrr mode, as only then does vblank timestamping give
524 * valid results while done in front-porch. Otherwise defer it
525 * to dm_vupdate_high_irq after end of front-porch.
528 drm_crtc_handle_vblank(&acrtc->base);
531 * Following stuff must happen at start of vblank, for crc
532 * computation and below-the-range btr support in vrr mode.
534 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
536 /* BTR updates need to happen before VUPDATE on Vega and above. */
537 if (adev->family < AMDGPU_FAMILY_AI)
540 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
542 if (acrtc->dm_irq_params.stream &&
543 acrtc->dm_irq_params.vrr_params.supported &&
544 acrtc->dm_irq_params.freesync_config.state ==
545 VRR_STATE_ACTIVE_VARIABLE) {
546 mod_freesync_handle_v_update(adev->dm.freesync_module,
547 acrtc->dm_irq_params.stream,
548 &acrtc->dm_irq_params.vrr_params);
550 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
551 &acrtc->dm_irq_params.vrr_params.adjust);
555 * If there aren't any active_planes then DCH HUBP may be clock-gated.
556 * In that case, pageflip completion interrupts won't fire and pageflip
557 * completion events won't get delivered. Prevent this by sending
558 * pending pageflip events from here if a flip is still pending.
560 * If any planes are enabled, use dm_pflip_high_irq() instead, to
561 * avoid race conditions between flip programming and completion,
562 * which could cause too early flip completion events.
564 if (adev->family >= AMDGPU_FAMILY_RV &&
565 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
566 acrtc->dm_irq_params.active_planes == 0) {
568 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
570 drm_crtc_vblank_put(&acrtc->base);
572 acrtc->pflip_status = AMDGPU_FLIP_NONE;
575 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
578 static int dm_set_clockgating_state(void *handle,
579 enum amd_clockgating_state state)
584 static int dm_set_powergating_state(void *handle,
585 enum amd_powergating_state state)
590 /* Prototypes of private functions */
591 static int dm_early_init(void* handle);
593 /* Allocate memory for FBC compressed data */
594 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
596 struct drm_device *dev = connector->dev;
597 struct amdgpu_device *adev = drm_to_adev(dev);
598 struct dm_comressor_info *compressor = &adev->dm.compressor;
599 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
600 struct drm_display_mode *mode;
601 unsigned long max_size = 0;
603 if (adev->dm.dc->fbc_compressor == NULL)
606 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
609 if (compressor->bo_ptr)
613 list_for_each_entry(mode, &connector->modes, head) {
614 if (max_size < mode->htotal * mode->vtotal)
615 max_size = mode->htotal * mode->vtotal;
619 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
620 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
621 &compressor->gpu_addr, &compressor->cpu_addr);
624 DRM_ERROR("DM: Failed to initialize FBC\n");
626 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
627 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
634 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
635 int pipe, bool *enabled,
636 unsigned char *buf, int max_bytes)
638 struct drm_device *dev = dev_get_drvdata(kdev);
639 struct amdgpu_device *adev = drm_to_adev(dev);
640 struct drm_connector *connector;
641 struct drm_connector_list_iter conn_iter;
642 struct amdgpu_dm_connector *aconnector;
647 mutex_lock(&adev->dm.audio_lock);
649 drm_connector_list_iter_begin(dev, &conn_iter);
650 drm_for_each_connector_iter(connector, &conn_iter) {
651 aconnector = to_amdgpu_dm_connector(connector);
652 if (aconnector->audio_inst != port)
656 ret = drm_eld_size(connector->eld);
657 memcpy(buf, connector->eld, min(max_bytes, ret));
661 drm_connector_list_iter_end(&conn_iter);
663 mutex_unlock(&adev->dm.audio_lock);
665 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
670 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
671 .get_eld = amdgpu_dm_audio_component_get_eld,
674 static int amdgpu_dm_audio_component_bind(struct device *kdev,
675 struct device *hda_kdev, void *data)
677 struct drm_device *dev = dev_get_drvdata(kdev);
678 struct amdgpu_device *adev = drm_to_adev(dev);
679 struct drm_audio_component *acomp = data;
681 acomp->ops = &amdgpu_dm_audio_component_ops;
683 adev->dm.audio_component = acomp;
688 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
689 struct device *hda_kdev, void *data)
691 struct drm_device *dev = dev_get_drvdata(kdev);
692 struct amdgpu_device *adev = drm_to_adev(dev);
693 struct drm_audio_component *acomp = data;
697 adev->dm.audio_component = NULL;
700 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
701 .bind = amdgpu_dm_audio_component_bind,
702 .unbind = amdgpu_dm_audio_component_unbind,
705 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
712 adev->mode_info.audio.enabled = true;
714 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
716 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
717 adev->mode_info.audio.pin[i].channels = -1;
718 adev->mode_info.audio.pin[i].rate = -1;
719 adev->mode_info.audio.pin[i].bits_per_sample = -1;
720 adev->mode_info.audio.pin[i].status_bits = 0;
721 adev->mode_info.audio.pin[i].category_code = 0;
722 adev->mode_info.audio.pin[i].connected = false;
723 adev->mode_info.audio.pin[i].id =
724 adev->dm.dc->res_pool->audios[i]->inst;
725 adev->mode_info.audio.pin[i].offset = 0;
728 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
732 adev->dm.audio_registered = true;
737 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
742 if (!adev->mode_info.audio.enabled)
745 if (adev->dm.audio_registered) {
746 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
747 adev->dm.audio_registered = false;
750 /* TODO: Disable audio? */
752 adev->mode_info.audio.enabled = false;
755 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
757 struct drm_audio_component *acomp = adev->dm.audio_component;
759 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
760 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
762 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
767 static int dm_dmub_hw_init(struct amdgpu_device *adev)
769 const struct dmcub_firmware_header_v1_0 *hdr;
770 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
771 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
772 const struct firmware *dmub_fw = adev->dm.dmub_fw;
773 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
774 struct abm *abm = adev->dm.dc->res_pool->abm;
775 struct dmub_srv_hw_params hw_params;
776 enum dmub_status status;
777 const unsigned char *fw_inst_const, *fw_bss_data;
778 uint32_t i, fw_inst_const_size, fw_bss_data_size;
782 /* DMUB isn't supported on the ASIC. */
786 DRM_ERROR("No framebuffer info for DMUB service.\n");
791 /* Firmware required for DMUB support. */
792 DRM_ERROR("No firmware provided for DMUB.\n");
796 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
797 if (status != DMUB_STATUS_OK) {
798 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
802 if (!has_hw_support) {
803 DRM_INFO("DMUB unsupported on ASIC\n");
807 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
809 fw_inst_const = dmub_fw->data +
810 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
813 fw_bss_data = dmub_fw->data +
814 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
815 le32_to_cpu(hdr->inst_const_bytes);
817 /* Copy firmware and bios info into FB memory. */
818 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
819 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
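/* Note: inst_const_bytes in the header covers the PSP-signed image, so the
 * PSP header and footer (presumably signing metadata) are excluded here and
 * only the raw instruction payload is copied into the cache window.
 */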
821 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
823 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
824 * amdgpu_ucode_init_single_fw will load dmub firmware
825 * fw_inst_const part to cw0; otherwise, the firmware back door load
826 * will be done by dm_dmub_hw_init
828 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
829 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
833 if (fw_bss_data_size)
834 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
835 fw_bss_data, fw_bss_data_size);
837 /* Copy firmware bios info into FB memory. */
838 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
841 /* Reset regions that need to be reset. */
842 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
843 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
845 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
846 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
848 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
849 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
851 /* Initialize hardware. */
852 memset(&hw_params, 0, sizeof(hw_params));
853 hw_params.fb_base = adev->gmc.fb_start;
854 hw_params.fb_offset = adev->gmc.aper_base;
856 /* backdoor load firmware and trigger dmub running */
857 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
858 hw_params.load_inst_const = true;
861 hw_params.psp_version = dmcu->psp_version;
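/* Hand every cached framebuffer region to DMUB so it can program its
 * cache windows (cw0 for inst_const, etc.) during hw init.
 */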
863 for (i = 0; i < fb_info->num_fb; ++i)
864 hw_params.fb[i] = &fb_info->fb[i];
866 status = dmub_srv_hw_init(dmub_srv, &hw_params);
867 if (status != DMUB_STATUS_OK) {
868 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
872 /* Wait for firmware load to finish. */
873 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
874 if (status != DMUB_STATUS_OK)
875 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
877 /* Init DMCU and ABM if available. */
879 dmcu->funcs->dmcu_init(dmcu);
880 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
883 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
884 if (!adev->dm.dc->ctx->dmub_srv) {
885 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
889 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
890 adev->dm.dmcub_fw_version);
895 static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
896 struct drm_atomic_state *state)
898 struct drm_connector *connector;
899 struct drm_crtc *crtc;
900 struct amdgpu_dm_connector *amdgpu_dm_connector;
901 struct drm_connector_state *conn_state;
902 struct dm_crtc_state *acrtc_state;
903 struct drm_crtc_state *crtc_state;
904 struct dc_stream_state *stream;
905 struct drm_device *dev = adev_to_drm(adev);
907 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
909 amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
910 conn_state = connector->state;
912 if (!(conn_state && conn_state->crtc))
915 crtc = conn_state->crtc;
916 acrtc_state = to_dm_crtc_state(crtc->state);
918 if (!(acrtc_state && acrtc_state->stream))
921 stream = acrtc_state->stream;
923 if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
924 amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
925 amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
926 amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
927 conn_state = drm_atomic_get_connector_state(state, connector);
928 crtc_state = drm_atomic_get_crtc_state(state, crtc);
929 crtc_state->mode_changed = true;
934 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
937 uint32_t logical_addr_low;
938 uint32_t logical_addr_high;
939 uint32_t agp_base, agp_bot, agp_top;
940 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
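/* The shifts below reflect the granularities the MMHUB/DCN programming
 * apparently expects: the system aperture is expressed in 256KB units
 * (>> 18), the AGP aperture in 16MB units (>> 24), and the GART page
 * table addresses in 4KB pages (>> 12), with high_part holding bits
 * 44..47 of the 48-bit address.
 */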
942 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
943 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
945 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
947 * Raven2 has a HW issue that prevents it from using the vram which
948 * lies above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
949 * increase the system aperture high address (add 1)
950 * to get rid of the VM fault and hardware hang.
952 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
954 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
957 agp_bot = adev->gmc.agp_start >> 24;
958 agp_top = adev->gmc.agp_end >> 24;
961 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
962 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
963 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
964 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
965 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
966 page_table_base.low_part = lower_32_bits(pt_base);
968 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
969 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
971 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
972 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
973 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
975 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
976 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
977 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
979 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
980 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
981 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
983 pa_config->is_hvm_enabled = 0;
988 static int amdgpu_dm_init(struct amdgpu_device *adev)
990 struct dc_init_data init_data;
991 #ifdef CONFIG_DRM_AMD_DC_HDCP
992 struct dc_callback_init init_params;
994 struct dc_phy_addr_space_config pa_config;
997 adev->dm.ddev = adev_to_drm(adev);
998 adev->dm.adev = adev;
1000 /* Zero all the fields */
1001 memset(&init_data, 0, sizeof(init_data));
1002 #ifdef CONFIG_DRM_AMD_DC_HDCP
1003 memset(&init_params, 0, sizeof(init_params));
1006 mutex_init(&adev->dm.dc_lock);
1007 mutex_init(&adev->dm.audio_lock);
1009 if(amdgpu_dm_irq_init(adev)) {
1010 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1014 init_data.asic_id.chip_family = adev->family;
1016 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1017 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1019 init_data.asic_id.vram_width = adev->gmc.vram_width;
1020 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1021 init_data.asic_id.atombios_base_address =
1022 adev->mode_info.atom_context->bios;
1024 init_data.driver = adev;
1026 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1028 if (!adev->dm.cgs_device) {
1029 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1033 init_data.cgs_device = adev->dm.cgs_device;
1035 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1037 switch (adev->asic_type) {
1042 init_data.flags.gpu_vm_support = true;
1043 #if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
1044 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1045 init_data.flags.disable_dmcu = true;
1052 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1053 init_data.flags.fbc_support = true;
1055 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1056 init_data.flags.multi_mon_pp_mclk_switch = true;
1058 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1059 init_data.flags.disable_fractional_pwm = true;
1061 init_data.flags.power_down_display_on_boot = true;
1063 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1065 /* Display Core create. */
1066 adev->dm.dc = dc_create(&init_data);
1069 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1071 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1075 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1076 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1077 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1080 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1081 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1083 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1084 adev->dm.dc->debug.disable_stutter = true;
1086 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1087 adev->dm.dc->debug.disable_dsc = true;
1089 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1090 adev->dm.dc->debug.disable_clock_gate = true;
1092 r = dm_dmub_hw_init(adev);
1094 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1098 dc_hardware_init(adev->dm.dc);
1100 #if defined(CONFIG_DRM_AMD_DC_DCN)
1101 if (adev->asic_type == CHIP_RENOIR) {
1102 mmhub_read_system_context(adev, &pa_config);
1104 // Call the DC init_memory func
1105 dc_setup_system_context(adev->dm.dc, &pa_config);
1109 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1110 if (!adev->dm.freesync_module) {
1112 "amdgpu: failed to initialize freesync_module.\n");
1114 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1115 adev->dm.freesync_module);
1117 amdgpu_dm_init_color_mod();
1119 #ifdef CONFIG_DRM_AMD_DC_HDCP
1120 if (adev->asic_type >= CHIP_RAVEN) {
1121 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1123 if (!adev->dm.hdcp_workqueue)
1124 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1126 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1128 dc_init_callbacks(adev->dm.dc, &init_params);
1131 if (amdgpu_dm_initialize_drm_device(adev)) {
1133 "amdgpu: failed to initialize sw for display support.\n");
1137 /* Update the actual number of crtcs in use */
1138 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1140 /* create fake encoders for MST */
1141 dm_dp_create_fake_mst_encoders(adev);
1143 /* TODO: Add_display_info? */
1145 /* TODO use dynamic cursor width */
1146 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1147 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1149 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1151 "amdgpu: failed to initialize sw for display support.\n");
1156 DRM_DEBUG_DRIVER("KMS initialized.\n");
1160 amdgpu_dm_fini(adev);
1165 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1169 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1170 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1173 amdgpu_dm_audio_fini(adev);
1175 amdgpu_dm_destroy_drm_device(&adev->dm);
1177 #ifdef CONFIG_DRM_AMD_DC_HDCP
1178 if (adev->dm.hdcp_workqueue) {
1179 hdcp_destroy(adev->dm.hdcp_workqueue);
1180 adev->dm.hdcp_workqueue = NULL;
1184 dc_deinit_callbacks(adev->dm.dc);
1186 if (adev->dm.dc->ctx->dmub_srv) {
1187 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1188 adev->dm.dc->ctx->dmub_srv = NULL;
1191 if (adev->dm.dmub_bo)
1192 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1193 &adev->dm.dmub_bo_gpu_addr,
1194 &adev->dm.dmub_bo_cpu_addr);
1196 /* DC Destroy TODO: Replace destroy DAL */
1198 dc_destroy(&adev->dm.dc);
1200 * TODO: pageflip, vblank interrupt
1202 * amdgpu_dm_irq_fini(adev);
1205 if (adev->dm.cgs_device) {
1206 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1207 adev->dm.cgs_device = NULL;
1209 if (adev->dm.freesync_module) {
1210 mod_freesync_destroy(adev->dm.freesync_module);
1211 adev->dm.freesync_module = NULL;
1214 mutex_destroy(&adev->dm.audio_lock);
1215 mutex_destroy(&adev->dm.dc_lock);
1220 static int load_dmcu_fw(struct amdgpu_device *adev)
1222 const char *fw_name_dmcu = NULL;
1224 const struct dmcu_firmware_header_v1_0 *hdr;
1226 switch(adev->asic_type) {
1227 #if defined(CONFIG_DRM_AMD_DC_SI)
1242 case CHIP_POLARIS11:
1243 case CHIP_POLARIS10:
1244 case CHIP_POLARIS12:
1252 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1253 case CHIP_SIENNA_CICHLID:
1254 case CHIP_NAVY_FLOUNDER:
1256 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
1257 case CHIP_DIMGREY_CAVEFISH:
1259 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
1264 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1267 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1268 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1269 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1270 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1275 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1279 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1280 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1284 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1286 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1287 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1288 adev->dm.fw_dmcu = NULL;
1292 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1297 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1299 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1301 release_firmware(adev->dm.fw_dmcu);
1302 adev->dm.fw_dmcu = NULL;
1306 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
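/* The DMCU image contains both an ERAM section and an interrupt vector
 * (INTV) section; register them as separate ucode entries so PSP loads
 * each piece, and account for both (page-aligned) in fw_size.
 */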
1307 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1308 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1309 adev->firmware.fw_size +=
1310 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1312 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1313 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1314 adev->firmware.fw_size +=
1315 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1317 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1319 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1324 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1326 struct amdgpu_device *adev = ctx;
1328 return dm_read_reg(adev->dm.dc->ctx, address);
1331 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1334 struct amdgpu_device *adev = ctx;
1336 return dm_write_reg(adev->dm.dc->ctx, address, value);
1339 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1341 struct dmub_srv_create_params create_params;
1342 struct dmub_srv_region_params region_params;
1343 struct dmub_srv_region_info region_info;
1344 struct dmub_srv_fb_params fb_params;
1345 struct dmub_srv_fb_info *fb_info;
1346 struct dmub_srv *dmub_srv;
1347 const struct dmcub_firmware_header_v1_0 *hdr;
1348 const char *fw_name_dmub;
1349 enum dmub_asic dmub_asic;
1350 enum dmub_status status;
1353 switch (adev->asic_type) {
1355 dmub_asic = DMUB_ASIC_DCN21;
1356 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1357 #if defined(CONFIG_DRM_AMD_DC_GREEN_SARDINE)
1358 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1359 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1362 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1363 case CHIP_SIENNA_CICHLID:
1364 dmub_asic = DMUB_ASIC_DCN30;
1365 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1367 case CHIP_NAVY_FLOUNDER:
1368 dmub_asic = DMUB_ASIC_DCN30;
1369 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1372 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
1374 dmub_asic = DMUB_ASIC_DCN301;
1375 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1378 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
1379 case CHIP_DIMGREY_CAVEFISH:
1380 dmub_asic = DMUB_ASIC_DCN302;
1381 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1386 /* ASIC doesn't support DMUB. */
1390 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1392 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1396 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1398 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1402 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1404 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1405 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1406 AMDGPU_UCODE_ID_DMCUB;
1407 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1409 adev->firmware.fw_size +=
1410 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1412 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1413 adev->dm.dmcub_fw_version);
1416 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1418 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1419 dmub_srv = adev->dm.dmub_srv;
1422 DRM_ERROR("Failed to allocate DMUB service!\n");
1426 memset(&create_params, 0, sizeof(create_params));
1427 create_params.user_ctx = adev;
1428 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1429 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1430 create_params.asic = dmub_asic;
1432 /* Create the DMUB service. */
1433 status = dmub_srv_create(dmub_srv, &create_params);
1434 if (status != DMUB_STATUS_OK) {
1435 DRM_ERROR("Error creating DMUB service: %d\n", status);
1439 /* Calculate the size of all the regions for the DMUB service. */
1440 memset(®ion_params, 0, sizeof(region_params));
1442 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1443 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1444 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1445 region_params.vbios_size = adev->bios_size;
1446 region_params.fw_bss_data = region_params.bss_data_size ?
1447 adev->dm.dmub_fw->data +
1448 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1449 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1450 region_params.fw_inst_const =
1451 adev->dm.dmub_fw->data +
1452 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1455 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1458 if (status != DMUB_STATUS_OK) {
1459 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1464 * Allocate a framebuffer based on the total size of all the regions.
1465 * TODO: Move this into GART.
1467 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1468 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1469 &adev->dm.dmub_bo_gpu_addr,
1470 &adev->dm.dmub_bo_cpu_addr);
1474 /* Rebase the regions on the framebuffer address. */
1475 memset(&fb_params, 0, sizeof(fb_params));
1476 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1477 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1478 fb_params.region_info = ®ion_info;
1480 adev->dm.dmub_fb_info =
1481 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1482 fb_info = adev->dm.dmub_fb_info;
1486 "Failed to allocate framebuffer info for DMUB service!\n");
1490 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1491 if (status != DMUB_STATUS_OK) {
1492 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1499 static int dm_sw_init(void *handle)
1501 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1504 r = dm_dmub_sw_init(adev);
1508 return load_dmcu_fw(adev);
1511 static int dm_sw_fini(void *handle)
1513 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1515 kfree(adev->dm.dmub_fb_info);
1516 adev->dm.dmub_fb_info = NULL;
1518 if (adev->dm.dmub_srv) {
1519 dmub_srv_destroy(adev->dm.dmub_srv);
1520 adev->dm.dmub_srv = NULL;
1523 release_firmware(adev->dm.dmub_fw);
1524 adev->dm.dmub_fw = NULL;
1526 release_firmware(adev->dm.fw_dmcu);
1527 adev->dm.fw_dmcu = NULL;
1532 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1534 struct amdgpu_dm_connector *aconnector;
1535 struct drm_connector *connector;
1536 struct drm_connector_list_iter iter;
1539 drm_connector_list_iter_begin(dev, &iter);
1540 drm_for_each_connector_iter(connector, &iter) {
1541 aconnector = to_amdgpu_dm_connector(connector);
1542 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1543 aconnector->mst_mgr.aux) {
1544 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1546 aconnector->base.base.id);
1548 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1550 DRM_ERROR("DM_MST: Failed to start MST\n");
1551 aconnector->dc_link->type =
1552 dc_connection_single;
1557 drm_connector_list_iter_end(&iter);
1562 static int dm_late_init(void *handle)
1564 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1566 struct dmcu_iram_parameters params;
1567 unsigned int linear_lut[16];
1569 struct dmcu *dmcu = NULL;
1572 dmcu = adev->dm.dc->res_pool->dmcu;
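/* Build a 16-entry linear (identity) backlight LUT spanning 0..0xFFFF
 * for the DMCU/ABM iram parameters filled in below.
 */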
1574 for (i = 0; i < 16; i++)
1575 linear_lut[i] = 0xFFFF * i / 15;
1578 params.backlight_ramping_start = 0xCCCC;
1579 params.backlight_ramping_reduction = 0xCCCCCCCC;
1580 params.backlight_lut_array_size = 16;
1581 params.backlight_lut_array = linear_lut;
1583 /* Min backlight level after ABM reduction; don't allow below 1%
1584 * 0xFFFF x 0.01 = 0x28F
1586 params.min_abm_backlight = 0x28F;
1588 /* In the case where abm is implemented on dmcub,
1589 * dmcu object will be null.
1590 * ABM 2.4 and up are implemented on dmcub.
1593 ret = dmcu_load_iram(dmcu, params);
1594 else if (adev->dm.dc->ctx->dmub_srv)
1595 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1600 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1603 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1605 struct amdgpu_dm_connector *aconnector;
1606 struct drm_connector *connector;
1607 struct drm_connector_list_iter iter;
1608 struct drm_dp_mst_topology_mgr *mgr;
1610 bool need_hotplug = false;
1612 drm_connector_list_iter_begin(dev, &iter);
1613 drm_for_each_connector_iter(connector, &iter) {
1614 aconnector = to_amdgpu_dm_connector(connector);
1615 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1616 aconnector->mst_port)
1619 mgr = &aconnector->mst_mgr;
1622 drm_dp_mst_topology_mgr_suspend(mgr);
1624 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1626 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1627 need_hotplug = true;
1631 drm_connector_list_iter_end(&iter);
1634 drm_kms_helper_hotplug_event(dev);
1637 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1639 struct smu_context *smu = &adev->smu;
1642 if (!is_support_sw_smu(adev))
1645 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1646 * on the Windows driver dc implementation.
1647 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1648 * should be passed to smu during boot up and resume from s3.
1649 * boot up: dc calculate dcn watermark clock settings within dc_create,
1650 * dcn20_resource_construct
1651 * then call pplib functions below to pass the settings to smu:
1652 * smu_set_watermarks_for_clock_ranges
1653 * smu_set_watermarks_table
1654 * navi10_set_watermarks_table
1655 * smu_write_watermarks_table
1657 * For Renoir, clock settings of dcn watermark are also fixed values.
1658 * dc has implemented a different flow for the Windows driver:
1659 * dc_hardware_init / dc_set_power_state
1664 * smu_set_watermarks_for_clock_ranges
1665 * renoir_set_watermarks_table
1666 * smu_write_watermarks_table
1669 * dc_hardware_init -> amdgpu_dm_init
1670 * dc_set_power_state --> dm_resume
1672 * therefore, this function applies to navi10/12/14 but not Renoir
1675 switch(adev->asic_type) {
1684 ret = smu_write_watermarks_table(smu);
1686 DRM_ERROR("Failed to update WMTABLE!\n");
1694 * dm_hw_init() - Initialize DC device
1695 * @handle: The base driver device containing the amdgpu_dm device.
1697 * Initialize the &struct amdgpu_display_manager device. This involves calling
1698 * the initializers of each DM component, then populating the struct with them.
1700 * Although the function implies hardware initialization, both hardware and
1701 * software are initialized here. Splitting them out to their relevant init
1702 * hooks is a future TODO item.
1704 * Some notable things that are initialized here:
1706 * - Display Core, both software and hardware
1707 * - DC modules that we need (freesync and color management)
1708 * - DRM software states
1709 * - Interrupt sources and handlers
1711 * - Debug FS entries, if enabled
1713 static int dm_hw_init(void *handle)
1715 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1716 /* Create DAL display manager */
1717 amdgpu_dm_init(adev);
1718 amdgpu_dm_hpd_init(adev);
1724 * dm_hw_fini() - Teardown DC device
1725 * @handle: The base driver device containing the amdgpu_dm device.
1727 * Teardown components within &struct amdgpu_display_manager that require
1728 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1729 * were loaded. Also flush IRQ workqueues and disable them.
1731 static int dm_hw_fini(void *handle)
1733 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1735 amdgpu_dm_hpd_fini(adev);
1737 amdgpu_dm_irq_fini(adev);
1738 amdgpu_dm_fini(adev);
1743 static int dm_enable_vblank(struct drm_crtc *crtc);
1744 static void dm_disable_vblank(struct drm_crtc *crtc);
1746 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1747 struct dc_state *state, bool enable)
1749 enum dc_irq_source irq_source;
1750 struct amdgpu_crtc *acrtc;
1754 for (i = 0; i < state->stream_count; i++) {
1755 acrtc = get_crtc_by_otg_inst(
1756 adev, state->stream_status[i].primary_otg_inst);
1758 if (acrtc && state->stream_status[i].plane_count != 0) {
1759 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1760 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1761 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1762 acrtc->crtc_id, enable ? "en" : "dis", rc);
1764 DRM_WARN("Failed to %s pflip interrupts\n",
1765 enable ? "enable" : "disable");
1768 rc = dm_enable_vblank(&acrtc->base);
1770 DRM_WARN("Failed to enable vblank interrupts\n");
1772 dm_disable_vblank(&acrtc->base);
1780 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1782 struct dc_state *context = NULL;
1783 enum dc_status res = DC_ERROR_UNEXPECTED;
1785 struct dc_stream_state *del_streams[MAX_PIPES];
1786 int del_streams_count = 0;
1788 memset(del_streams, 0, sizeof(del_streams));
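/* Copy the current state, strip every stream (and its planes) from the
 * copy, and commit it so DC powers down all pipes.
 */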
1790 context = dc_create_state(dc);
1791 if (context == NULL)
1792 goto context_alloc_fail;
1794 dc_resource_state_copy_construct_current(dc, context);
1796 /* First remove from context all streams */
1797 for (i = 0; i < context->stream_count; i++) {
1798 struct dc_stream_state *stream = context->streams[i];
1800 del_streams[del_streams_count++] = stream;
1803 /* Remove all planes for removed streams and then remove the streams */
1804 for (i = 0; i < del_streams_count; i++) {
1805 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1806 res = DC_FAIL_DETACH_SURFACES;
1810 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1816 res = dc_validate_global_state(dc, context, false);
1819 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1823 res = dc_commit_state(dc, context);
1826 dc_release_state(context);
1832 static int dm_suspend(void *handle)
1834 struct amdgpu_device *adev = handle;
1835 struct amdgpu_display_manager *dm = &adev->dm;
1838 if (amdgpu_in_reset(adev)) {
1839 mutex_lock(&dm->dc_lock);
1840 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
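/* Keep a copy of the current DC state so dm_resume() can recommit the
 * same streams and planes once the GPU reset completes.
 */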
1842 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1844 amdgpu_dm_commit_zero_streams(dm->dc);
1846 amdgpu_dm_irq_suspend(adev);
1851 WARN_ON(adev->dm.cached_state);
1852 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1854 s3_handle_mst(adev_to_drm(adev), true);
1856 amdgpu_dm_irq_suspend(adev);
1859 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1864 static struct amdgpu_dm_connector *
1865 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1866 struct drm_crtc *crtc)
1869 struct drm_connector_state *new_con_state;
1870 struct drm_connector *connector;
1871 struct drm_crtc *crtc_from_state;
1873 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1874 crtc_from_state = new_con_state->crtc;
1876 if (crtc_from_state == crtc)
1877 return to_amdgpu_dm_connector(connector);
1883 static void emulated_link_detect(struct dc_link *link)
1885 struct dc_sink_init_data sink_init_data = { 0 };
1886 struct display_sink_capability sink_caps = { 0 };
1887 enum dc_edid_status edid_status;
1888 struct dc_context *dc_ctx = link->ctx;
1889 struct dc_sink *sink = NULL;
1890 struct dc_sink *prev_sink = NULL;
1892 link->type = dc_connection_none;
1893 prev_sink = link->local_sink;
1895 if (prev_sink != NULL)
1896 dc_sink_retain(prev_sink);
1898 switch (link->connector_signal) {
1899 case SIGNAL_TYPE_HDMI_TYPE_A: {
1900 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1901 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1905 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1906 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1907 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1911 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1912 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1913 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1917 case SIGNAL_TYPE_LVDS: {
1918 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1919 sink_caps.signal = SIGNAL_TYPE_LVDS;
1923 case SIGNAL_TYPE_EDP: {
1924 sink_caps.transaction_type =
1925 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1926 sink_caps.signal = SIGNAL_TYPE_EDP;
1930 case SIGNAL_TYPE_DISPLAY_PORT: {
1931 sink_caps.transaction_type =
1932 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1933 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1938 DC_ERROR("Invalid connector type! signal:%d\n",
1939 link->connector_signal);
1943 sink_init_data.link = link;
1944 sink_init_data.sink_signal = sink_caps.signal;
1946 sink = dc_sink_create(&sink_init_data);
1948 DC_ERROR("Failed to create sink!\n");
1952 /* dc_sink_create returns a new reference */
1953 link->local_sink = sink;
1955 edid_status = dm_helpers_read_local_edid(
1960 if (edid_status != EDID_OK)
1961 DC_ERROR("Failed to read EDID");
1965 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1966 struct amdgpu_display_manager *dm)
1969 struct dc_surface_update surface_updates[MAX_SURFACES];
1970 struct dc_plane_info plane_infos[MAX_SURFACES];
1971 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1972 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1973 struct dc_stream_update stream_update;
1977 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1980 dm_error("Failed to allocate update bundle\n");
1984 for (k = 0; k < dc_state->stream_count; k++) {
1985 bundle->stream_update.stream = dc_state->streams[k];
1987 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1988 bundle->surface_updates[m].surface =
1989 dc_state->stream_status->plane_states[m];
1990 bundle->surface_updates[m].surface->force_full_update =
1993 dc_commit_updates_for_stream(
1994 dm->dc, bundle->surface_updates,
1995 dc_state->stream_status->plane_count,
1996 dc_state->streams[k], &bundle->stream_update, dc_state);
2005 static int dm_resume(void *handle)
2007 struct amdgpu_device *adev = handle;
2008 struct drm_device *ddev = adev_to_drm(adev);
2009 struct amdgpu_display_manager *dm = &adev->dm;
2010 struct amdgpu_dm_connector *aconnector;
2011 struct drm_connector *connector;
2012 struct drm_connector_list_iter iter;
2013 struct drm_crtc *crtc;
2014 struct drm_crtc_state *new_crtc_state;
2015 struct dm_crtc_state *dm_new_crtc_state;
2016 struct drm_plane *plane;
2017 struct drm_plane_state *new_plane_state;
2018 struct dm_plane_state *dm_new_plane_state;
2019 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2020 enum dc_connection_type new_connection_type = dc_connection_none;
2021 struct dc_state *dc_state;
2024 if (amdgpu_in_reset(adev)) {
2025 dc_state = dm->cached_dc_state;
2027 r = dm_dmub_hw_init(adev);
2029 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2031 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2034 amdgpu_dm_irq_resume_early(adev);
2036 for (i = 0; i < dc_state->stream_count; i++) {
2037 dc_state->streams[i]->mode_changed = true;
2038 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2039 dc_state->stream_status->plane_states[j]->update_flags.raw
2044 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2046 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2048 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2050 dc_release_state(dm->cached_dc_state);
2051 dm->cached_dc_state = NULL;
2053 amdgpu_dm_irq_resume_late(adev);
2055 mutex_unlock(&dm->dc_lock);
2059 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2060 dc_release_state(dm_state->context);
2061 dm_state->context = dc_create_state(dm->dc);
2062 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2063 dc_resource_state_construct(dm->dc, dm_state->context);
2065 /* Before powering on DC we need to re-initialize DMUB. */
2066 r = dm_dmub_hw_init(adev);
2068 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2070 /* power on hardware */
2071 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2073 /* program HPD filter */
2077 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2078 * as short pulse interrupts are used for MST
2080 amdgpu_dm_irq_resume_early(adev);
2082 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2083 s3_handle_mst(ddev, false);
2086 drm_connector_list_iter_begin(ddev, &iter);
2087 drm_for_each_connector_iter(connector, &iter) {
2088 aconnector = to_amdgpu_dm_connector(connector);
2091 * this is the case when traversing through already created
2092 * MST connectors; they should be skipped
2094 if (aconnector->mst_port)
2097 mutex_lock(&aconnector->hpd_lock);
2098 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2099 DRM_ERROR("KMS: Failed to detect connector\n");
2101 if (aconnector->base.force && new_connection_type == dc_connection_none)
2102 emulated_link_detect(aconnector->dc_link);
2104 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2106 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2107 aconnector->fake_enable = false;
2109 if (aconnector->dc_sink)
2110 dc_sink_release(aconnector->dc_sink);
2111 aconnector->dc_sink = NULL;
2112 amdgpu_dm_update_connector_after_detect(aconnector);
2113 mutex_unlock(&aconnector->hpd_lock);
2115 drm_connector_list_iter_end(&iter);
2117 /* Force mode set in atomic commit */
2118 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2119 new_crtc_state->active_changed = true;
2122 * atomic_check is expected to create the dc states. We need to release
2123 * them here, since they were duplicated as part of the suspend procedure.
2126 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2127 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2128 if (dm_new_crtc_state->stream) {
2129 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2130 dc_stream_release(dm_new_crtc_state->stream);
2131 dm_new_crtc_state->stream = NULL;
2135 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2136 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2137 if (dm_new_plane_state->dc_state) {
2138 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2139 dc_plane_state_release(dm_new_plane_state->dc_state);
2140 dm_new_plane_state->dc_state = NULL;
2144 drm_atomic_helper_resume(ddev, dm->cached_state);
2146 dm->cached_state = NULL;
2148 amdgpu_dm_irq_resume_late(adev);
2150 amdgpu_dm_smu_write_watermarks_table(adev);
2158 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2159 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2160 * the base driver's device list to be initialized and torn down accordingly.
2162 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2165 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2167 .early_init = dm_early_init,
2168 .late_init = dm_late_init,
2169 .sw_init = dm_sw_init,
2170 .sw_fini = dm_sw_fini,
2171 .hw_init = dm_hw_init,
2172 .hw_fini = dm_hw_fini,
2173 .suspend = dm_suspend,
2174 .resume = dm_resume,
2175 .is_idle = dm_is_idle,
2176 .wait_for_idle = dm_wait_for_idle,
2177 .check_soft_reset = dm_check_soft_reset,
2178 .soft_reset = dm_soft_reset,
2179 .set_clockgating_state = dm_set_clockgating_state,
2180 .set_powergating_state = dm_set_powergating_state,
2183 const struct amdgpu_ip_block_version dm_ip_block =
2185 .type = AMD_IP_BLOCK_TYPE_DCE,
2189 .funcs = &amdgpu_dm_funcs,
2199 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2200 .fb_create = amdgpu_display_user_framebuffer_create,
2201 .output_poll_changed = drm_fb_helper_output_poll_changed,
2202 .atomic_check = amdgpu_dm_atomic_check,
2203 .atomic_commit = amdgpu_dm_atomic_commit,
2206 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2207 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2210 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2212 u32 max_cll, min_cll, max, min, q, r;
2213 struct amdgpu_dm_backlight_caps *caps;
2214 struct amdgpu_display_manager *dm;
2215 struct drm_connector *conn_base;
2216 struct amdgpu_device *adev;
2217 struct dc_link *link = NULL;
2218 static const u8 pre_computed_values[] = {
2219 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2220 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2222 if (!aconnector || !aconnector->dc_link)
2225 link = aconnector->dc_link;
2226 if (link->connector_signal != SIGNAL_TYPE_EDP)
2229 conn_base = &aconnector->base;
2230 adev = drm_to_adev(conn_base->dev);
2232 caps = &dm->backlight_caps;
2233 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2234 caps->aux_support = false;
2235 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2236 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2238 if (caps->ext_caps->bits.oled == 1 ||
2239 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2240 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2241 caps->aux_support = true;
2243 /* From the specification (CTA-861-G), for calculating the maximum
2244 * luminance we need to use:
2245 * Luminance = 50*2**(CV/32)
2246 * Where CV is a one-byte value.
2247 * For calculating this expression we may need floating-point precision;
2248 * to avoid this complexity level, we take advantage of the fact that CV
2249 * is divided by a constant. From Euclid's division algorithm, we know
2250 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
2251 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2252 * need to pre-compute the value of r/32. For pre-computing the values
2253 * we just used the following Ruby line:
2254 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2255 * The results of the above expressions can be verified at
2256 * pre_computed_values.
2260 max = (1 << q) * pre_computed_values[r];
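/*
 * Illustrative example (hypothetical value): for max_cll = 100, q = 3 and
 * r = 4, so max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440, which is
 * close to the exact 50 * 2**(100/32) ~= 436.
 */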
2262 // min luminance: maxLum * (CV/255)^2 / 100
2263 q = DIV_ROUND_CLOSEST(min_cll, 255);
2264 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2266 caps->aux_max_input_signal = max;
2267 caps->aux_min_input_signal = min;
2270 void amdgpu_dm_update_connector_after_detect(
2271 struct amdgpu_dm_connector *aconnector)
2273 struct drm_connector *connector = &aconnector->base;
2274 struct drm_device *dev = connector->dev;
2275 struct dc_sink *sink;
2277 /* MST handled by drm_mst framework */
2278 if (aconnector->mst_mgr.mst_state == true)
2281 sink = aconnector->dc_link->local_sink;
2283 dc_sink_retain(sink);
2286 * Edid mgmt connector gets first update only in mode_valid hook and then
2287 * the connector sink is set to either a fake or a physical sink, depending on link status.
2288 * Skip if already done during boot.
2290 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2291 && aconnector->dc_em_sink) {
2294 * For S3 resume with a headless setup, use the em_sink to fake the stream,
2295 * because on resume connector->sink is set to NULL
2297 mutex_lock(&dev->mode_config.mutex);
2300 if (aconnector->dc_sink) {
2301 amdgpu_dm_update_freesync_caps(connector, NULL);
2303 * The retain and release below are used to
2304 * bump up the refcount for the sink because the link doesn't point
2305 * to it anymore after disconnect, so on the next crtc-to-connector
2306 * reshuffle by the UMD we would otherwise get an unwanted dc_sink release
2308 dc_sink_release(aconnector->dc_sink);
2310 aconnector->dc_sink = sink;
2311 dc_sink_retain(aconnector->dc_sink);
2312 amdgpu_dm_update_freesync_caps(connector,
2315 amdgpu_dm_update_freesync_caps(connector, NULL);
2316 if (!aconnector->dc_sink) {
2317 aconnector->dc_sink = aconnector->dc_em_sink;
2318 dc_sink_retain(aconnector->dc_sink);
2322 mutex_unlock(&dev->mode_config.mutex);
2325 dc_sink_release(sink);
2330 * TODO: temporary guard while we look for a proper fix;
2331 * if this sink is an MST sink, we should not do anything
2333 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2334 dc_sink_release(sink);
2338 if (aconnector->dc_sink == sink) {
2340 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2343 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2344 aconnector->connector_id);
2346 dc_sink_release(sink);
2350 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2351 aconnector->connector_id, aconnector->dc_sink, sink);
2353 mutex_lock(&dev->mode_config.mutex);
2356 * 1. Update status of the drm connector
2357 * 2. Send an event and let userspace tell us what to do
2361 * TODO: check if we still need the S3 mode update workaround.
2362 * If yes, put it here.
2364 if (aconnector->dc_sink)
2365 amdgpu_dm_update_freesync_caps(connector, NULL);
2367 aconnector->dc_sink = sink;
2368 dc_sink_retain(aconnector->dc_sink);
2369 if (sink->dc_edid.length == 0) {
2370 aconnector->edid = NULL;
2371 if (aconnector->dc_link->aux_mode) {
2372 drm_dp_cec_unset_edid(
2373 &aconnector->dm_dp_aux.aux);
2377 (struct edid *)sink->dc_edid.raw_edid;
2379 drm_connector_update_edid_property(connector,
2381 drm_add_edid_modes(connector, aconnector->edid);
2383 if (aconnector->dc_link->aux_mode)
2384 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2388 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2389 update_connector_ext_caps(aconnector);
2391 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2392 amdgpu_dm_update_freesync_caps(connector, NULL);
2393 drm_connector_update_edid_property(connector, NULL);
2394 aconnector->num_modes = 0;
2395 dc_sink_release(aconnector->dc_sink);
2396 aconnector->dc_sink = NULL;
2397 aconnector->edid = NULL;
2398 #ifdef CONFIG_DRM_AMD_DC_HDCP
2399 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2400 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2401 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2405 mutex_unlock(&dev->mode_config.mutex);
2407 update_subconnector_property(aconnector);
2410 dc_sink_release(sink);
2413 static void handle_hpd_irq(void *param)
2415 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2416 struct drm_connector *connector = &aconnector->base;
2417 struct drm_device *dev = connector->dev;
2418 enum dc_connection_type new_connection_type = dc_connection_none;
2419 #ifdef CONFIG_DRM_AMD_DC_HDCP
2420 struct amdgpu_device *adev = drm_to_adev(dev);
2424 * In case of failure or MST, there is no need to update the connector status
2425 * or notify the OS, since (in the MST case) MST does this in its own context.
2427 mutex_lock(&aconnector->hpd_lock);
2429 #ifdef CONFIG_DRM_AMD_DC_HDCP
2430 if (adev->dm.hdcp_workqueue)
2431 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2433 if (aconnector->fake_enable)
2434 aconnector->fake_enable = false;
2436 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2437 DRM_ERROR("KMS: Failed to detect connector\n");
2439 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2440 emulated_link_detect(aconnector->dc_link);
2443 drm_modeset_lock_all(dev);
2444 dm_restore_drm_connector_state(dev, connector);
2445 drm_modeset_unlock_all(dev);
2447 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2448 drm_kms_helper_hotplug_event(dev);
2450 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2451 amdgpu_dm_update_connector_after_detect(aconnector);
2454 drm_modeset_lock_all(dev);
2455 dm_restore_drm_connector_state(dev, connector);
2456 drm_modeset_unlock_all(dev);
2458 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2459 drm_kms_helper_hotplug_event(dev);
2461 mutex_unlock(&aconnector->hpd_lock);
2465 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2467 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2469 bool new_irq_handled = false;
2471 int dpcd_bytes_to_read;
2473 const int max_process_count = 30;
2474 int process_count = 0;
2476 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2478 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2479 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2480 /* DPCD 0x200 - 0x201 for downstream IRQ */
2481 dpcd_addr = DP_SINK_COUNT;
2483 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2484 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
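/*
 * The ESI (Event Status Indicator) registers are the DP 1.2+ location for
 * downstream IRQ status; MST sinks report short-pulse events there.
 */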
2485 dpcd_addr = DP_SINK_COUNT_ESI;
2488 dret = drm_dp_dpcd_read(
2489 &aconnector->dm_dp_aux.aux,
2492 dpcd_bytes_to_read);
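/*
 * Keep servicing ESI events until the sink stops reporting new IRQs or the
 * iteration budget runs out; each pass lets the MST manager handle the
 * event, ACKs it back at the DPCD, and then re-reads the status bytes.
 */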
2494 while (dret == dpcd_bytes_to_read &&
2495 process_count < max_process_count) {
2501 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2502 /* handle HPD short pulse irq */
2503 if (aconnector->mst_mgr.mst_state)
2505 &aconnector->mst_mgr,
2509 if (new_irq_handled) {
2510 /* ACK at DPCD to notify downstream */
2511 const int ack_dpcd_bytes_to_write =
2512 dpcd_bytes_to_read - 1;
2514 for (retry = 0; retry < 3; retry++) {
2517 wret = drm_dp_dpcd_write(
2518 &aconnector->dm_dp_aux.aux,
2521 ack_dpcd_bytes_to_write);
2522 if (wret == ack_dpcd_bytes_to_write)
2526 /* check if there is new irq to be handled */
2527 dret = drm_dp_dpcd_read(
2528 &aconnector->dm_dp_aux.aux,
2531 dpcd_bytes_to_read);
2533 new_irq_handled = false;
2539 if (process_count == max_process_count)
2540 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2543 static void handle_hpd_rx_irq(void *param)
2545 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2546 struct drm_connector *connector = &aconnector->base;
2547 struct drm_device *dev = connector->dev;
2548 struct dc_link *dc_link = aconnector->dc_link;
2549 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2550 enum dc_connection_type new_connection_type = dc_connection_none;
2551 #ifdef CONFIG_DRM_AMD_DC_HDCP
2552 union hpd_irq_data hpd_irq_data;
2553 struct amdgpu_device *adev = drm_to_adev(dev);
2555 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2559 * TODO: Temporary mutex to keep the HPD interrupt from hitting a GPIO
2560 * conflict; once an i2c helper is implemented, this mutex should be retired.
2563 if (dc_link->type != dc_connection_mst_branch)
2564 mutex_lock(&aconnector->hpd_lock);
2567 #ifdef CONFIG_DRM_AMD_DC_HDCP
2568 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2570 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2572 !is_mst_root_connector) {
2573 /* Downstream Port status changed. */
2574 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2575 DRM_ERROR("KMS: Failed to detect connector\n");
2577 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2578 emulated_link_detect(dc_link);
2580 if (aconnector->fake_enable)
2581 aconnector->fake_enable = false;
2583 amdgpu_dm_update_connector_after_detect(aconnector);
2586 drm_modeset_lock_all(dev);
2587 dm_restore_drm_connector_state(dev, connector);
2588 drm_modeset_unlock_all(dev);
2590 drm_kms_helper_hotplug_event(dev);
2591 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2593 if (aconnector->fake_enable)
2594 aconnector->fake_enable = false;
2596 amdgpu_dm_update_connector_after_detect(aconnector);
2599 drm_modeset_lock_all(dev);
2600 dm_restore_drm_connector_state(dev, connector);
2601 drm_modeset_unlock_all(dev);
2603 drm_kms_helper_hotplug_event(dev);
2606 #ifdef CONFIG_DRM_AMD_DC_HDCP
2607 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2608 if (adev->dm.hdcp_workqueue)
2609 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2612 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2613 (dc_link->type == dc_connection_mst_branch))
2614 dm_handle_hpd_rx_irq(aconnector);
2616 if (dc_link->type != dc_connection_mst_branch) {
2617 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2618 mutex_unlock(&aconnector->hpd_lock);
2622 static void register_hpd_handlers(struct amdgpu_device *adev)
2624 struct drm_device *dev = adev_to_drm(adev);
2625 struct drm_connector *connector;
2626 struct amdgpu_dm_connector *aconnector;
2627 const struct dc_link *dc_link;
2628 struct dc_interrupt_params int_params = {0};
2630 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2631 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2633 list_for_each_entry(connector,
2634 &dev->mode_config.connector_list, head) {
2636 aconnector = to_amdgpu_dm_connector(connector);
2637 dc_link = aconnector->dc_link;
2639 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2640 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2641 int_params.irq_source = dc_link->irq_source_hpd;
2643 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2645 (void *) aconnector);
2648 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2650 /* Also register for DP short pulse (hpd_rx). */
2651 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2652 int_params.irq_source = dc_link->irq_source_hpd_rx;
2654 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2656 (void *) aconnector);
2661 #if defined(CONFIG_DRM_AMD_DC_SI)
2662 /* Register IRQ sources and initialize IRQ callbacks */
2663 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2665 struct dc *dc = adev->dm.dc;
2666 struct common_irq_params *c_irq_params;
2667 struct dc_interrupt_params int_params = {0};
2670 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2672 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2673 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2676 * Actions of amdgpu_irq_add_id():
2677 * 1. Register a set() function with base driver.
2678 * Base driver will call set() function to enable/disable an
2679 * interrupt in DC hardware.
2680 * 2. Register amdgpu_dm_irq_handler().
2681 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2682 * coming from DC hardware.
2683 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2684 * for acknowledging and handling. */
2686 /* Use VBLANK interrupt */
2687 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2688 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2690 DRM_ERROR("Failed to add crtc irq id!\n");
2694 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2695 int_params.irq_source =
2696 dc_interrupt_to_irq_source(dc, i+1 , 0);
2698 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2700 c_irq_params->adev = adev;
2701 c_irq_params->irq_src = int_params.irq_source;
2703 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2704 dm_crtc_high_irq, c_irq_params);
2707 /* Use GRPH_PFLIP interrupt */
2708 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2709 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2710 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2712 DRM_ERROR("Failed to add page flip irq id!\n");
2716 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2717 int_params.irq_source =
2718 dc_interrupt_to_irq_source(dc, i, 0);
2720 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2722 c_irq_params->adev = adev;
2723 c_irq_params->irq_src = int_params.irq_source;
2725 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2726 dm_pflip_high_irq, c_irq_params);
2731 r = amdgpu_irq_add_id(adev, client_id,
2732 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2734 DRM_ERROR("Failed to add hpd irq id!\n");
2738 register_hpd_handlers(adev);
2744 /* Register IRQ sources and initialize IRQ callbacks */
2745 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2747 struct dc *dc = adev->dm.dc;
2748 struct common_irq_params *c_irq_params;
2749 struct dc_interrupt_params int_params = {0};
2752 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2754 if (adev->asic_type >= CHIP_VEGA10)
2755 client_id = SOC15_IH_CLIENTID_DCE;
2757 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2758 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2761 * Actions of amdgpu_irq_add_id():
2762 * 1. Register a set() function with base driver.
2763 * Base driver will call set() function to enable/disable an
2764 * interrupt in DC hardware.
2765 * 2. Register amdgpu_dm_irq_handler().
2766 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2767 * coming from DC hardware.
2768 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2769 * for acknowledging and handling. */
2771 /* Use VBLANK interrupt */
2772 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2773 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2775 DRM_ERROR("Failed to add crtc irq id!\n");
2779 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2780 int_params.irq_source =
2781 dc_interrupt_to_irq_source(dc, i, 0);
2783 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2785 c_irq_params->adev = adev;
2786 c_irq_params->irq_src = int_params.irq_source;
2788 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2789 dm_crtc_high_irq, c_irq_params);
2792 /* Use VUPDATE interrupt */
2793 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2794 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2796 DRM_ERROR("Failed to add vupdate irq id!\n");
2800 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2801 int_params.irq_source =
2802 dc_interrupt_to_irq_source(dc, i, 0);
2804 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2806 c_irq_params->adev = adev;
2807 c_irq_params->irq_src = int_params.irq_source;
2809 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2810 dm_vupdate_high_irq, c_irq_params);
2813 /* Use GRPH_PFLIP interrupt */
2814 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2815 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2816 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2818 DRM_ERROR("Failed to add page flip irq id!\n");
2822 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2823 int_params.irq_source =
2824 dc_interrupt_to_irq_source(dc, i, 0);
2826 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2828 c_irq_params->adev = adev;
2829 c_irq_params->irq_src = int_params.irq_source;
2831 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2832 dm_pflip_high_irq, c_irq_params);
2837 r = amdgpu_irq_add_id(adev, client_id,
2838 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2840 DRM_ERROR("Failed to add hpd irq id!\n");
2844 register_hpd_handlers(adev);
2849 #if defined(CONFIG_DRM_AMD_DC_DCN)
2850 /* Register IRQ sources and initialize IRQ callbacks */
2851 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2853 struct dc *dc = adev->dm.dc;
2854 struct common_irq_params *c_irq_params;
2855 struct dc_interrupt_params int_params = {0};
2859 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2860 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2863 * Actions of amdgpu_irq_add_id():
2864 * 1. Register a set() function with base driver.
2865 * Base driver will call set() function to enable/disable an
2866 * interrupt in DC hardware.
2867 * 2. Register amdgpu_dm_irq_handler().
2868 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2869 * coming from DC hardware.
2870 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2871 * for acknowledging and handling.
2874 /* Use VSTARTUP interrupt */
2875 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2876 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2878 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2881 DRM_ERROR("Failed to add crtc irq id!\n");
2885 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2886 int_params.irq_source =
2887 dc_interrupt_to_irq_source(dc, i, 0);
2889 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2891 c_irq_params->adev = adev;
2892 c_irq_params->irq_src = int_params.irq_source;
2894 amdgpu_dm_irq_register_interrupt(
2895 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2898 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2899 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2900 * to trigger at end of each vblank, regardless of state of the lock,
2901 * matching DCE behaviour.
2903 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2904 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2906 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2909 DRM_ERROR("Failed to add vupdate irq id!\n");
2913 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2914 int_params.irq_source =
2915 dc_interrupt_to_irq_source(dc, i, 0);
2917 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2919 c_irq_params->adev = adev;
2920 c_irq_params->irq_src = int_params.irq_source;
2922 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2923 dm_vupdate_high_irq, c_irq_params);
2926 /* Use GRPH_PFLIP interrupt */
2927 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2928 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2930 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2932 DRM_ERROR("Failed to add page flip irq id!\n");
2936 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2937 int_params.irq_source =
2938 dc_interrupt_to_irq_source(dc, i, 0);
2940 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2942 c_irq_params->adev = adev;
2943 c_irq_params->irq_src = int_params.irq_source;
2945 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2946 dm_pflip_high_irq, c_irq_params);
2951 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2954 DRM_ERROR("Failed to add hpd irq id!\n");
2958 register_hpd_handlers(adev);
2965 * Acquires the lock for the atomic state object and returns
2966 * the new atomic state.
2968 * This should only be called during atomic check.
2970 static int dm_atomic_get_state(struct drm_atomic_state *state,
2971 struct dm_atomic_state **dm_state)
2973 struct drm_device *dev = state->dev;
2974 struct amdgpu_device *adev = drm_to_adev(dev);
2975 struct amdgpu_display_manager *dm = &adev->dm;
2976 struct drm_private_state *priv_state;
2981 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2982 if (IS_ERR(priv_state))
2983 return PTR_ERR(priv_state);
2985 *dm_state = to_dm_atomic_state(priv_state);
2990 static struct dm_atomic_state *
2991 dm_atomic_get_new_state(struct drm_atomic_state *state)
2993 struct drm_device *dev = state->dev;
2994 struct amdgpu_device *adev = drm_to_adev(dev);
2995 struct amdgpu_display_manager *dm = &adev->dm;
2996 struct drm_private_obj *obj;
2997 struct drm_private_state *new_obj_state;
3000 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3001 if (obj->funcs == dm->atomic_obj.funcs)
3002 return to_dm_atomic_state(new_obj_state);
3008 static struct drm_private_state *
3009 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3011 struct dm_atomic_state *old_state, *new_state;
3013 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3017 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3019 old_state = to_dm_atomic_state(obj->state);
3021 if (old_state && old_state->context)
3022 new_state->context = dc_copy_state(old_state->context);
3024 if (!new_state->context) {
3029 return &new_state->base;
3032 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3033 struct drm_private_state *state)
3035 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3037 if (dm_state && dm_state->context)
3038 dc_release_state(dm_state->context);
3043 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3044 .atomic_duplicate_state = dm_atomic_duplicate_state,
3045 .atomic_destroy_state = dm_atomic_destroy_state,
3048 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3050 struct dm_atomic_state *state;
3053 adev->mode_info.mode_config_initialized = true;
3055 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3056 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3058 adev_to_drm(adev)->mode_config.max_width = 16384;
3059 adev_to_drm(adev)->mode_config.max_height = 16384;
3061 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3062 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3063 /* indicates support for immediate flip */
3064 adev_to_drm(adev)->mode_config.async_page_flip = true;
3066 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3068 state = kzalloc(sizeof(*state), GFP_KERNEL);
3072 state->context = dc_create_state(adev->dm.dc);
3073 if (!state->context) {
3078 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3080 drm_atomic_private_obj_init(adev_to_drm(adev),
3081 &adev->dm.atomic_obj,
3083 &dm_atomic_state_funcs);
3085 r = amdgpu_display_modeset_create_props(adev);
3087 dc_release_state(state->context);
3092 r = amdgpu_dm_audio_init(adev);
3094 dc_release_state(state->context);
3102 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3103 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3104 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3106 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3107 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3109 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3111 #if defined(CONFIG_ACPI)
3112 struct amdgpu_dm_backlight_caps caps;
3114 memset(&caps, 0, sizeof(caps));
3116 if (dm->backlight_caps.caps_valid)
3119 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3120 if (caps.caps_valid) {
3121 dm->backlight_caps.caps_valid = true;
3122 if (caps.aux_support)
3124 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3125 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3127 dm->backlight_caps.min_input_signal =
3128 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3129 dm->backlight_caps.max_input_signal =
3130 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3133 if (dm->backlight_caps.aux_support)
3136 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3137 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3141 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3148 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3149 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3154 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3155 unsigned *min, unsigned *max)
3160 if (caps->aux_support) {
3161 // Firmware limits are in nits, DC API wants millinits.
3162 *max = 1000 * caps->aux_max_input_signal;
3163 *min = 1000 * caps->aux_min_input_signal;
3165 // Firmware limits are 8-bit, PWM control is 16-bit.
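// Multiplying by 0x101 maps the 8-bit firmware range onto the full 16-bit
// PWM range, since 0xff * 0x101 = 0xffff.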
3166 *max = 0x101 * caps->max_input_signal;
3167 *min = 0x101 * caps->min_input_signal;
3172 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3173 uint32_t brightness)
3177 if (!get_brightness_range(caps, &min, &max))
3180 // Rescale 0..255 to min..max
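// Illustrative example: with min = 0 and max = 0xffff (hypothetical limits),
// a user brightness of 128 rescales to DIV_ROUND_CLOSEST(0xffff * 128, 255) = 32896.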
3181 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3182 AMDGPU_MAX_BL_LEVEL);
3185 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3186 uint32_t brightness)
3190 if (!get_brightness_range(caps, &min, &max))
3193 if (brightness < min)
3195 // Rescale min..max to 0..255
3196 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3200 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3202 struct amdgpu_display_manager *dm = bl_get_data(bd);
3203 struct amdgpu_dm_backlight_caps caps;
3204 struct dc_link *link = NULL;
3208 amdgpu_dm_update_backlight_caps(dm);
3209 caps = dm->backlight_caps;
3211 link = (struct dc_link *)dm->backlight_link;
3213 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3214 // Change brightness based on AUX property
3215 if (caps.aux_support)
3216 return set_backlight_via_aux(link, brightness);
3218 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3223 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3225 struct amdgpu_display_manager *dm = bl_get_data(bd);
3226 int ret = dc_link_get_backlight_level(dm->backlight_link);
3228 if (ret == DC_ERROR_UNEXPECTED)
3229 return bd->props.brightness;
3230 return convert_brightness_to_user(&dm->backlight_caps, ret);
3233 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3234 .options = BL_CORE_SUSPENDRESUME,
3235 .get_brightness = amdgpu_dm_backlight_get_brightness,
3236 .update_status = amdgpu_dm_backlight_update_status,
3240 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3243 struct backlight_properties props = { 0 };
3245 amdgpu_dm_update_backlight_caps(dm);
3247 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3248 props.brightness = AMDGPU_MAX_BL_LEVEL;
3249 props.type = BACKLIGHT_RAW;
3251 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3252 adev_to_drm(dm->adev)->primary->index);
3254 dm->backlight_dev = backlight_device_register(bl_name,
3255 adev_to_drm(dm->adev)->dev,
3257 &amdgpu_dm_backlight_ops,
3260 if (IS_ERR(dm->backlight_dev))
3261 DRM_ERROR("DM: Backlight registration failed!\n");
3263 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3268 static int initialize_plane(struct amdgpu_display_manager *dm,
3269 struct amdgpu_mode_info *mode_info, int plane_id,
3270 enum drm_plane_type plane_type,
3271 const struct dc_plane_cap *plane_cap)
3273 struct drm_plane *plane;
3274 unsigned long possible_crtcs;
3277 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3279 DRM_ERROR("KMS: Failed to allocate plane\n");
3282 plane->type = plane_type;
3285 * HACK: IGT tests expect that the primary plane for a CRTC
3286 * can only have one possible CRTC. Only expose support for
3287 * any CRTC if they're not going to be used as a primary plane
3288 * for a CRTC - like overlay or underlay planes.
3290 possible_crtcs = 1 << plane_id;
3291 if (plane_id >= dm->dc->caps.max_streams)
3292 possible_crtcs = 0xff;
3294 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3297 DRM_ERROR("KMS: Failed to initialize plane\n");
3303 mode_info->planes[plane_id] = plane;
3309 static void register_backlight_device(struct amdgpu_display_manager *dm,
3310 struct dc_link *link)
3312 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3313 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3315 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3316 link->type != dc_connection_none) {
3318 * Even if registration fails, we should continue with
3319 * DM initialization, because not having backlight control
3320 * is better than a black screen.
3322 amdgpu_dm_register_backlight_device(dm);
3324 if (dm->backlight_dev)
3325 dm->backlight_link = link;
3332 * In this architecture, the association
3333 * connector -> encoder -> crtc
3334 * is not really required. The crtc and connector will hold the
3335 * display_index as an abstraction to use with the DAL component.
3337 * Returns 0 on success
3339 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3341 struct amdgpu_display_manager *dm = &adev->dm;
3343 struct amdgpu_dm_connector *aconnector = NULL;
3344 struct amdgpu_encoder *aencoder = NULL;
3345 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3347 int32_t primary_planes;
3348 enum dc_connection_type new_connection_type = dc_connection_none;
3349 const struct dc_plane_cap *plane;
3351 link_cnt = dm->dc->caps.max_links;
3352 if (amdgpu_dm_mode_config_init(dm->adev)) {
3353 DRM_ERROR("DM: Failed to initialize mode config\n");
3357 /* There is one primary plane per CRTC */
3358 primary_planes = dm->dc->caps.max_streams;
3359 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3362 * Initialize primary planes, implicit planes for legacy IOCTLS.
3363 * Order is reversed to match iteration order in atomic check.
3365 for (i = (primary_planes - 1); i >= 0; i--) {
3366 plane = &dm->dc->caps.planes[i];
3368 if (initialize_plane(dm, mode_info, i,
3369 DRM_PLANE_TYPE_PRIMARY, plane)) {
3370 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3376 * Initialize overlay planes, index starting after primary planes.
3377 * These planes have a higher DRM index than the primary planes since
3378 * they should be considered as having a higher z-order.
3379 * Order is reversed to match iteration order in atomic check.
3381 * Only support DCN for now, and only expose one so we don't encourage
3382 * userspace to use up all the pipes.
3384 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3385 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3387 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3390 if (!plane->blends_with_above || !plane->blends_with_below)
3393 if (!plane->pixel_format_support.argb8888)
3396 if (initialize_plane(dm, NULL, primary_planes + i,
3397 DRM_PLANE_TYPE_OVERLAY, plane)) {
3398 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3402 /* Only create one overlay plane. */
3406 for (i = 0; i < dm->dc->caps.max_streams; i++)
3407 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3408 DRM_ERROR("KMS: Failed to initialize crtc\n");
3412 dm->display_indexes_num = dm->dc->caps.max_streams;
3414 /* loops over all connectors on the board */
3415 for (i = 0; i < link_cnt; i++) {
3416 struct dc_link *link = NULL;
3418 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3420 "KMS: Cannot support more than %d display indexes\n",
3421 AMDGPU_DM_MAX_DISPLAY_INDEX);
3425 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3429 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3433 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3434 DRM_ERROR("KMS: Failed to initialize encoder\n");
3438 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3439 DRM_ERROR("KMS: Failed to initialize connector\n");
3443 link = dc_get_link_at_index(dm->dc, i);
3445 if (!dc_link_detect_sink(link, &new_connection_type))
3446 DRM_ERROR("KMS: Failed to detect connector\n");
3448 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3449 emulated_link_detect(link);
3450 amdgpu_dm_update_connector_after_detect(aconnector);
3452 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3453 amdgpu_dm_update_connector_after_detect(aconnector);
3454 register_backlight_device(dm, link);
3455 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3456 amdgpu_dm_set_psr_caps(link);
3462 /* Software is initialized. Now we can register interrupt handlers. */
3463 switch (adev->asic_type) {
3464 #if defined(CONFIG_DRM_AMD_DC_SI)
3469 if (dce60_register_irq_handlers(dm->adev)) {
3470 DRM_ERROR("DM: Failed to initialize IRQ\n");
3484 case CHIP_POLARIS11:
3485 case CHIP_POLARIS10:
3486 case CHIP_POLARIS12:
3491 if (dce110_register_irq_handlers(dm->adev)) {
3492 DRM_ERROR("DM: Failed to initialize IRQ\n");
3496 #if defined(CONFIG_DRM_AMD_DC_DCN)
3502 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3503 case CHIP_SIENNA_CICHLID:
3504 case CHIP_NAVY_FLOUNDER:
3506 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3507 case CHIP_DIMGREY_CAVEFISH:
3509 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3512 if (dcn10_register_irq_handlers(dm->adev)) {
3513 DRM_ERROR("DM: Failed to initialize IRQ\n");
3519 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3531 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3533 drm_mode_config_cleanup(dm->ddev);
3534 drm_atomic_private_obj_fini(&dm->atomic_obj);
3538 /******************************************************************************
3539 * amdgpu_display_funcs functions
3540 *****************************************************************************/
3543 * dm_bandwidth_update - program display watermarks
3545 * @adev: amdgpu_device pointer
3547 * Calculate and program the display watermarks and line buffer allocation.
3549 static void dm_bandwidth_update(struct amdgpu_device *adev)
3551 /* TODO: implement later */
3554 static const struct amdgpu_display_funcs dm_display_funcs = {
3555 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3556 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3557 .backlight_set_level = NULL, /* never called for DC */
3558 .backlight_get_level = NULL, /* never called for DC */
3559 .hpd_sense = NULL,/* called unconditionally */
3560 .hpd_set_polarity = NULL, /* called unconditionally */
3561 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3562 .page_flip_get_scanoutpos =
3563 dm_crtc_get_scanoutpos,/* called unconditionally */
3564 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3565 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3568 #if defined(CONFIG_DEBUG_KERNEL_DC)
3570 static ssize_t s3_debug_store(struct device *device,
3571 struct device_attribute *attr,
3577 struct drm_device *drm_dev = dev_get_drvdata(device);
3578 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3580 ret = kstrtoint(buf, 0, &s3_state);
3585 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3590 return ret == 0 ? count : 0;
3593 DEVICE_ATTR_WO(s3_debug);
3597 static int dm_early_init(void *handle)
3599 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3601 switch (adev->asic_type) {
3602 #if defined(CONFIG_DRM_AMD_DC_SI)
3606 adev->mode_info.num_crtc = 6;
3607 adev->mode_info.num_hpd = 6;
3608 adev->mode_info.num_dig = 6;
3611 adev->mode_info.num_crtc = 2;
3612 adev->mode_info.num_hpd = 2;
3613 adev->mode_info.num_dig = 2;
3618 adev->mode_info.num_crtc = 6;
3619 adev->mode_info.num_hpd = 6;
3620 adev->mode_info.num_dig = 6;
3623 adev->mode_info.num_crtc = 4;
3624 adev->mode_info.num_hpd = 6;
3625 adev->mode_info.num_dig = 7;
3629 adev->mode_info.num_crtc = 2;
3630 adev->mode_info.num_hpd = 6;
3631 adev->mode_info.num_dig = 6;
3635 adev->mode_info.num_crtc = 6;
3636 adev->mode_info.num_hpd = 6;
3637 adev->mode_info.num_dig = 7;
3640 adev->mode_info.num_crtc = 3;
3641 adev->mode_info.num_hpd = 6;
3642 adev->mode_info.num_dig = 9;
3645 adev->mode_info.num_crtc = 2;
3646 adev->mode_info.num_hpd = 6;
3647 adev->mode_info.num_dig = 9;
3649 case CHIP_POLARIS11:
3650 case CHIP_POLARIS12:
3651 adev->mode_info.num_crtc = 5;
3652 adev->mode_info.num_hpd = 5;
3653 adev->mode_info.num_dig = 5;
3655 case CHIP_POLARIS10:
3657 adev->mode_info.num_crtc = 6;
3658 adev->mode_info.num_hpd = 6;
3659 adev->mode_info.num_dig = 6;
3664 adev->mode_info.num_crtc = 6;
3665 adev->mode_info.num_hpd = 6;
3666 adev->mode_info.num_dig = 6;
3668 #if defined(CONFIG_DRM_AMD_DC_DCN)
3670 adev->mode_info.num_crtc = 4;
3671 adev->mode_info.num_hpd = 4;
3672 adev->mode_info.num_dig = 4;
3677 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3678 case CHIP_SIENNA_CICHLID:
3679 case CHIP_NAVY_FLOUNDER:
3681 adev->mode_info.num_crtc = 6;
3682 adev->mode_info.num_hpd = 6;
3683 adev->mode_info.num_dig = 6;
3685 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
3687 adev->mode_info.num_crtc = 4;
3688 adev->mode_info.num_hpd = 4;
3689 adev->mode_info.num_dig = 4;
3693 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
3694 case CHIP_DIMGREY_CAVEFISH:
3696 adev->mode_info.num_crtc = 5;
3697 adev->mode_info.num_hpd = 5;
3698 adev->mode_info.num_dig = 5;
3701 adev->mode_info.num_crtc = 4;
3702 adev->mode_info.num_hpd = 4;
3703 adev->mode_info.num_dig = 4;
3706 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3710 amdgpu_dm_set_irq_funcs(adev);
3712 if (adev->mode_info.funcs == NULL)
3713 adev->mode_info.funcs = &dm_display_funcs;
3716 * Note: Do NOT change adev->audio_endpt_rreg and
3717 * adev->audio_endpt_wreg because they are initialised in
3718 * amdgpu_device_init()
3720 #if defined(CONFIG_DEBUG_KERNEL_DC)
3722 adev_to_drm(adev)->dev,
3723 &dev_attr_s3_debug);
3729 static bool modeset_required(struct drm_crtc_state *crtc_state,
3730 struct dc_stream_state *new_stream,
3731 struct dc_stream_state *old_stream)
3733 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3736 static bool modereset_required(struct drm_crtc_state *crtc_state)
3738 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3741 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3743 drm_encoder_cleanup(encoder);
3747 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3748 .destroy = amdgpu_dm_encoder_destroy,
3752 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3753 struct dc_scaling_info *scaling_info)
3755 int scale_w, scale_h;
3757 memset(scaling_info, 0, sizeof(*scaling_info));
3759 /* Source is fixed 16.16 but we ignore mantissa for now... */
3760 scaling_info->src_rect.x = state->src_x >> 16;
3761 scaling_info->src_rect.y = state->src_y >> 16;
3763 scaling_info->src_rect.width = state->src_w >> 16;
3764 if (scaling_info->src_rect.width == 0)
3767 scaling_info->src_rect.height = state->src_h >> 16;
3768 if (scaling_info->src_rect.height == 0)
3771 scaling_info->dst_rect.x = state->crtc_x;
3772 scaling_info->dst_rect.y = state->crtc_y;
3774 if (state->crtc_w == 0)
3777 scaling_info->dst_rect.width = state->crtc_w;
3779 if (state->crtc_h == 0)
3782 scaling_info->dst_rect.height = state->crtc_h;
3784 /* DRM doesn't specify clipping on destination output. */
3785 scaling_info->clip_rect = scaling_info->dst_rect;
3787 /* TODO: Validate scaling per-format with DC plane caps */
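/*
 * Ratios are expressed in units of 1/1000, so the checks below allow roughly
 * 0.25x (250) up to 16x (16000) scaling in either dimension.
 */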
3788 scale_w = scaling_info->dst_rect.width * 1000 /
3789 scaling_info->src_rect.width;
3791 if (scale_w < 250 || scale_w > 16000)
3794 scale_h = scaling_info->dst_rect.height * 1000 /
3795 scaling_info->src_rect.height;
3797 if (scale_h < 250 || scale_h > 16000)
3801 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3802 * assume reasonable defaults based on the format.
3808 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3809 uint64_t *tiling_flags, bool *tmz_surface)
3811 struct amdgpu_bo *rbo;
3816 *tmz_surface = false;
3820 rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3821 r = amdgpu_bo_reserve(rbo, false);
3824 /* Don't show error message when returning -ERESTARTSYS */
3825 if (r != -ERESTARTSYS)
3826 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3831 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3834 *tmz_surface = amdgpu_bo_encrypted(rbo);
3836 amdgpu_bo_unreserve(rbo);
3841 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3843 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
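/*
 * DCC_OFFSET_256B holds the DCC metadata offset in 256-byte units;
 * an offset of zero means the surface carries no DCC metadata.
 */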
3845 return offset ? (address + offset * 256) : 0;
3849 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3850 const struct amdgpu_framebuffer *afb,
3851 const enum surface_pixel_format format,
3852 const enum dc_rotation_angle rotation,
3853 const struct plane_size *plane_size,
3854 const union dc_tiling_info *tiling_info,
3855 const uint64_t info,
3856 struct dc_plane_dcc_param *dcc,
3857 struct dc_plane_address *address,
3858 bool force_disable_dcc)
3860 struct dc *dc = adev->dm.dc;
3861 struct dc_dcc_surface_param input;
3862 struct dc_surface_dcc_cap output;
3863 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3864 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3865 uint64_t dcc_address;
3867 memset(&input, 0, sizeof(input));
3868 memset(&output, 0, sizeof(output));
3870 if (force_disable_dcc)
3876 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3879 if (!dc->cap_funcs.get_dcc_compression_cap)
3882 input.format = format;
3883 input.surface_size.width = plane_size->surface_size.width;
3884 input.surface_size.height = plane_size->surface_size.height;
3885 input.swizzle_mode = tiling_info->gfx9.swizzle;
3887 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3888 input.scan = SCAN_DIRECTION_HORIZONTAL;
3889 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3890 input.scan = SCAN_DIRECTION_VERTICAL;
3892 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3895 if (!output.capable)
3898 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3903 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3904 dcc->independent_64b_blks = i64b;
3906 dcc_address = get_dcc_address(afb->address, info);
3907 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3908 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3914 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3915 const struct amdgpu_framebuffer *afb,
3916 const enum surface_pixel_format format,
3917 const enum dc_rotation_angle rotation,
3918 const uint64_t tiling_flags,
3919 union dc_tiling_info *tiling_info,
3920 struct plane_size *plane_size,
3921 struct dc_plane_dcc_param *dcc,
3922 struct dc_plane_address *address,
3924 bool force_disable_dcc)
3926 const struct drm_framebuffer *fb = &afb->base;
3929 memset(tiling_info, 0, sizeof(*tiling_info));
3930 memset(plane_size, 0, sizeof(*plane_size));
3931 memset(dcc, 0, sizeof(*dcc));
3932 memset(address, 0, sizeof(*address));
3934 address->tmz_surface = tmz_surface;
3936 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3937 plane_size->surface_size.x = 0;
3938 plane_size->surface_size.y = 0;
3939 plane_size->surface_size.width = fb->width;
3940 plane_size->surface_size.height = fb->height;
3941 plane_size->surface_pitch =
3942 fb->pitches[0] / fb->format->cpp[0];
3944 address->type = PLN_ADDR_TYPE_GRAPHICS;
3945 address->grph.addr.low_part = lower_32_bits(afb->address);
3946 address->grph.addr.high_part = upper_32_bits(afb->address);
3947 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3948 uint64_t chroma_addr = afb->address + fb->offsets[1];
3950 plane_size->surface_size.x = 0;
3951 plane_size->surface_size.y = 0;
3952 plane_size->surface_size.width = fb->width;
3953 plane_size->surface_size.height = fb->height;
3954 plane_size->surface_pitch =
3955 fb->pitches[0] / fb->format->cpp[0];
3957 plane_size->chroma_size.x = 0;
3958 plane_size->chroma_size.y = 0;
3959 /* TODO: set these based on surface format */
3960 plane_size->chroma_size.width = fb->width / 2;
3961 plane_size->chroma_size.height = fb->height / 2;
3963 plane_size->chroma_pitch =
3964 fb->pitches[1] / fb->format->cpp[1];
3966 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3967 address->video_progressive.luma_addr.low_part =
3968 lower_32_bits(afb->address);
3969 address->video_progressive.luma_addr.high_part =
3970 upper_32_bits(afb->address);
3971 address->video_progressive.chroma_addr.low_part =
3972 lower_32_bits(chroma_addr);
3973 address->video_progressive.chroma_addr.high_part =
3974 upper_32_bits(chroma_addr);
3977 /* Fill GFX8 params */
3978 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3979 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3981 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3982 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3983 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3984 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3985 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3987 /* XXX fix me for VI */
3988 tiling_info->gfx8.num_banks = num_banks;
3989 tiling_info->gfx8.array_mode =
3990 DC_ARRAY_2D_TILED_THIN1;
3991 tiling_info->gfx8.tile_split = tile_split;
3992 tiling_info->gfx8.bank_width = bankw;
3993 tiling_info->gfx8.bank_height = bankh;
3994 tiling_info->gfx8.tile_aspect = mtaspect;
3995 tiling_info->gfx8.tile_mode =
3996 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3997 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3998 == DC_ARRAY_1D_TILED_THIN1) {
3999 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4002 tiling_info->gfx8.pipe_config =
4003 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4005 if (adev->asic_type == CHIP_VEGA10 ||
4006 adev->asic_type == CHIP_VEGA12 ||
4007 adev->asic_type == CHIP_VEGA20 ||
4008 adev->asic_type == CHIP_NAVI10 ||
4009 adev->asic_type == CHIP_NAVI14 ||
4010 adev->asic_type == CHIP_NAVI12 ||
4011 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
4012 adev->asic_type == CHIP_SIENNA_CICHLID ||
4013 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4015 #if defined(CONFIG_DRM_AMD_DC_DCN3_02)
4016 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4018 #if defined(CONFIG_DRM_AMD_DC_DCN3_01)
4019 adev->asic_type == CHIP_VANGOGH ||
4021 adev->asic_type == CHIP_RENOIR ||
4022 adev->asic_type == CHIP_RAVEN) {
4023 /* Fill GFX9 params */
4024 tiling_info->gfx9.num_pipes =
4025 adev->gfx.config.gb_addr_config_fields.num_pipes;
4026 tiling_info->gfx9.num_banks =
4027 adev->gfx.config.gb_addr_config_fields.num_banks;
4028 tiling_info->gfx9.pipe_interleave =
4029 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4030 tiling_info->gfx9.num_shader_engines =
4031 adev->gfx.config.gb_addr_config_fields.num_se;
4032 tiling_info->gfx9.max_compressed_frags =
4033 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4034 tiling_info->gfx9.num_rb_per_se =
4035 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4036 tiling_info->gfx9.swizzle =
4037 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
4038 tiling_info->gfx9.shaderEnable = 1;
4040 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
4041 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4042 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4043 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4044 adev->asic_type == CHIP_VANGOGH)
4045 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4047 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4048 plane_size, tiling_info,
4049 tiling_flags, dcc, address,
4059 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4060 bool *per_pixel_alpha, bool *global_alpha,
4061 int *global_alpha_value)
4063 *per_pixel_alpha = false;
4064 *global_alpha = false;
4065 *global_alpha_value = 0xff;
4067 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4070 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4071 static const uint32_t alpha_formats[] = {
4072 DRM_FORMAT_ARGB8888,
4073 DRM_FORMAT_RGBA8888,
4074 DRM_FORMAT_ABGR8888,
4076 uint32_t format = plane_state->fb->format->format;
4079 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4080 if (format == alpha_formats[i]) {
4081 *per_pixel_alpha = true;
4087 if (plane_state->alpha < 0xffff) {
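/*
 * DRM expresses plane alpha as a 16-bit value (0xffff == fully opaque);
 * DC takes an 8-bit global alpha, hence the shift by 8 below.
 */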
4088 *global_alpha = true;
4089 *global_alpha_value = plane_state->alpha >> 8;
4094 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4095 const enum surface_pixel_format format,
4096 enum dc_color_space *color_space)
4100 *color_space = COLOR_SPACE_SRGB;
4102 /* DRM color properties only affect non-RGB formats. */
4103 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4106 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4108 switch (plane_state->color_encoding) {
4109 case DRM_COLOR_YCBCR_BT601:
4111 *color_space = COLOR_SPACE_YCBCR601;
4113 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4116 case DRM_COLOR_YCBCR_BT709:
4118 *color_space = COLOR_SPACE_YCBCR709;
4120 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4123 case DRM_COLOR_YCBCR_BT2020:
4125 *color_space = COLOR_SPACE_2020_YCBCR;
4138 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4139 const struct drm_plane_state *plane_state,
4140 const uint64_t tiling_flags,
4141 struct dc_plane_info *plane_info,
4142 struct dc_plane_address *address,
4144 bool force_disable_dcc)
4146 const struct drm_framebuffer *fb = plane_state->fb;
4147 const struct amdgpu_framebuffer *afb =
4148 to_amdgpu_framebuffer(plane_state->fb);
4149 struct drm_format_name_buf format_name;
4152 memset(plane_info, 0, sizeof(*plane_info));
4154 switch (fb->format->format) {
4156 plane_info->format =
4157 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4159 case DRM_FORMAT_RGB565:
4160 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4162 case DRM_FORMAT_XRGB8888:
4163 case DRM_FORMAT_ARGB8888:
4164 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4166 case DRM_FORMAT_XRGB2101010:
4167 case DRM_FORMAT_ARGB2101010:
4168 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4170 case DRM_FORMAT_XBGR2101010:
4171 case DRM_FORMAT_ABGR2101010:
4172 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4174 case DRM_FORMAT_XBGR8888:
4175 case DRM_FORMAT_ABGR8888:
4176 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4178 case DRM_FORMAT_NV21:
4179 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4181 case DRM_FORMAT_NV12:
4182 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4184 case DRM_FORMAT_P010:
4185 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4187 case DRM_FORMAT_XRGB16161616F:
4188 case DRM_FORMAT_ARGB16161616F:
4189 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4191 case DRM_FORMAT_XBGR16161616F:
4192 case DRM_FORMAT_ABGR16161616F:
4193 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4197 "Unsupported screen format %s\n",
4198 drm_get_format_name(fb->format->format, &format_name));
4202 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4203 case DRM_MODE_ROTATE_0:
4204 plane_info->rotation = ROTATION_ANGLE_0;
4206 case DRM_MODE_ROTATE_90:
4207 plane_info->rotation = ROTATION_ANGLE_90;
4209 case DRM_MODE_ROTATE_180:
4210 plane_info->rotation = ROTATION_ANGLE_180;
4212 case DRM_MODE_ROTATE_270:
4213 plane_info->rotation = ROTATION_ANGLE_270;
4216 plane_info->rotation = ROTATION_ANGLE_0;
4220 plane_info->visible = true;
4221 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4223 plane_info->layer_index = 0;
4225 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4226 &plane_info->color_space);
4230 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4231 plane_info->rotation, tiling_flags,
4232 &plane_info->tiling_info,
4233 &plane_info->plane_size,
4234 &plane_info->dcc, address, tmz_surface,
4239 fill_blending_from_plane_state(
4240 plane_state, &plane_info->per_pixel_alpha,
4241 &plane_info->global_alpha, &plane_info->global_alpha_value);
4246 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4247 struct dc_plane_state *dc_plane_state,
4248 struct drm_plane_state *plane_state,
4249 struct drm_crtc_state *crtc_state)
4251 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4252 struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4253 struct dc_scaling_info scaling_info;
4254 struct dc_plane_info plane_info;
4256 bool force_disable_dcc = false;
4258 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4262 dc_plane_state->src_rect = scaling_info.src_rect;
4263 dc_plane_state->dst_rect = scaling_info.dst_rect;
4264 dc_plane_state->clip_rect = scaling_info.clip_rect;
4265 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4267 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4268 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4269 dm_plane_state->tiling_flags,
4271 &dc_plane_state->address,
4272 dm_plane_state->tmz_surface,
4277 dc_plane_state->format = plane_info.format;
4278 dc_plane_state->color_space = plane_info.color_space;
4280 dc_plane_state->plane_size = plane_info.plane_size;
4281 dc_plane_state->rotation = plane_info.rotation;
4282 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4283 dc_plane_state->stereo_format = plane_info.stereo_format;
4284 dc_plane_state->tiling_info = plane_info.tiling_info;
4285 dc_plane_state->visible = plane_info.visible;
4286 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4287 dc_plane_state->global_alpha = plane_info.global_alpha;
4288 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4289 dc_plane_state->dcc = plane_info.dcc;
4290 	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
4293 * Always set input transfer function, since plane state is refreshed
4296 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4303 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4304 const struct dm_connector_state *dm_state,
4305 struct dc_stream_state *stream)
4307 enum amdgpu_rmx_type rmx_type;
4309 	struct rect src = { 0 }; /* viewport in composition space */
4310 struct rect dst = { 0 }; /* stream addressable area */
4312 /* no mode. nothing to be done */
4316 /* Full screen scaling by default */
4317 src.width = mode->hdisplay;
4318 src.height = mode->vdisplay;
4319 dst.width = stream->timing.h_addressable;
4320 dst.height = stream->timing.v_addressable;
4323 rmx_type = dm_state->scaling;
4324 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4325 if (src.width * dst.height <
4326 src.height * dst.width) {
4327 /* height needs less upscaling/more downscaling */
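/*
 * Illustrative numbers: a 1280x1024 src on a 1920x1080 addressable
 * area has 1280*1080 < 1024*1920, so dst.width shrinks to
 * 1280*1080/1024 = 1350 and the image ends up pillarboxed at 1350x1080.
 */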
4328 dst.width = src.width *
4329 dst.height / src.height;
4331 /* width needs less upscaling/more downscaling */
4332 dst.height = src.height *
4333 dst.width / src.width;
4335 } else if (rmx_type == RMX_CENTER) {
4339 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4340 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4342 if (dm_state->underscan_enable) {
4343 dst.x += dm_state->underscan_hborder / 2;
4344 dst.y += dm_state->underscan_vborder / 2;
4345 dst.width -= dm_state->underscan_hborder;
4346 dst.height -= dm_state->underscan_vborder;
4353 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4354 dst.x, dst.y, dst.width, dst.height);
4358 static enum dc_color_depth
4359 convert_color_depth_from_display_info(const struct drm_connector *connector,
4360 bool is_y420, int requested_bpc)
4367 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4368 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4370 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4372 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4375 bpc = (uint8_t)connector->display_info.bpc;
4376 /* Assume 8 bpc by default if no bpc is specified. */
4377 bpc = bpc ? bpc : 8;
4380 if (requested_bpc > 0) {
4382 * Cap display bpc based on the user requested value.
4384 	 * The value for state->max_bpc may not be correctly updated
4385 * depending on when the connector gets added to the state
4386 * or if this was called outside of atomic check, so it
4387 * can't be used directly.
4389 bpc = min_t(u8, bpc, requested_bpc);
4391 /* Round down to the nearest even number. */
4392 bpc = bpc - (bpc & 1);
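/*
 * Example: a sink reporting 12 bpc with a user cap of 10 becomes
 * min(12, 10) = 10, which is already even and maps to
 * COLOR_DEPTH_101010 in the switch below.
 */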
4398 	 * Temporary workaround: DRM doesn't parse color depth for
4399 * EDID revision before 1.4
4400 * TODO: Fix edid parsing
4402 return COLOR_DEPTH_888;
4404 return COLOR_DEPTH_666;
4406 return COLOR_DEPTH_888;
4408 return COLOR_DEPTH_101010;
4410 return COLOR_DEPTH_121212;
4412 return COLOR_DEPTH_141414;
4414 return COLOR_DEPTH_161616;
4416 return COLOR_DEPTH_UNDEFINED;
4420 static enum dc_aspect_ratio
4421 get_aspect_ratio(const struct drm_display_mode *mode_in)
4423 /* 1-1 mapping, since both enums follow the HDMI spec. */
4424 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4427 static enum dc_color_space
4428 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4430 enum dc_color_space color_space = COLOR_SPACE_SRGB;
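/*
 * Example: 480p (27.00 MHz) stays below the 27.03 MHz cutoff used below
 * and selects YCbCr601, while 720p/1080p (74.25/148.5 MHz) select
 * YCbCr709.
 */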
4432 switch (dc_crtc_timing->pixel_encoding) {
4433 case PIXEL_ENCODING_YCBCR422:
4434 case PIXEL_ENCODING_YCBCR444:
4435 case PIXEL_ENCODING_YCBCR420:
4438 		 * 27030 kHz is the separation point between HDTV and SDTV
4439 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4442 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4443 if (dc_crtc_timing->flags.Y_ONLY)
4445 COLOR_SPACE_YCBCR709_LIMITED;
4447 color_space = COLOR_SPACE_YCBCR709;
4449 if (dc_crtc_timing->flags.Y_ONLY)
4451 COLOR_SPACE_YCBCR601_LIMITED;
4453 color_space = COLOR_SPACE_YCBCR601;
4458 case PIXEL_ENCODING_RGB:
4459 color_space = COLOR_SPACE_SRGB;
4470 static bool adjust_colour_depth_from_display_info(
4471 struct dc_crtc_timing *timing_out,
4472 const struct drm_display_info *info)
4474 enum dc_color_depth depth = timing_out->display_color_depth;
4477 normalized_clk = timing_out->pix_clk_100hz / 10;
4478 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4479 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4480 normalized_clk /= 2;
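/*
 * Example: 4k60 is 594000 kHz after the division above; at 10 bpc the
 * switch below scales that to 742500 kHz, which fails against a typical
 * 600000 kHz max_tmds_clock, so the loop falls back to 8 bpc.
 */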
4481 	/* Adjust the pixel clock per the HDMI spec based on colour depth */
4483 case COLOR_DEPTH_888:
4485 case COLOR_DEPTH_101010:
4486 normalized_clk = (normalized_clk * 30) / 24;
4488 case COLOR_DEPTH_121212:
4489 normalized_clk = (normalized_clk * 36) / 24;
4491 case COLOR_DEPTH_161616:
4492 normalized_clk = (normalized_clk * 48) / 24;
4495 /* The above depths are the only ones valid for HDMI. */
4498 if (normalized_clk <= info->max_tmds_clock) {
4499 timing_out->display_color_depth = depth;
4502 } while (--depth > COLOR_DEPTH_666);
4506 static void fill_stream_properties_from_drm_display_mode(
4507 struct dc_stream_state *stream,
4508 const struct drm_display_mode *mode_in,
4509 const struct drm_connector *connector,
4510 const struct drm_connector_state *connector_state,
4511 const struct dc_stream_state *old_stream,
4514 struct dc_crtc_timing *timing_out = &stream->timing;
4515 const struct drm_display_info *info = &connector->display_info;
4516 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4517 struct hdmi_vendor_infoframe hv_frame;
4518 struct hdmi_avi_infoframe avi_frame;
4520 memset(&hv_frame, 0, sizeof(hv_frame));
4521 memset(&avi_frame, 0, sizeof(avi_frame));
4523 timing_out->h_border_left = 0;
4524 timing_out->h_border_right = 0;
4525 timing_out->v_border_top = 0;
4526 timing_out->v_border_bottom = 0;
4527 /* TODO: un-hardcode */
4528 if (drm_mode_is_420_only(info, mode_in)
4529 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4530 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4531 else if (drm_mode_is_420_also(info, mode_in)
4532 && aconnector->force_yuv420_output)
4533 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4534 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4535 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4536 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4538 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4540 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4541 timing_out->display_color_depth = convert_color_depth_from_display_info(
4543 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4545 timing_out->scan_type = SCANNING_TYPE_NODATA;
4546 timing_out->hdmi_vic = 0;
4549 timing_out->vic = old_stream->timing.vic;
4550 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4551 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4553 timing_out->vic = drm_match_cea_mode(mode_in);
4554 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4555 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4556 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4557 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4560 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4561 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4562 timing_out->vic = avi_frame.video_code;
4563 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4564 timing_out->hdmi_vic = hv_frame.vic;
4567 timing_out->h_addressable = mode_in->crtc_hdisplay;
4568 timing_out->h_total = mode_in->crtc_htotal;
4569 timing_out->h_sync_width =
4570 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4571 timing_out->h_front_porch =
4572 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4573 timing_out->v_total = mode_in->crtc_vtotal;
4574 timing_out->v_addressable = mode_in->crtc_vdisplay;
4575 timing_out->v_front_porch =
4576 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4577 timing_out->v_sync_width =
4578 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4579 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4580 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
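/*
 * Example: for CEA-861 1920x1080@60 (VIC 16) this yields
 * h_addressable=1920, h_front_porch=88, h_sync_width=44, h_total=2200,
 * v_addressable=1080, v_front_porch=4, v_sync_width=5, v_total=1125 and
 * pix_clk_100hz=1485000 (148.5 MHz).
 */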
4582 stream->output_color_space = get_output_color_space(timing_out);
4584 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4585 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4586 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4587 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4588 drm_mode_is_420_also(info, mode_in) &&
4589 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4590 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4591 adjust_colour_depth_from_display_info(timing_out, info);
4596 static void fill_audio_info(struct audio_info *audio_info,
4597 const struct drm_connector *drm_connector,
4598 const struct dc_sink *dc_sink)
4601 int cea_revision = 0;
4602 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4604 audio_info->manufacture_id = edid_caps->manufacturer_id;
4605 audio_info->product_id = edid_caps->product_id;
4607 cea_revision = drm_connector->display_info.cea_rev;
4609 strscpy(audio_info->display_name,
4610 edid_caps->display_name,
4611 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4613 if (cea_revision >= 3) {
4614 audio_info->mode_count = edid_caps->audio_mode_count;
4616 for (i = 0; i < audio_info->mode_count; ++i) {
4617 audio_info->modes[i].format_code =
4618 (enum audio_format_code)
4619 (edid_caps->audio_modes[i].format_code);
4620 audio_info->modes[i].channel_count =
4621 edid_caps->audio_modes[i].channel_count;
4622 audio_info->modes[i].sample_rates.all =
4623 edid_caps->audio_modes[i].sample_rate;
4624 audio_info->modes[i].sample_size =
4625 edid_caps->audio_modes[i].sample_size;
4629 audio_info->flags.all = edid_caps->speaker_flags;
4631 /* TODO: We only check for the progressive mode, check for interlace mode too */
4632 if (drm_connector->latency_present[0]) {
4633 audio_info->video_latency = drm_connector->video_latency[0];
4634 audio_info->audio_latency = drm_connector->audio_latency[0];
4637 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4642 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4643 struct drm_display_mode *dst_mode)
4645 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4646 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4647 dst_mode->crtc_clock = src_mode->crtc_clock;
4648 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4649 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4650 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4651 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4652 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4653 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4654 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4655 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4656 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4657 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4658 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4662 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4663 const struct drm_display_mode *native_mode,
4666 if (scale_enabled) {
4667 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4668 } else if (native_mode->clock == drm_mode->clock &&
4669 native_mode->htotal == drm_mode->htotal &&
4670 native_mode->vtotal == drm_mode->vtotal) {
4671 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4673 		/* no scaling and no amdgpu-inserted mode, no need to patch */
4677 static struct dc_sink *
4678 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4680 struct dc_sink_init_data sink_init_data = { 0 };
4681 struct dc_sink *sink = NULL;
4682 sink_init_data.link = aconnector->dc_link;
4683 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4685 sink = dc_sink_create(&sink_init_data);
4687 DRM_ERROR("Failed to create sink!\n");
4690 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4695 static void set_multisync_trigger_params(
4696 struct dc_stream_state *stream)
4698 if (stream->triggered_crtc_reset.enabled) {
4699 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4700 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4704 static void set_master_stream(struct dc_stream_state *stream_set[],
4707 int j, highest_rfr = 0, master_stream = 0;
4709 for (j = 0; j < stream_count; j++) {
4710 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4711 int refresh_rate = 0;
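/*
 * Refresh rate in Hz: pix_clk_100hz * 100 gives Hz, divided by the
 * pixels per frame, e.g. 1485000 * 100 / (2200 * 1125) = 60 for a
 * 1080p60 stream.
 */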
4713 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4714 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4715 if (refresh_rate > highest_rfr) {
4716 highest_rfr = refresh_rate;
4721 for (j = 0; j < stream_count; j++) {
4723 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4727 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4731 if (context->stream_count < 2)
4733 for (i = 0; i < context->stream_count ; i++) {
4734 if (!context->streams[i])
4737 * TODO: add a function to read AMD VSDB bits and set
4738 * crtc_sync_master.multi_sync_enabled flag
4739 * For now it's set to false
4741 set_multisync_trigger_params(context->streams[i]);
4743 set_master_stream(context->streams, context->stream_count);
4746 static struct dc_stream_state *
4747 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4748 const struct drm_display_mode *drm_mode,
4749 const struct dm_connector_state *dm_state,
4750 const struct dc_stream_state *old_stream,
4753 struct drm_display_mode *preferred_mode = NULL;
4754 struct drm_connector *drm_connector;
4755 const struct drm_connector_state *con_state =
4756 dm_state ? &dm_state->base : NULL;
4757 struct dc_stream_state *stream = NULL;
4758 struct drm_display_mode mode = *drm_mode;
4759 bool native_mode_found = false;
4760 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4762 int preferred_refresh = 0;
4763 #if defined(CONFIG_DRM_AMD_DC_DCN)
4764 struct dsc_dec_dpcd_caps dsc_caps;
4766 uint32_t link_bandwidth_kbps;
4768 struct dc_sink *sink = NULL;
4769 if (aconnector == NULL) {
4770 DRM_ERROR("aconnector is NULL!\n");
4774 drm_connector = &aconnector->base;
4776 if (!aconnector->dc_sink) {
4777 sink = create_fake_sink(aconnector);
4781 sink = aconnector->dc_sink;
4782 dc_sink_retain(sink);
4785 stream = dc_create_stream_for_sink(sink);
4787 if (stream == NULL) {
4788 DRM_ERROR("Failed to create stream for sink!\n");
4792 stream->dm_stream_context = aconnector;
4794 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4795 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4797 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4798 /* Search for preferred mode */
4799 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4800 native_mode_found = true;
4804 if (!native_mode_found)
4805 preferred_mode = list_first_entry_or_null(
4806 &aconnector->base.modes,
4807 struct drm_display_mode,
4810 mode_refresh = drm_mode_vrefresh(&mode);
4812 if (preferred_mode == NULL) {
4814 		 * This may not be an error: the use case is when we have no
4815 		 * usermode calls to reset and set the mode upon hotplug. In this
4816 		 * case, we call set mode ourselves to restore the previous mode
4817 		 * and the mode list may not be filled in yet.
4819 DRM_DEBUG_DRIVER("No preferred mode found\n");
4821 decide_crtc_timing_for_drm_display_mode(
4822 &mode, preferred_mode,
4823 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4824 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4828 drm_mode_set_crtcinfo(&mode, 0);
4831 * If scaling is enabled and refresh rate didn't change
4832 * we copy the vic and polarities of the old timings
4834 if (!scale || mode_refresh != preferred_refresh)
4835 fill_stream_properties_from_drm_display_mode(stream,
4836 &mode, &aconnector->base, con_state, NULL, requested_bpc);
4838 fill_stream_properties_from_drm_display_mode(stream,
4839 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
4841 stream->timing.flags.DSC = 0;
4843 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4844 #if defined(CONFIG_DRM_AMD_DC_DCN)
4845 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4846 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4847 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4850 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4851 dc_link_get_link_cap(aconnector->dc_link));
4853 #if defined(CONFIG_DRM_AMD_DC_DCN)
4854 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4855 /* Set DSC policy according to dsc_clock_en */
4856 dc_dsc_policy_set_enable_dsc_when_not_needed(
4857 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4859 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4861 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4862 link_bandwidth_kbps,
4864 &stream->timing.dsc_cfg))
4865 stream->timing.flags.DSC = 1;
4866 /* Overwrite the stream flag if DSC is enabled through debugfs */
4867 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4868 stream->timing.flags.DSC = 1;
4870 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4871 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4873 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4874 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4876 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4877 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4882 update_stream_scaling_settings(&mode, dm_state, stream);
4885 &stream->audio_info,
4889 update_stream_signal(stream, sink);
4891 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4892 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4894 if (stream->link->psr_settings.psr_feature_enabled) {
4896 		/* Decide whether the stream supports VSC SDP colorimetry
4897 		 * before building the VSC info packet */
4899 stream->use_vsc_sdp_for_colorimetry = false;
4900 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4901 stream->use_vsc_sdp_for_colorimetry =
4902 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4904 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4905 stream->use_vsc_sdp_for_colorimetry = true;
4907 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4910 dc_sink_release(sink);
4915 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4917 drm_crtc_cleanup(crtc);
4921 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4922 struct drm_crtc_state *state)
4924 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4926 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4928 dc_stream_release(cur->stream);
4931 __drm_atomic_helper_crtc_destroy_state(state);
4937 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4939 struct dm_crtc_state *state;
4942 dm_crtc_destroy_state(crtc, crtc->state);
4944 state = kzalloc(sizeof(*state), GFP_KERNEL);
4945 if (WARN_ON(!state))
4948 __drm_atomic_helper_crtc_reset(crtc, &state->base);
4951 static struct drm_crtc_state *
4952 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4954 struct dm_crtc_state *state, *cur;
4956 cur = to_dm_crtc_state(crtc->state);
4958 if (WARN_ON(!crtc->state))
4961 state = kzalloc(sizeof(*state), GFP_KERNEL);
4965 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4968 state->stream = cur->stream;
4969 dc_stream_retain(state->stream);
4972 state->active_planes = cur->active_planes;
4973 state->vrr_infopacket = cur->vrr_infopacket;
4974 state->abm_level = cur->abm_level;
4975 state->vrr_supported = cur->vrr_supported;
4976 state->freesync_config = cur->freesync_config;
4977 state->crc_src = cur->crc_src;
4978 state->cm_has_degamma = cur->cm_has_degamma;
4979 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4981 	/* TODO: Duplicate dc_stream here once the stream object is flattened */
4983 return &state->base;
4986 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4988 enum dc_irq_source irq_source;
4989 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4990 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
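/*
 * The per-OTG vupdate interrupt sources are laid out contiguously, so
 * adding the OTG instance to IRQ_TYPE_VUPDATE picks this CRTC's source.
 */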
4993 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4995 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4997 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4998 acrtc->crtc_id, enable ? "en" : "dis", rc);
5002 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5004 enum dc_irq_source irq_source;
5005 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5006 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5007 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5011 /* vblank irq on -> Only need vupdate irq in vrr mode */
5012 if (amdgpu_dm_vrr_active(acrtc_state))
5013 rc = dm_set_vupdate_irq(crtc, true);
5015 /* vblank irq off -> vupdate irq off */
5016 rc = dm_set_vupdate_irq(crtc, false);
5022 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5023 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5026 static int dm_enable_vblank(struct drm_crtc *crtc)
5028 return dm_set_vblank(crtc, true);
5031 static void dm_disable_vblank(struct drm_crtc *crtc)
5033 dm_set_vblank(crtc, false);
5036 /* Only the options currently available for the driver are implemented */
5037 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5038 .reset = dm_crtc_reset_state,
5039 .destroy = amdgpu_dm_crtc_destroy,
5040 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5041 .set_config = drm_atomic_helper_set_config,
5042 .page_flip = drm_atomic_helper_page_flip,
5043 .atomic_duplicate_state = dm_crtc_duplicate_state,
5044 .atomic_destroy_state = dm_crtc_destroy_state,
5045 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5046 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5047 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5048 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5049 .enable_vblank = dm_enable_vblank,
5050 .disable_vblank = dm_disable_vblank,
5051 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5054 static enum drm_connector_status
5055 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5058 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5062 * 1. This interface is NOT called in context of HPD irq.
5063 * 2. This interface *is called* in context of user-mode ioctl. Which
5064 * makes it a bad place for *any* MST-related activity.
5067 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5068 !aconnector->fake_enable)
5069 connected = (aconnector->dc_sink != NULL);
5071 connected = (aconnector->base.force == DRM_FORCE_ON);
5073 update_subconnector_property(aconnector);
5075 return (connected ? connector_status_connected :
5076 connector_status_disconnected);
5079 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5080 struct drm_connector_state *connector_state,
5081 struct drm_property *property,
5084 struct drm_device *dev = connector->dev;
5085 struct amdgpu_device *adev = drm_to_adev(dev);
5086 struct dm_connector_state *dm_old_state =
5087 to_dm_connector_state(connector->state);
5088 struct dm_connector_state *dm_new_state =
5089 to_dm_connector_state(connector_state);
5093 if (property == dev->mode_config.scaling_mode_property) {
5094 enum amdgpu_rmx_type rmx_type;
5097 case DRM_MODE_SCALE_CENTER:
5098 rmx_type = RMX_CENTER;
5100 case DRM_MODE_SCALE_ASPECT:
5101 rmx_type = RMX_ASPECT;
5103 case DRM_MODE_SCALE_FULLSCREEN:
5104 rmx_type = RMX_FULL;
5106 case DRM_MODE_SCALE_NONE:
5112 if (dm_old_state->scaling == rmx_type)
5115 dm_new_state->scaling = rmx_type;
5117 } else if (property == adev->mode_info.underscan_hborder_property) {
5118 dm_new_state->underscan_hborder = val;
5120 } else if (property == adev->mode_info.underscan_vborder_property) {
5121 dm_new_state->underscan_vborder = val;
5123 } else if (property == adev->mode_info.underscan_property) {
5124 dm_new_state->underscan_enable = val;
5126 } else if (property == adev->mode_info.abm_level_property) {
5127 dm_new_state->abm_level = val;
5134 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5135 const struct drm_connector_state *state,
5136 struct drm_property *property,
5139 struct drm_device *dev = connector->dev;
5140 struct amdgpu_device *adev = drm_to_adev(dev);
5141 struct dm_connector_state *dm_state =
5142 to_dm_connector_state(state);
5145 if (property == dev->mode_config.scaling_mode_property) {
5146 switch (dm_state->scaling) {
5148 *val = DRM_MODE_SCALE_CENTER;
5151 *val = DRM_MODE_SCALE_ASPECT;
5154 *val = DRM_MODE_SCALE_FULLSCREEN;
5158 *val = DRM_MODE_SCALE_NONE;
5162 } else if (property == adev->mode_info.underscan_hborder_property) {
5163 *val = dm_state->underscan_hborder;
5165 } else if (property == adev->mode_info.underscan_vborder_property) {
5166 *val = dm_state->underscan_vborder;
5168 } else if (property == adev->mode_info.underscan_property) {
5169 *val = dm_state->underscan_enable;
5171 } else if (property == adev->mode_info.abm_level_property) {
5172 *val = dm_state->abm_level;
5179 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5181 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5183 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5186 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5188 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5189 const struct dc_link *link = aconnector->dc_link;
5190 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5191 struct amdgpu_display_manager *dm = &adev->dm;
5194 	 * Call only if mst_mgr was initialized before, since it's not done
5195 * for all connector types.
5197 if (aconnector->mst_mgr.dev)
5198 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5200 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5201 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5203 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5204 link->type != dc_connection_none &&
5205 dm->backlight_dev) {
5206 backlight_device_unregister(dm->backlight_dev);
5207 dm->backlight_dev = NULL;
5211 if (aconnector->dc_em_sink)
5212 dc_sink_release(aconnector->dc_em_sink);
5213 aconnector->dc_em_sink = NULL;
5214 if (aconnector->dc_sink)
5215 dc_sink_release(aconnector->dc_sink);
5216 aconnector->dc_sink = NULL;
5218 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5219 drm_connector_unregister(connector);
5220 drm_connector_cleanup(connector);
5221 if (aconnector->i2c) {
5222 i2c_del_adapter(&aconnector->i2c->base);
5223 kfree(aconnector->i2c);
5225 kfree(aconnector->dm_dp_aux.aux.name);
5230 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5232 struct dm_connector_state *state =
5233 to_dm_connector_state(connector->state);
5235 if (connector->state)
5236 __drm_atomic_helper_connector_destroy_state(connector->state);
5240 state = kzalloc(sizeof(*state), GFP_KERNEL);
5243 state->scaling = RMX_OFF;
5244 state->underscan_enable = false;
5245 state->underscan_hborder = 0;
5246 state->underscan_vborder = 0;
5247 state->base.max_requested_bpc = 8;
5248 state->vcpi_slots = 0;
5250 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5251 state->abm_level = amdgpu_dm_abm_level;
5253 __drm_atomic_helper_connector_reset(connector, &state->base);
5257 struct drm_connector_state *
5258 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5260 struct dm_connector_state *state =
5261 to_dm_connector_state(connector->state);
5263 struct dm_connector_state *new_state =
5264 kmemdup(state, sizeof(*state), GFP_KERNEL);
5269 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5271 new_state->freesync_capable = state->freesync_capable;
5272 new_state->abm_level = state->abm_level;
5273 new_state->scaling = state->scaling;
5274 new_state->underscan_enable = state->underscan_enable;
5275 new_state->underscan_hborder = state->underscan_hborder;
5276 new_state->underscan_vborder = state->underscan_vborder;
5277 new_state->vcpi_slots = state->vcpi_slots;
5278 new_state->pbn = state->pbn;
5279 return &new_state->base;
5283 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5285 struct amdgpu_dm_connector *amdgpu_dm_connector =
5286 to_amdgpu_dm_connector(connector);
5289 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5290 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5291 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5292 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5297 #if defined(CONFIG_DEBUG_FS)
5298 connector_debugfs_init(amdgpu_dm_connector);
5304 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5305 .reset = amdgpu_dm_connector_funcs_reset,
5306 .detect = amdgpu_dm_connector_detect,
5307 .fill_modes = drm_helper_probe_single_connector_modes,
5308 .destroy = amdgpu_dm_connector_destroy,
5309 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5310 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5311 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5312 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5313 .late_register = amdgpu_dm_connector_late_register,
5314 .early_unregister = amdgpu_dm_connector_unregister
5317 static int get_modes(struct drm_connector *connector)
5319 return amdgpu_dm_connector_get_modes(connector);
5322 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5324 struct dc_sink_init_data init_params = {
5325 .link = aconnector->dc_link,
5326 .sink_signal = SIGNAL_TYPE_VIRTUAL
5330 if (!aconnector->base.edid_blob_ptr) {
5331 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5332 aconnector->base.name);
5334 aconnector->base.force = DRM_FORCE_OFF;
5335 aconnector->base.override_edid = false;
5339 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5341 aconnector->edid = edid;
5343 aconnector->dc_em_sink = dc_link_add_remote_sink(
5344 aconnector->dc_link,
5346 (edid->extensions + 1) * EDID_LENGTH,
5349 if (aconnector->base.force == DRM_FORCE_ON) {
5350 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5351 aconnector->dc_link->local_sink :
5352 aconnector->dc_em_sink;
5353 dc_sink_retain(aconnector->dc_sink);
5357 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5359 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5362 	 * In case of a headless boot with force-on for a DP managed connector,
5363 	 * those settings have to be != 0 to get an initial modeset
5365 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5366 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5367 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5371 aconnector->base.override_edid = true;
5372 create_eml_sink(aconnector);
5375 static struct dc_stream_state *
5376 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5377 const struct drm_display_mode *drm_mode,
5378 const struct dm_connector_state *dm_state,
5379 const struct dc_stream_state *old_stream)
5381 struct drm_connector *connector = &aconnector->base;
5382 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5383 struct dc_stream_state *stream;
5384 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5385 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5386 enum dc_status dc_result = DC_OK;
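/*
 * Build the stream at the requested bpc and, if DC rejects it, retry at
 * progressively lower depths (e.g. 10 -> 8 -> 6) until validation
 * passes or the options run out.
 */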
5389 stream = create_stream_for_sink(aconnector, drm_mode,
5390 dm_state, old_stream,
5392 if (stream == NULL) {
5393 DRM_ERROR("Failed to create stream for sink!\n");
5397 dc_result = dc_validate_stream(adev->dm.dc, stream);
5399 if (dc_result != DC_OK) {
5400 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5405 dc_status_to_str(dc_result));
5407 dc_stream_release(stream);
5409 requested_bpc -= 2; /* lower bpc to retry validation */
5412 } while (stream == NULL && requested_bpc >= 6);
5417 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5418 struct drm_display_mode *mode)
5420 int result = MODE_ERROR;
5421 struct dc_sink *dc_sink;
5422 /* TODO: Unhardcode stream count */
5423 struct dc_stream_state *stream;
5424 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5426 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5427 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5431 	 * Only run this the first time mode_valid is called to initialize
5434 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5435 !aconnector->dc_em_sink)
5436 handle_edid_mgmt(aconnector);
5438 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5440 if (dc_sink == NULL) {
5441 DRM_ERROR("dc_sink is NULL!\n");
5445 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5447 dc_stream_release(stream);
5452 	/* TODO: error handling */
5456 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5457 struct dc_info_packet *out)
5459 struct hdmi_drm_infoframe frame;
5460 unsigned char buf[30]; /* 26 + 4 */
5464 memset(out, 0, sizeof(*out));
5466 if (!state->hdr_output_metadata)
5469 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5473 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5477 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5481 /* Prepare the infopacket for DC. */
5482 switch (state->connector->connector_type) {
5483 case DRM_MODE_CONNECTOR_HDMIA:
5484 out->hb0 = 0x87; /* type */
5485 out->hb1 = 0x01; /* version */
5486 out->hb2 = 0x1A; /* length */
5487 out->sb[0] = buf[3]; /* checksum */
5491 case DRM_MODE_CONNECTOR_DisplayPort:
5492 case DRM_MODE_CONNECTOR_eDP:
5493 out->hb0 = 0x00; /* sdp id, zero */
5494 out->hb1 = 0x87; /* type */
5495 out->hb2 = 0x1D; /* payload len - 1 */
5496 out->hb3 = (0x13 << 2); /* sdp version */
5497 out->sb[0] = 0x01; /* version */
5498 out->sb[1] = 0x1A; /* length */
5506 memcpy(&out->sb[i], &buf[4], 26);
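/*
 * buf[0..3] from hdmi_drm_infoframe_pack_only() is the infoframe header
 * (type, version, length, checksum); only the 26-byte static metadata
 * payload is copied above, after the per-signal prefix bytes.
 */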
5509 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5510 sizeof(out->sb), false);
5516 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5517 const struct drm_connector_state *new_state)
5519 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5520 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5522 if (old_blob != new_blob) {
5523 if (old_blob && new_blob &&
5524 old_blob->length == new_blob->length)
5525 return memcmp(old_blob->data, new_blob->data,
5535 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5536 struct drm_atomic_state *state)
5538 struct drm_connector_state *new_con_state =
5539 drm_atomic_get_new_connector_state(state, conn);
5540 struct drm_connector_state *old_con_state =
5541 drm_atomic_get_old_connector_state(state, conn);
5542 struct drm_crtc *crtc = new_con_state->crtc;
5543 struct drm_crtc_state *new_crtc_state;
5549 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5550 struct dc_info_packet hdr_infopacket;
5552 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5556 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5557 if (IS_ERR(new_crtc_state))
5558 return PTR_ERR(new_crtc_state);
5561 * DC considers the stream backends changed if the
5562 * static metadata changes. Forcing the modeset also
5563 * gives a simple way for userspace to switch from
5564 * 8bpc to 10bpc when setting the metadata to enter
5567 * Changing the static metadata after it's been
5568 * set is permissible, however. So only force a
5569 * modeset if we're entering or exiting HDR.
5571 new_crtc_state->mode_changed =
5572 !old_con_state->hdr_output_metadata ||
5573 !new_con_state->hdr_output_metadata;
5579 static const struct drm_connector_helper_funcs
5580 amdgpu_dm_connector_helper_funcs = {
5582 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5583 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5584 	 * are missing after the user starts lightdm. So we need to renew the mode
5585 	 * list in the get_modes callback, not just return the mode count.
5587 .get_modes = get_modes,
5588 .mode_valid = amdgpu_dm_connector_mode_valid,
5589 .atomic_check = amdgpu_dm_connector_atomic_check,
5592 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5596 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5598 struct drm_atomic_state *state = new_crtc_state->state;
5599 struct drm_plane *plane;
5602 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5603 struct drm_plane_state *new_plane_state;
5605 /* Cursor planes are "fake". */
5606 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5609 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5611 if (!new_plane_state) {
5613 			 * The plane is enabled on the CRTC and hasn't changed
5614 * state. This means that it previously passed
5615 * validation and is therefore enabled.
5621 /* We need a framebuffer to be considered enabled. */
5622 num_active += (new_plane_state->fb != NULL);
5628 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5629 struct drm_crtc_state *new_crtc_state)
5631 struct dm_crtc_state *dm_new_crtc_state =
5632 to_dm_crtc_state(new_crtc_state);
5634 dm_new_crtc_state->active_planes = 0;
5636 if (!dm_new_crtc_state->stream)
5639 dm_new_crtc_state->active_planes =
5640 count_crtc_active_planes(new_crtc_state);
5643 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5644 struct drm_crtc_state *state)
5646 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5647 struct dc *dc = adev->dm.dc;
5648 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5651 dm_update_crtc_active_planes(crtc, state);
5653 if (unlikely(!dm_crtc_state->stream &&
5654 modeset_required(state, NULL, dm_crtc_state->stream))) {
5660 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5661 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5662 * planes are disabled, which is not supported by the hardware. And there is legacy
5663 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5665 if (state->enable &&
5666 !(state->plane_mask & drm_plane_mask(crtc->primary)))
5669 /* In some use cases, like reset, no stream is attached */
5670 if (!dm_crtc_state->stream)
5673 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5679 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5680 const struct drm_display_mode *mode,
5681 struct drm_display_mode *adjusted_mode)
5686 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5687 .disable = dm_crtc_helper_disable,
5688 .atomic_check = dm_crtc_helper_atomic_check,
5689 .mode_fixup = dm_crtc_helper_mode_fixup,
5690 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5693 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5698 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5700 switch (display_color_depth) {
5701 case COLOR_DEPTH_666:
5703 case COLOR_DEPTH_888:
5705 case COLOR_DEPTH_101010:
5707 case COLOR_DEPTH_121212:
5709 case COLOR_DEPTH_141414:
5711 case COLOR_DEPTH_161616:
5719 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5720 struct drm_crtc_state *crtc_state,
5721 struct drm_connector_state *conn_state)
5723 struct drm_atomic_state *state = crtc_state->state;
5724 struct drm_connector *connector = conn_state->connector;
5725 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5726 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5727 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5728 struct drm_dp_mst_topology_mgr *mst_mgr;
5729 struct drm_dp_mst_port *mst_port;
5730 enum dc_color_depth color_depth;
5732 bool is_y420 = false;
5734 if (!aconnector->port || !aconnector->dc_sink)
5737 mst_port = aconnector->port;
5738 mst_mgr = &aconnector->mst_port->mst_mgr;
5740 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5743 if (!state->duplicated) {
5744 int max_bpc = conn_state->max_requested_bpc;
5745 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5746 aconnector->force_yuv420_output;
5747 color_depth = convert_color_depth_from_display_info(connector,
5750 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5751 clock = adjusted_mode->clock;
5752 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
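/*
 * Rough example: 1080p60 (148500 kHz) at 8 bpc gives bpp = 24 and a PBN
 * of about 532; drm_dp_calc_pbn_mode() folds a ~0.6% margin into the
 * MST payload bandwidth number.
 */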
5754 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5757 dm_new_connector_state->pbn,
5758 dm_mst_get_pbn_divider(aconnector->dc_link));
5759 if (dm_new_connector_state->vcpi_slots < 0) {
5760 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5761 return dm_new_connector_state->vcpi_slots;
5766 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5767 .disable = dm_encoder_helper_disable,
5768 .atomic_check = dm_encoder_helper_atomic_check
5771 #if defined(CONFIG_DRM_AMD_DC_DCN)
5772 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5773 struct dc_state *dc_state)
5775 struct dc_stream_state *stream = NULL;
5776 struct drm_connector *connector;
5777 struct drm_connector_state *new_con_state, *old_con_state;
5778 struct amdgpu_dm_connector *aconnector;
5779 struct dm_connector_state *dm_conn_state;
5780 int i, j, clock, bpp;
5781 int vcpi, pbn_div, pbn = 0;
5783 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5785 aconnector = to_amdgpu_dm_connector(connector);
5787 if (!aconnector->port)
5790 if (!new_con_state || !new_con_state->crtc)
5793 dm_conn_state = to_dm_connector_state(new_con_state);
5795 for (j = 0; j < dc_state->stream_count; j++) {
5796 stream = dc_state->streams[j];
5800 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5809 if (stream->timing.flags.DSC != 1) {
5810 drm_dp_mst_atomic_enable_dsc(state,
5818 pbn_div = dm_mst_get_pbn_divider(stream->link);
5819 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5820 clock = stream->timing.pix_clk_100hz / 10;
5821 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
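/*
 * dsc_cfg.bits_per_pixel is in 1/16 bpp units (e.g. 128 for an 8.0 bpp
 * DSC target); the DSC variant of drm_dp_calc_pbn_mode() selected by
 * the 'true' argument accounts for that scaling.
 */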
5822 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5829 dm_conn_state->pbn = pbn;
5830 dm_conn_state->vcpi_slots = vcpi;
5836 static void dm_drm_plane_reset(struct drm_plane *plane)
5838 struct dm_plane_state *amdgpu_state = NULL;
5841 plane->funcs->atomic_destroy_state(plane, plane->state);
5843 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5844 WARN_ON(amdgpu_state == NULL);
5847 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5850 static struct drm_plane_state *
5851 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5853 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5855 old_dm_plane_state = to_dm_plane_state(plane->state);
5856 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5857 if (!dm_plane_state)
5860 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5862 if (old_dm_plane_state->dc_state) {
5863 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5864 dc_plane_state_retain(dm_plane_state->dc_state);
5867 /* Framebuffer hasn't been updated yet, so retain old flags. */
5868 dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5869 dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5871 return &dm_plane_state->base;
5874 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5875 struct drm_plane_state *state)
5877 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5879 if (dm_plane_state->dc_state)
5880 dc_plane_state_release(dm_plane_state->dc_state);
5882 drm_atomic_helper_plane_destroy_state(plane, state);
5885 static const struct drm_plane_funcs dm_plane_funcs = {
5886 .update_plane = drm_atomic_helper_update_plane,
5887 .disable_plane = drm_atomic_helper_disable_plane,
5888 .destroy = drm_primary_helper_destroy,
5889 .reset = dm_drm_plane_reset,
5890 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5891 .atomic_destroy_state = dm_drm_plane_destroy_state,
5894 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5895 struct drm_plane_state *new_state)
5897 struct amdgpu_framebuffer *afb;
5898 struct drm_gem_object *obj;
5899 struct amdgpu_device *adev;
5900 struct amdgpu_bo *rbo;
5901 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5902 struct list_head list;
5903 struct ttm_validate_buffer tv;
5904 struct ww_acquire_ctx ticket;
5908 if (!new_state->fb) {
5909 DRM_DEBUG_DRIVER("No FB bound\n");
5913 afb = to_amdgpu_framebuffer(new_state->fb);
5914 obj = new_state->fb->obj[0];
5915 rbo = gem_to_amdgpu_bo(obj);
5916 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5917 INIT_LIST_HEAD(&list);
5921 list_add(&tv.head, &list);
5923 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5925 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5929 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5930 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5932 domain = AMDGPU_GEM_DOMAIN_VRAM;
5934 r = amdgpu_bo_pin(rbo, domain);
5935 if (unlikely(r != 0)) {
5936 if (r != -ERESTARTSYS)
5937 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5938 ttm_eu_backoff_reservation(&ticket, &list);
5942 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5943 if (unlikely(r != 0)) {
5944 amdgpu_bo_unpin(rbo);
5945 ttm_eu_backoff_reservation(&ticket, &list);
5946 DRM_ERROR("%p bind failed\n", rbo);
5950 ttm_eu_backoff_reservation(&ticket, &list);
5952 afb->address = amdgpu_bo_gpu_offset(rbo);
5957 * We don't do surface updates on planes that have been newly created,
5958 * but we also don't have the afb->address during atomic check.
5960 * Fill in buffer attributes depending on the address here, but only on
5961 * newly created planes since they're not being used by DC yet and this
5962 * won't modify global state.
5964 dm_plane_state_old = to_dm_plane_state(plane->state);
5965 dm_plane_state_new = to_dm_plane_state(new_state);
5967 if (dm_plane_state_new->dc_state &&
5968 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5969 struct dc_plane_state *plane_state =
5970 dm_plane_state_new->dc_state;
5971 bool force_disable_dcc = !plane_state->dcc.enable;
5973 fill_plane_buffer_attributes(
5974 adev, afb, plane_state->format, plane_state->rotation,
5975 dm_plane_state_new->tiling_flags,
5976 &plane_state->tiling_info, &plane_state->plane_size,
5977 &plane_state->dcc, &plane_state->address,
5978 dm_plane_state_new->tmz_surface, force_disable_dcc);
5984 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5985 struct drm_plane_state *old_state)
5987 struct amdgpu_bo *rbo;
5993 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5994 r = amdgpu_bo_reserve(rbo, false);
5996 DRM_ERROR("failed to reserve rbo before unpin\n");
6000 amdgpu_bo_unpin(rbo);
6001 amdgpu_bo_unreserve(rbo);
6002 amdgpu_bo_unref(&rbo);
6005 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6006 struct drm_crtc_state *new_crtc_state)
6008 int max_downscale = 0;
6009 int max_upscale = INT_MAX;
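/*
 * 0 and INT_MAX are 16.16 fixed-point scale limits, i.e. effectively no
 * scaling restriction is enforced here yet.
 */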
6011 /* TODO: These should be checked against DC plane caps */
6012 return drm_atomic_helper_check_plane_state(
6013 state, new_crtc_state, max_downscale, max_upscale, true, true);
6016 static int dm_plane_atomic_check(struct drm_plane *plane,
6017 struct drm_plane_state *state)
6019 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6020 struct dc *dc = adev->dm.dc;
6021 struct dm_plane_state *dm_plane_state;
6022 struct dc_scaling_info scaling_info;
6023 struct drm_crtc_state *new_crtc_state;
6026 dm_plane_state = to_dm_plane_state(state);
6028 if (!dm_plane_state->dc_state)
6032 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6033 if (!new_crtc_state)
6036 ret = dm_plane_helper_check_state(state, new_crtc_state);
6040 ret = fill_dc_scaling_info(state, &scaling_info);
6044 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6050 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6051 struct drm_plane_state *new_plane_state)
6053 /* Only support async updates on cursor planes. */
6054 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6060 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6061 struct drm_plane_state *new_state)
6063 struct drm_plane_state *old_state =
6064 drm_atomic_get_old_plane_state(new_state->state, plane);
6066 swap(plane->state->fb, new_state->fb);
6068 plane->state->src_x = new_state->src_x;
6069 plane->state->src_y = new_state->src_y;
6070 plane->state->src_w = new_state->src_w;
6071 plane->state->src_h = new_state->src_h;
6072 plane->state->crtc_x = new_state->crtc_x;
6073 plane->state->crtc_y = new_state->crtc_y;
6074 plane->state->crtc_w = new_state->crtc_w;
6075 plane->state->crtc_h = new_state->crtc_h;
6077 handle_cursor_update(plane, old_state);
6080 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6081 .prepare_fb = dm_plane_helper_prepare_fb,
6082 .cleanup_fb = dm_plane_helper_cleanup_fb,
6083 .atomic_check = dm_plane_atomic_check,
6084 .atomic_async_check = dm_plane_atomic_async_check,
6085 .atomic_async_update = dm_plane_atomic_async_update
6089  * TODO: these are currently initialized to rgb formats only.
6090  * For future use cases we should either initialize them dynamically based on
6091  * plane capabilities, or initialize this array to all formats, so the internal
6092  * drm check will succeed, and let DC implement the proper check
6094 static const uint32_t rgb_formats[] = {
6095 DRM_FORMAT_XRGB8888,
6096 DRM_FORMAT_ARGB8888,
6097 DRM_FORMAT_RGBA8888,
6098 DRM_FORMAT_XRGB2101010,
6099 DRM_FORMAT_XBGR2101010,
6100 DRM_FORMAT_ARGB2101010,
6101 DRM_FORMAT_ABGR2101010,
6102 DRM_FORMAT_XBGR8888,
6103 DRM_FORMAT_ABGR8888,
6107 static const uint32_t overlay_formats[] = {
6108 DRM_FORMAT_XRGB8888,
6109 DRM_FORMAT_ARGB8888,
6110 DRM_FORMAT_RGBA8888,
6111 DRM_FORMAT_XBGR8888,
6112 DRM_FORMAT_ABGR8888,
6116 static const u32 cursor_formats[] = {
6120 static int get_plane_formats(const struct drm_plane *plane,
6121 const struct dc_plane_cap *plane_cap,
6122 uint32_t *formats, int max_formats)
6124 int i, num_formats = 0;
6127 * TODO: Query support for each group of formats directly from
6128 * DC plane caps. This will require adding more formats to the
6132 switch (plane->type) {
6133 case DRM_PLANE_TYPE_PRIMARY:
6134 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6135 if (num_formats >= max_formats)
6138 formats[num_formats++] = rgb_formats[i];
6141 if (plane_cap && plane_cap->pixel_format_support.nv12)
6142 formats[num_formats++] = DRM_FORMAT_NV12;
6143 if (plane_cap && plane_cap->pixel_format_support.p010)
6144 formats[num_formats++] = DRM_FORMAT_P010;
6145 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6146 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6147 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6148 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6149 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6153 case DRM_PLANE_TYPE_OVERLAY:
6154 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6155 if (num_formats >= max_formats)
6158 formats[num_formats++] = overlay_formats[i];
6162 case DRM_PLANE_TYPE_CURSOR:
6163 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6164 if (num_formats >= max_formats)
6167 formats[num_formats++] = cursor_formats[i];
6175 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6176 struct drm_plane *plane,
6177 unsigned long possible_crtcs,
6178 const struct dc_plane_cap *plane_cap)
6180 uint32_t formats[32];
6183 unsigned int supported_rotations;
6185 num_formats = get_plane_formats(plane, plane_cap, formats,
6186 ARRAY_SIZE(formats));
6188 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6189 &dm_plane_funcs, formats, num_formats,
6190 NULL, plane->type, NULL);
6194 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6195 plane_cap && plane_cap->per_pixel_alpha) {
6196 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6197 BIT(DRM_MODE_BLEND_PREMULTI);
6199 drm_plane_create_alpha_property(plane);
6200 drm_plane_create_blend_mode_property(plane, blend_caps);
6203 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6205 (plane_cap->pixel_format_support.nv12 ||
6206 plane_cap->pixel_format_support.p010)) {
6207 /* This only affects YUV formats. */
6208 drm_plane_create_color_properties(
6210 BIT(DRM_COLOR_YCBCR_BT601) |
6211 BIT(DRM_COLOR_YCBCR_BT709) |
6212 BIT(DRM_COLOR_YCBCR_BT2020),
6213 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6214 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6215 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6218 supported_rotations =
6219 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6220 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6222 if (dm->adev->asic_type >= CHIP_BONAIRE)
6223 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6224 supported_rotations);
6226 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6228 /* Create (reset) the plane state */
6229 if (plane->funcs->reset)
6230 plane->funcs->reset(plane);
6235 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6236 struct drm_plane *plane,
6237 uint32_t crtc_index)
6239 struct amdgpu_crtc *acrtc = NULL;
6240 struct drm_plane *cursor_plane;
6244 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6248 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6249 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6251 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6255 res = drm_crtc_init_with_planes(
6260 &amdgpu_dm_crtc_funcs, NULL);
6265 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6267 /* Create (reset) the plane state */
6268 if (acrtc->base.funcs->reset)
6269 acrtc->base.funcs->reset(&acrtc->base);
6271 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6272 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6274 acrtc->crtc_id = crtc_index;
6275 acrtc->base.enabled = false;
6276 acrtc->otg_inst = -1;
6278 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6279 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6280 true, MAX_COLOR_LUT_ENTRIES);
6281 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6287 kfree(cursor_plane);
6292 static int to_drm_connector_type(enum signal_type st)
6295 case SIGNAL_TYPE_HDMI_TYPE_A:
6296 return DRM_MODE_CONNECTOR_HDMIA;
6297 case SIGNAL_TYPE_EDP:
6298 return DRM_MODE_CONNECTOR_eDP;
6299 case SIGNAL_TYPE_LVDS:
6300 return DRM_MODE_CONNECTOR_LVDS;
6301 case SIGNAL_TYPE_RGB:
6302 return DRM_MODE_CONNECTOR_VGA;
6303 case SIGNAL_TYPE_DISPLAY_PORT:
6304 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6305 return DRM_MODE_CONNECTOR_DisplayPort;
6306 case SIGNAL_TYPE_DVI_DUAL_LINK:
6307 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6308 return DRM_MODE_CONNECTOR_DVID;
6309 case SIGNAL_TYPE_VIRTUAL:
6310 return DRM_MODE_CONNECTOR_VIRTUAL;
6313 return DRM_MODE_CONNECTOR_Unknown;
6317 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6319 struct drm_encoder *encoder;
6321 /* There is only one encoder per connector */
6322 drm_connector_for_each_possible_encoder(connector, encoder)
6328 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6330 struct drm_encoder *encoder;
6331 struct amdgpu_encoder *amdgpu_encoder;
6333 encoder = amdgpu_dm_connector_to_encoder(connector);
6335 if (encoder == NULL)
6338 amdgpu_encoder = to_amdgpu_encoder(encoder);
6340 amdgpu_encoder->native_mode.clock = 0;
6342 if (!list_empty(&connector->probed_modes)) {
6343 struct drm_display_mode *preferred_mode = NULL;
6345 list_for_each_entry(preferred_mode,
6346 &connector->probed_modes,
6348 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6349 amdgpu_encoder->native_mode = *preferred_mode;
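/*
 * Duplicate the encoder's native mode and shrink it to the requested
 * hdisplay/vdisplay; used to synthesize the common scaled modes below.
 */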
6357 static struct drm_display_mode *
6358 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6360 int hdisplay, int vdisplay)
6362 struct drm_device *dev = encoder->dev;
6363 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6364 struct drm_display_mode *mode = NULL;
6365 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6367 mode = drm_mode_duplicate(dev, native_mode);
6372 mode->hdisplay = hdisplay;
6373 mode->vdisplay = vdisplay;
6374 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6375 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
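/*
 * Add a table of common lower resolutions as additional modes, skipping
 * any that exceed the native mode or are already in the probed list.
 */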
6381 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6382 struct drm_connector *connector)
6384 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6385 struct drm_display_mode *mode = NULL;
6386 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6387 struct amdgpu_dm_connector *amdgpu_dm_connector =
6388 to_amdgpu_dm_connector(connector);
6392 char name[DRM_DISPLAY_MODE_LEN];
6395 } common_modes[] = {
6396 { "640x480", 640, 480},
6397 { "800x600", 800, 600},
6398 { "1024x768", 1024, 768},
6399 { "1280x720", 1280, 720},
6400 { "1280x800", 1280, 800},
6401 {"1280x1024", 1280, 1024},
6402 { "1440x900", 1440, 900},
6403 {"1680x1050", 1680, 1050},
6404 {"1600x1200", 1600, 1200},
6405 {"1920x1080", 1920, 1080},
6406 {"1920x1200", 1920, 1200}
6409 n = ARRAY_SIZE(common_modes);
6411 for (i = 0; i < n; i++) {
6412 struct drm_display_mode *curmode = NULL;
6413 bool mode_existed = false;
6415 if (common_modes[i].w > native_mode->hdisplay ||
6416 common_modes[i].h > native_mode->vdisplay ||
6417 (common_modes[i].w == native_mode->hdisplay &&
6418 common_modes[i].h == native_mode->vdisplay))
6421 list_for_each_entry(curmode, &connector->probed_modes, head) {
6422 if (common_modes[i].w == curmode->hdisplay &&
6423 common_modes[i].h == curmode->vdisplay) {
6424 mode_existed = true;
6432 mode = amdgpu_dm_create_common_mode(encoder,
6433 common_modes[i].name, common_modes[i].w,
6435 drm_mode_probed_add(connector, mode);
6436 amdgpu_dm_connector->num_modes++;
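/*
 * Rebuild the connector's probed mode list from the EDID and refresh the
 * cached native mode.
 */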
6440 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6443 struct amdgpu_dm_connector *amdgpu_dm_connector =
6444 to_amdgpu_dm_connector(connector);
6447 /* empty probed_modes */
6448 INIT_LIST_HEAD(&connector->probed_modes);
6449 amdgpu_dm_connector->num_modes =
6450 drm_add_edid_modes(connector, edid);
/* Sort the probed modes before calling amdgpu_dm_get_native_mode(),
 * since an EDID can report more than one preferred mode. Modes that
 * appear later in the probed mode list may advertise a higher
 * preferred resolution; for example, 3840x2160 in the base EDID
 * preferred timing and 4096x2160 in a later DID extension block.
6460 drm_mode_sort(&connector->probed_modes);
6461 amdgpu_dm_get_native_mode(connector);
6463 amdgpu_dm_connector->num_modes = 0;
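/*
 * .get_modes hook: fall back to a 640x480 mode when there is no valid
 * EDID, otherwise add the EDID modes plus the common scaled modes.
 */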
6467 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6469 struct amdgpu_dm_connector *amdgpu_dm_connector =
6470 to_amdgpu_dm_connector(connector);
6471 struct drm_encoder *encoder;
6472 struct edid *edid = amdgpu_dm_connector->edid;
6474 encoder = amdgpu_dm_connector_to_encoder(connector);
6476 if (!edid || !drm_edid_is_valid(edid)) {
6477 amdgpu_dm_connector->num_modes =
6478 drm_add_modes_noedid(connector, 640, 480);
6480 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6481 amdgpu_dm_connector_add_common_modes(encoder, connector);
6483 amdgpu_dm_fbc_init(connector);
6485 return amdgpu_dm_connector->num_modes;
6488 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6489 struct amdgpu_dm_connector *aconnector,
6491 struct dc_link *link,
6494 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6497 * Some of the properties below require access to state, like bpc.
6498 * Allocate some default initial connector state with our reset helper.
6500 if (aconnector->base.funcs->reset)
6501 aconnector->base.funcs->reset(&aconnector->base);
6503 aconnector->connector_id = link_index;
6504 aconnector->dc_link = link;
6505 aconnector->base.interlace_allowed = false;
6506 aconnector->base.doublescan_allowed = false;
6507 aconnector->base.stereo_allowed = false;
6508 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6509 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6510 aconnector->audio_inst = -1;
6511 mutex_init(&aconnector->hpd_lock);
* Configure HPD hot plug support. The connector->polled default value is
* 0, which means HPD hot plug is not supported.
6517 switch (connector_type) {
6518 case DRM_MODE_CONNECTOR_HDMIA:
6519 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6520 aconnector->base.ycbcr_420_allowed =
6521 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6523 case DRM_MODE_CONNECTOR_DisplayPort:
6524 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6525 aconnector->base.ycbcr_420_allowed =
6526 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6528 case DRM_MODE_CONNECTOR_DVID:
6529 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6535 drm_object_attach_property(&aconnector->base.base,
6536 dm->ddev->mode_config.scaling_mode_property,
6537 DRM_MODE_SCALE_NONE);
6539 drm_object_attach_property(&aconnector->base.base,
6540 adev->mode_info.underscan_property,
6542 drm_object_attach_property(&aconnector->base.base,
6543 adev->mode_info.underscan_hborder_property,
6545 drm_object_attach_property(&aconnector->base.base,
6546 adev->mode_info.underscan_vborder_property,
6549 if (!aconnector->mst_port)
6550 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
6553 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6554 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6556 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6557 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6558 drm_object_attach_property(&aconnector->base.base,
6559 adev->mode_info.abm_level_property, 0);
6562 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6563 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6564 connector_type == DRM_MODE_CONNECTOR_eDP) {
6565 drm_object_attach_property(
6566 &aconnector->base.base,
6567 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6569 if (!aconnector->mst_port)
6570 drm_connector_attach_vrr_capable_property(&aconnector->base);
6572 #ifdef CONFIG_DRM_AMD_DC_HDCP
6573 if (adev->dm.hdcp_workqueue)
6574 drm_connector_attach_content_protection_property(&aconnector->base, true);
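/*
 * i2c transfer hook: translate the i2c_msg array into DC i2c payloads and
 * submit them over the link's DDC channel.
 */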
6579 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6580 struct i2c_msg *msgs, int num)
6582 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6583 struct ddc_service *ddc_service = i2c->ddc_service;
6584 struct i2c_command cmd;
6588 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6593 cmd.number_of_payloads = num;
6594 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6597 for (i = 0; i < num; i++) {
6598 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6599 cmd.payloads[i].address = msgs[i].addr;
6600 cmd.payloads[i].length = msgs[i].len;
6601 cmd.payloads[i].data = msgs[i].buf;
6605 ddc_service->ctx->dc,
6606 ddc_service->ddc_pin->hw_info.ddc_channel,
6610 kfree(cmd.payloads);
6614 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6616 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6619 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6620 .master_xfer = amdgpu_dm_i2c_xfer,
6621 .functionality = amdgpu_dm_i2c_func,
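/*
 * Allocate and set up an i2c adapter backed by the given DC DDC service
 * for the specified link.
 */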
6624 static struct amdgpu_i2c_adapter *
6625 create_i2c(struct ddc_service *ddc_service,
6629 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6630 struct amdgpu_i2c_adapter *i2c;
6632 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6635 i2c->base.owner = THIS_MODULE;
6636 i2c->base.class = I2C_CLASS_DDC;
6637 i2c->base.dev.parent = &adev->pdev->dev;
6638 i2c->base.algo = &amdgpu_dm_i2c_algo;
6639 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6640 i2c_set_adapdata(&i2c->base, i2c);
6641 i2c->ddc_service = ddc_service;
6642 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6649 * Note: this function assumes that dc_link_detect() was called for the
6650 * dc_link which will be represented by this aconnector.
6652 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6653 struct amdgpu_dm_connector *aconnector,
6654 uint32_t link_index,
6655 struct amdgpu_encoder *aencoder)
6659 struct dc *dc = dm->dc;
6660 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6661 struct amdgpu_i2c_adapter *i2c;
6663 link->priv = aconnector;
6665 DRM_DEBUG_DRIVER("%s()\n", __func__);
6667 i2c = create_i2c(link->ddc, link->link_index, &res);
6669 DRM_ERROR("Failed to create i2c adapter data\n");
6673 aconnector->i2c = i2c;
6674 res = i2c_add_adapter(&i2c->base);
6677 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6681 connector_type = to_drm_connector_type(link->connector_signal);
6683 res = drm_connector_init_with_ddc(
6686 &amdgpu_dm_connector_funcs,
6691 DRM_ERROR("connector_init failed\n");
6692 aconnector->connector_id = -1;
6696 drm_connector_helper_add(
6698 &amdgpu_dm_connector_helper_funcs);
6700 amdgpu_dm_connector_init_helper(
6707 drm_connector_attach_encoder(
6708 &aconnector->base, &aencoder->base);
6710 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6711 || connector_type == DRM_MODE_CONNECTOR_eDP)
6712 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6717 aconnector->i2c = NULL;
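/* Return the possible_crtcs mask for DM encoders based on the number of CRTCs. */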
6722 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6724 switch (adev->mode_info.num_crtc) {
6741 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6742 struct amdgpu_encoder *aencoder,
6743 uint32_t link_index)
6745 struct amdgpu_device *adev = drm_to_adev(dev);
6747 int res = drm_encoder_init(dev,
6749 &amdgpu_dm_encoder_funcs,
6750 DRM_MODE_ENCODER_TMDS,
6753 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6756 aencoder->encoder_id = link_index;
6758 aencoder->encoder_id = -1;
6760 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
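/*
 * Enable or disable the pageflip interrupt and DRM vblank handling for a
 * CRTC when it is configured or torn down.
 */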
6765 static void manage_dm_interrupts(struct amdgpu_device *adev,
6766 struct amdgpu_crtc *acrtc,
6770 * We have no guarantee that the frontend index maps to the same
6771 * backend index - some even map to more than one.
6773 * TODO: Use a different interrupt or check DC itself for the mapping.
6776 amdgpu_display_crtc_idx_to_irq_type(
6781 drm_crtc_vblank_on(&acrtc->base);
6784 &adev->pageflip_irq,
6790 &adev->pageflip_irq,
6792 drm_crtc_vblank_off(&acrtc->base);
6796 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6797 struct amdgpu_crtc *acrtc)
6800 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
* This reads the current IRQ state and forces the setting to be
* reapplied to hardware.
6806 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
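/*
 * Return true if the scaling or underscan settings differ between the old
 * and new connector state.
 */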
6810 is_scaling_state_different(const struct dm_connector_state *dm_state,
6811 const struct dm_connector_state *old_dm_state)
6813 if (dm_state->scaling != old_dm_state->scaling)
6815 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6816 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6818 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6819 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6821 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6822 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6827 #ifdef CONFIG_DRM_AMD_DC_HDCP
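/*
 * Decide whether HDCP needs to be enabled, re-enabled or disabled for this
 * connector, normalizing the DRM content protection state along the way.
 */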
6828 static bool is_content_protection_different(struct drm_connector_state *state,
6829 const struct drm_connector_state *old_state,
6830 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6832 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6834 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6835 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6836 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* CP is being re-enabled, ignore this. */
6841 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6842 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6843 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED. */
6848 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6849 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6850 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Check that something is actually connected and enabled; otherwise we
 * would start HDCP with nothing attached (hot-plug, headless S3, DPMS).
6855 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6856 aconnector->dc_sink != NULL)
6859 if (old_state->content_protection == state->content_protection)
6862 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6869 static void remove_stream(struct amdgpu_device *adev,
6870 struct amdgpu_crtc *acrtc,
6871 struct dc_stream_state *stream)
6873 /* this is the update mode case */
6875 acrtc->otg_inst = -1;
6876 acrtc->enabled = false;
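/*
 * Compute the DC cursor position and hotspot from the cursor plane state;
 * negative coordinates are folded into the hotspot so the cursor can be
 * partially off screen.
 */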
6879 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6880 struct dc_cursor_position *position)
6882 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6884 int xorigin = 0, yorigin = 0;
6886 position->enable = false;
6890 if (!crtc || !plane->state->fb)
6893 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6894 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6895 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6897 plane->state->crtc_w,
6898 plane->state->crtc_h);
6902 x = plane->state->crtc_x;
6903 y = plane->state->crtc_y;
6905 if (x <= -amdgpu_crtc->max_cursor_width ||
6906 y <= -amdgpu_crtc->max_cursor_height)
6910 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6914 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6917 position->enable = true;
6918 position->translate_by_source = true;
6921 position->x_hotspot = xorigin;
6922 position->y_hotspot = yorigin;
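/*
 * Program the DC cursor attributes and position for a cursor plane update,
 * or disable the cursor when the plane no longer has a framebuffer.
 */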
6927 static void handle_cursor_update(struct drm_plane *plane,
6928 struct drm_plane_state *old_plane_state)
6930 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6931 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6932 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6933 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6934 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6935 uint64_t address = afb ? afb->address : 0;
6936 struct dc_cursor_position position;
6937 struct dc_cursor_attributes attributes;
6940 if (!plane->state->fb && !old_plane_state->fb)
6943 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6945 amdgpu_crtc->crtc_id,
6946 plane->state->crtc_w,
6947 plane->state->crtc_h);
6949 ret = get_cursor_position(plane, crtc, &position);
6953 if (!position.enable) {
6954 /* turn off cursor */
6955 if (crtc_state && crtc_state->stream) {
6956 mutex_lock(&adev->dm.dc_lock);
6957 dc_stream_set_cursor_position(crtc_state->stream,
6959 mutex_unlock(&adev->dm.dc_lock);
6964 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6965 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6967 memset(&attributes, 0, sizeof(attributes));
6968 attributes.address.high_part = upper_32_bits(address);
6969 attributes.address.low_part = lower_32_bits(address);
6970 attributes.width = plane->state->crtc_w;
6971 attributes.height = plane->state->crtc_h;
6972 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6973 attributes.rotation_angle = 0;
6974 attributes.attribute_flags.value = 0;
6976 attributes.pitch = attributes.width;
6978 if (crtc_state->stream) {
6979 mutex_lock(&adev->dm.dc_lock);
6980 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6982 DRM_ERROR("DC failed to set cursor attributes\n");
6984 if (!dc_stream_set_cursor_position(crtc_state->stream,
6986 DRM_ERROR("DC failed to set cursor position\n");
6987 mutex_unlock(&adev->dm.dc_lock);
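/*
 * Hand the pending pageflip event over to the pageflip interrupt handler;
 * must be called with the event_lock held.
 */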
6991 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6994 assert_spin_locked(&acrtc->base.dev->event_lock);
6995 WARN_ON(acrtc->event);
6997 acrtc->event = acrtc->base.state->event;
6999 /* Set the flip status */
7000 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7002 /* Mark this event as consumed */
7003 acrtc->base.state->event = NULL;
7005 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7009 static void update_freesync_state_on_stream(
7010 struct amdgpu_display_manager *dm,
7011 struct dm_crtc_state *new_crtc_state,
7012 struct dc_stream_state *new_stream,
7013 struct dc_plane_state *surface,
7014 u32 flip_timestamp_in_us)
7016 struct mod_vrr_params vrr_params;
7017 struct dc_info_packet vrr_infopacket = {0};
7018 struct amdgpu_device *adev = dm->adev;
7019 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7020 unsigned long flags;
7026 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7027 * For now it's sufficient to just guard against these conditions.
7030 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7033 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7034 vrr_params = acrtc->dm_irq_params.vrr_params;
7037 mod_freesync_handle_preflip(
7038 dm->freesync_module,
7041 flip_timestamp_in_us,
7044 if (adev->family < AMDGPU_FAMILY_AI &&
7045 amdgpu_dm_vrr_active(new_crtc_state)) {
7046 mod_freesync_handle_v_update(dm->freesync_module,
7047 new_stream, &vrr_params);
7049 /* Need to call this before the frame ends. */
7050 dc_stream_adjust_vmin_vmax(dm->dc,
7051 new_crtc_state->stream,
7052 &vrr_params.adjust);
7056 mod_freesync_build_vrr_infopacket(
7057 dm->freesync_module,
7061 TRANSFER_FUNC_UNKNOWN,
7064 new_crtc_state->freesync_timing_changed |=
7065 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7067 sizeof(vrr_params.adjust)) != 0);
7069 new_crtc_state->freesync_vrr_info_changed |=
7070 (memcmp(&new_crtc_state->vrr_infopacket,
7072 sizeof(vrr_infopacket)) != 0);
7074 acrtc->dm_irq_params.vrr_params = vrr_params;
7075 new_crtc_state->vrr_infopacket = vrr_infopacket;
7077 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7078 new_stream->vrr_infopacket = vrr_infopacket;
7080 if (new_crtc_state->freesync_vrr_info_changed)
7081 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7082 new_crtc_state->base.crtc->base.id,
7083 (int)new_crtc_state->base.vrr_enabled,
7084 (int)vrr_params.state);
7086 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
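/*
 * Recompute the freesync/VRR parameters for a CRTC and copy them into the
 * per-CRTC IRQ parameters so the interrupt handlers see consistent state.
 */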
7089 static void update_stream_irq_parameters(
7090 struct amdgpu_display_manager *dm,
7091 struct dm_crtc_state *new_crtc_state)
7093 struct dc_stream_state *new_stream = new_crtc_state->stream;
7094 struct mod_vrr_params vrr_params;
7095 struct mod_freesync_config config = new_crtc_state->freesync_config;
7096 struct amdgpu_device *adev = dm->adev;
7097 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7098 unsigned long flags;
7104 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7105 * For now it's sufficient to just guard against these conditions.
7107 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7110 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7111 vrr_params = acrtc->dm_irq_params.vrr_params;
7113 if (new_crtc_state->vrr_supported &&
7114 config.min_refresh_in_uhz &&
7115 config.max_refresh_in_uhz) {
7116 config.state = new_crtc_state->base.vrr_enabled ?
7117 VRR_STATE_ACTIVE_VARIABLE :
7120 config.state = VRR_STATE_UNSUPPORTED;
7123 mod_freesync_build_vrr_params(dm->freesync_module,
7125 &config, &vrr_params);
7127 new_crtc_state->freesync_timing_changed |=
7128 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7129 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7131 new_crtc_state->freesync_config = config;
7132 /* Copy state for access from DM IRQ handler */
7133 acrtc->dm_irq_params.freesync_config = config;
7134 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7135 acrtc->dm_irq_params.vrr_params = vrr_params;
7136 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7139 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7140 struct dm_crtc_state *new_state)
7142 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7143 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7145 if (!old_vrr_active && new_vrr_active) {
/* Transition VRR inactive -> active:
 * While VRR is active, we must not disable the vblank irq, as a
 * re-enable after a disable would compute bogus vblank/pflip
 * timestamps if the disable happened inside the display front porch.
 *
 * We also need the vupdate irq for the actual core vblank handling
 * at the end of vblank.
7154 dm_set_vupdate_irq(new_state->base.crtc, true);
7155 drm_crtc_vblank_get(new_state->base.crtc);
7156 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7157 __func__, new_state->base.crtc->base.id);
7158 } else if (old_vrr_active && !new_vrr_active) {
7159 /* Transition VRR active -> inactive:
7160 * Allow vblank irq disable again for fixed refresh rate.
7162 dm_set_vupdate_irq(new_state->base.crtc, false);
7163 drm_crtc_vblank_put(new_state->base.crtc);
7164 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7165 __func__, new_state->base.crtc->base.id);
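/* Flush cursor updates for every cursor plane in the atomic state. */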
7169 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7171 struct drm_plane *plane;
7172 struct drm_plane_state *old_plane_state, *new_plane_state;
7176 * TODO: Make this per-stream so we don't issue redundant updates for
7177 * commits with multiple streams.
7179 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7181 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7182 handle_cursor_update(plane, old_plane_state);
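/*
 * Build and commit the DC surface/stream updates for one CRTC: fill the
 * plane, scaling and flip info, throttle against the target vblank, arm
 * the pageflip event, and hand the bundle to DC.
 */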
7185 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7186 struct dc_state *dc_state,
7187 struct drm_device *dev,
7188 struct amdgpu_display_manager *dm,
7189 struct drm_crtc *pcrtc,
7190 bool wait_for_vblank)
7193 uint64_t timestamp_ns;
7194 struct drm_plane *plane;
7195 struct drm_plane_state *old_plane_state, *new_plane_state;
7196 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7197 struct drm_crtc_state *new_pcrtc_state =
7198 drm_atomic_get_new_crtc_state(state, pcrtc);
7199 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7200 struct dm_crtc_state *dm_old_crtc_state =
7201 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7202 int planes_count = 0, vpos, hpos;
7204 unsigned long flags;
7205 struct amdgpu_bo *abo;
7206 uint32_t target_vblank, last_flip_vblank;
7207 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7208 bool pflip_present = false;
7210 struct dc_surface_update surface_updates[MAX_SURFACES];
7211 struct dc_plane_info plane_infos[MAX_SURFACES];
7212 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7213 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7214 struct dc_stream_update stream_update;
7217 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7220 dm_error("Failed to allocate update bundle\n");
* Disable the cursor first if we're disabling all the planes.
* It'll remain on the screen after the planes are re-enabled if we don't.
7229 if (acrtc_state->active_planes == 0)
7230 amdgpu_dm_commit_cursors(state);
7232 /* update planes when needed */
7233 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7234 struct drm_crtc *crtc = new_plane_state->crtc;
7235 struct drm_crtc_state *new_crtc_state;
7236 struct drm_framebuffer *fb = new_plane_state->fb;
7237 bool plane_needs_flip;
7238 struct dc_plane_state *dc_plane;
7239 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7241 /* Cursor plane is handled after stream updates */
7242 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7245 if (!fb || !crtc || pcrtc != crtc)
7248 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7249 if (!new_crtc_state->active)
7252 dc_plane = dm_new_plane_state->dc_state;
7254 bundle->surface_updates[planes_count].surface = dc_plane;
7255 if (new_pcrtc_state->color_mgmt_changed) {
7256 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7257 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7258 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7261 fill_dc_scaling_info(new_plane_state,
7262 &bundle->scaling_infos[planes_count]);
7264 bundle->surface_updates[planes_count].scaling_info =
7265 &bundle->scaling_infos[planes_count];
7267 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7269 pflip_present = pflip_present || plane_needs_flip;
7271 if (!plane_needs_flip) {
7276 abo = gem_to_amdgpu_bo(fb->obj[0]);
* Wait for all fences on this FB. Do a limited wait to avoid a
* deadlock during GPU reset when this fence will not signal
* but we hold the reservation lock for the BO.
7283 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7285 msecs_to_jiffies(5000));
7286 if (unlikely(r <= 0))
7287 DRM_ERROR("Waiting for fences timed out!");
7289 fill_dc_plane_info_and_addr(
7290 dm->adev, new_plane_state,
7291 dm_new_plane_state->tiling_flags,
7292 &bundle->plane_infos[planes_count],
7293 &bundle->flip_addrs[planes_count].address,
7294 dm_new_plane_state->tmz_surface, false);
7296 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7297 new_plane_state->plane->index,
7298 bundle->plane_infos[planes_count].dcc.enable);
7300 bundle->surface_updates[planes_count].plane_info =
7301 &bundle->plane_infos[planes_count];
* Only allow immediate flips for fast updates that don't
* change the FB pitch, DCC state, rotation or mirroring.
7307 bundle->flip_addrs[planes_count].flip_immediate =
7308 crtc->state->async_flip &&
7309 acrtc_state->update_type == UPDATE_TYPE_FAST;
7311 timestamp_ns = ktime_get_ns();
7312 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7313 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7314 bundle->surface_updates[planes_count].surface = dc_plane;
7316 if (!bundle->surface_updates[planes_count].surface) {
7317 DRM_ERROR("No surface for CRTC: id=%d\n",
7318 acrtc_attach->crtc_id);
7322 if (plane == pcrtc->primary)
7323 update_freesync_state_on_stream(
7326 acrtc_state->stream,
7328 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7330 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7332 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7333 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7339 if (pflip_present) {
7341 /* Use old throttling in non-vrr fixed refresh rate mode
7342 * to keep flip scheduling based on target vblank counts
7343 * working in a backwards compatible way, e.g., for
7344 * clients using the GLX_OML_sync_control extension or
7345 * DRI3/Present extension with defined target_msc.
7347 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7350 /* For variable refresh rate mode only:
7351 * Get vblank of last completed flip to avoid > 1 vrr
7352 * flips per video frame by use of throttling, but allow
7353 * flip programming anywhere in the possibly large
7354 * variable vrr vblank interval for fine-grained flip
7355 * timing control and more opportunity to avoid stutter
7356 * on late submission of flips.
7358 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7359 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7360 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7363 target_vblank = last_flip_vblank + wait_for_vblank;
7366 * Wait until we're out of the vertical blank period before the one
7367 * targeted by the flip
7369 while ((acrtc_attach->enabled &&
7370 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7371 0, &vpos, &hpos, NULL,
7372 NULL, &pcrtc->hwmode)
7373 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7374 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7375 (int)(target_vblank -
7376 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7377 usleep_range(1000, 1100);
7381 * Prepare the flip event for the pageflip interrupt to handle.
7383 * This only works in the case where we've already turned on the
* appropriate hardware blocks (e.g. HUBP) so in the transition case
7385 * from 0 -> n planes we have to skip a hardware generated event
7386 * and rely on sending it from software.
7388 if (acrtc_attach->base.state->event &&
7389 acrtc_state->active_planes > 0) {
7390 drm_crtc_vblank_get(pcrtc);
7392 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7394 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7395 prepare_flip_isr(acrtc_attach);
7397 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7400 if (acrtc_state->stream) {
7401 if (acrtc_state->freesync_vrr_info_changed)
7402 bundle->stream_update.vrr_infopacket =
7403 &acrtc_state->stream->vrr_infopacket;
7407 /* Update the planes if changed or disable if we don't have any. */
7408 if ((planes_count || acrtc_state->active_planes == 0) &&
7409 acrtc_state->stream) {
7410 bundle->stream_update.stream = acrtc_state->stream;
7411 if (new_pcrtc_state->mode_changed) {
7412 bundle->stream_update.src = acrtc_state->stream->src;
7413 bundle->stream_update.dst = acrtc_state->stream->dst;
7416 if (new_pcrtc_state->color_mgmt_changed) {
7418 * TODO: This isn't fully correct since we've actually
7419 * already modified the stream in place.
7421 bundle->stream_update.gamut_remap =
7422 &acrtc_state->stream->gamut_remap_matrix;
7423 bundle->stream_update.output_csc_transform =
7424 &acrtc_state->stream->csc_color_matrix;
7425 bundle->stream_update.out_transfer_func =
7426 acrtc_state->stream->out_transfer_func;
7429 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7430 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7431 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7434 * If FreeSync state on the stream has changed then we need to
7435 * re-adjust the min/max bounds now that DC doesn't handle this
7436 * as part of commit.
7438 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7439 amdgpu_dm_vrr_active(acrtc_state)) {
7440 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7441 dc_stream_adjust_vmin_vmax(
7442 dm->dc, acrtc_state->stream,
7443 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7444 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7446 mutex_lock(&dm->dc_lock);
7447 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7448 acrtc_state->stream->link->psr_settings.psr_allow_active)
7449 amdgpu_dm_psr_disable(acrtc_state->stream);
7451 dc_commit_updates_for_stream(dm->dc,
7452 bundle->surface_updates,
7454 acrtc_state->stream,
7455 &bundle->stream_update,
7459 * Enable or disable the interrupts on the backend.
7461 * Most pipes are put into power gating when unused.
7463 * When power gating is enabled on a pipe we lose the
7464 * interrupt enablement state when power gating is disabled.
7466 * So we need to update the IRQ control state in hardware
7467 * whenever the pipe turns on (since it could be previously
* power gated) or off (since some pipes can't be power gated on some ASICs).
7471 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7472 dm_update_pflip_irq_state(drm_to_adev(dev),
7475 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7476 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7477 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7478 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7479 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7480 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7481 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7482 amdgpu_dm_psr_enable(acrtc_state->stream);
7485 mutex_unlock(&dm->dc_lock);
7489 * Update cursor state *after* programming all the planes.
7490 * This avoids redundant programming in the case where we're going
7491 * to be disabling a single plane - those pipes are being disabled.
7493 if (acrtc_state->active_planes)
7494 amdgpu_dm_commit_cursors(state);
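/*
 * Notify the audio component about ELD changes for connectors that changed
 * CRTCs or went through a modeset.
 */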
7500 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7501 struct drm_atomic_state *state)
7503 struct amdgpu_device *adev = drm_to_adev(dev);
7504 struct amdgpu_dm_connector *aconnector;
7505 struct drm_connector *connector;
7506 struct drm_connector_state *old_con_state, *new_con_state;
7507 struct drm_crtc_state *new_crtc_state;
7508 struct dm_crtc_state *new_dm_crtc_state;
7509 const struct dc_stream_status *status;
7512 /* Notify device removals. */
7513 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7514 if (old_con_state->crtc != new_con_state->crtc) {
7515 /* CRTC changes require notification. */
7519 if (!new_con_state->crtc)
7522 new_crtc_state = drm_atomic_get_new_crtc_state(
7523 state, new_con_state->crtc);
7525 if (!new_crtc_state)
7528 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7532 aconnector = to_amdgpu_dm_connector(connector);
7534 mutex_lock(&adev->dm.audio_lock);
7535 inst = aconnector->audio_inst;
7536 aconnector->audio_inst = -1;
7537 mutex_unlock(&adev->dm.audio_lock);
7539 amdgpu_dm_audio_eld_notify(adev, inst);
7542 /* Notify audio device additions. */
7543 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7544 if (!new_con_state->crtc)
7547 new_crtc_state = drm_atomic_get_new_crtc_state(
7548 state, new_con_state->crtc);
7550 if (!new_crtc_state)
7553 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7556 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7557 if (!new_dm_crtc_state->stream)
7560 status = dc_stream_get_status(new_dm_crtc_state->stream);
7564 aconnector = to_amdgpu_dm_connector(connector);
7566 mutex_lock(&adev->dm.audio_lock);
7567 inst = status->audio_inst;
7568 aconnector->audio_inst = inst;
7569 mutex_unlock(&adev->dm.audio_lock);
7571 amdgpu_dm_audio_eld_notify(adev, inst);
7576 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7577 * @crtc_state: the DRM CRTC state
7578 * @stream_state: the DC stream state.
* Copy the mirrored transient state flags from DRM to DC. It is used to bring
7581 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7583 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7584 struct dc_stream_state *stream_state)
7586 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7589 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7590 struct drm_atomic_state *state,
* Add a check here for SoCs that support a hardware cursor plane, to
* unset legacy_cursor_update.
7598 return drm_atomic_helper_commit(dev, state, nonblock);
/* TODO: Handle EINTR, re-enable IRQ */
7604 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7605 * @state: The atomic state to commit
7607 * This will tell DC to commit the constructed DC state from atomic_check,
* programming the hardware. Any failure here implies a hardware failure, since
7609 * atomic check should have filtered anything non-kosher.
7611 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7613 struct drm_device *dev = state->dev;
7614 struct amdgpu_device *adev = drm_to_adev(dev);
7615 struct amdgpu_display_manager *dm = &adev->dm;
7616 struct dm_atomic_state *dm_state;
7617 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7619 struct drm_crtc *crtc;
7620 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7621 unsigned long flags;
7622 bool wait_for_vblank = true;
7623 struct drm_connector *connector;
7624 struct drm_connector_state *old_con_state, *new_con_state;
7625 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7626 int crtc_disable_count = 0;
7627 bool mode_set_reset_required = false;
7629 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7631 dm_state = dm_atomic_get_new_state(state);
7632 if (dm_state && dm_state->context) {
7633 dc_state = dm_state->context;
7635 /* No state changes, retain current state. */
7636 dc_state_temp = dc_create_state(dm->dc);
7637 ASSERT(dc_state_temp);
7638 dc_state = dc_state_temp;
7639 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7642 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7643 new_crtc_state, i) {
7644 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7646 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7648 if (old_crtc_state->active &&
7649 (!new_crtc_state->active ||
7650 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7651 manage_dm_interrupts(adev, acrtc, false);
7652 dc_stream_release(dm_old_crtc_state->stream);
7656 /* update changed items */
7657 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7658 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7660 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7661 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d, active_changed:%d, "
"connectors_changed:%d\n",
7668 new_crtc_state->enable,
7669 new_crtc_state->active,
7670 new_crtc_state->planes_changed,
7671 new_crtc_state->mode_changed,
7672 new_crtc_state->active_changed,
7673 new_crtc_state->connectors_changed);
7675 /* Copy all transient state flags into dc state */
7676 if (dm_new_crtc_state->stream) {
7677 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7678 dm_new_crtc_state->stream);
7681 /* handles headless hotplug case, updating new_state and
7682 * aconnector as needed
7685 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7687 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7689 if (!dm_new_crtc_state->stream) {
* This could happen because of issues with
* userspace notification delivery.
* In that case userspace tries to set a mode on
* a display which is in fact disconnected.
* dc_sink is NULL in this case on aconnector.
* We expect a reset mode will come soon.
*
* This can also happen when an unplug is done
* during the resume sequence.
*
* In this case, we want to pretend we still
* have a sink to keep the pipe running so that
* hw state is consistent with the sw state.
7705 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7706 __func__, acrtc->base.base.id);
7710 if (dm_old_crtc_state->stream)
7711 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7713 pm_runtime_get_noresume(dev->dev);
7715 acrtc->enabled = true;
7716 acrtc->hw_mode = new_crtc_state->mode;
7717 crtc->hwmode = new_crtc_state->mode;
7718 mode_set_reset_required = true;
7719 } else if (modereset_required(new_crtc_state)) {
7720 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7721 /* i.e. reset mode */
7722 if (dm_old_crtc_state->stream)
7723 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7724 mode_set_reset_required = true;
7726 } /* for_each_crtc_in_state() */
/* If there was a mode set or reset, disable eDP PSR. */
7730 if (mode_set_reset_required)
7731 amdgpu_dm_psr_disable_all(dm);
7733 dm_enable_per_frame_crtc_master_sync(dc_state);
7734 mutex_lock(&dm->dc_lock);
7735 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7736 mutex_unlock(&dm->dc_lock);
7739 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7740 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7742 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7744 if (dm_new_crtc_state->stream != NULL) {
7745 const struct dc_stream_status *status =
7746 dc_stream_get_status(dm_new_crtc_state->stream);
7749 status = dc_stream_get_status_from_state(dc_state,
7750 dm_new_crtc_state->stream);
7752 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7754 acrtc->otg_inst = status->primary_otg_inst;
7757 #ifdef CONFIG_DRM_AMD_DC_HDCP
7758 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7759 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7760 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7761 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7763 new_crtc_state = NULL;
7766 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7768 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7770 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7771 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7772 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7773 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7777 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7778 hdcp_update_display(
7779 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7780 new_con_state->hdcp_content_type,
7781 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7786 /* Handle connector state changes */
7787 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7788 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7789 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7790 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7791 struct dc_surface_update dummy_updates[MAX_SURFACES];
7792 struct dc_stream_update stream_update;
7793 struct dc_info_packet hdr_packet;
7794 struct dc_stream_status *status = NULL;
7795 bool abm_changed, hdr_changed, scaling_changed;
7797 memset(&dummy_updates, 0, sizeof(dummy_updates));
7798 memset(&stream_update, 0, sizeof(stream_update));
7801 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7802 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7805 /* Skip any modesets/resets */
7806 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7809 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7810 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7812 scaling_changed = is_scaling_state_different(dm_new_con_state,
7815 abm_changed = dm_new_crtc_state->abm_level !=
7816 dm_old_crtc_state->abm_level;
7819 is_hdr_metadata_different(old_con_state, new_con_state);
7821 if (!scaling_changed && !abm_changed && !hdr_changed)
7824 stream_update.stream = dm_new_crtc_state->stream;
7825 if (scaling_changed) {
7826 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7827 dm_new_con_state, dm_new_crtc_state->stream);
7829 stream_update.src = dm_new_crtc_state->stream->src;
7830 stream_update.dst = dm_new_crtc_state->stream->dst;
7834 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7836 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7840 fill_hdr_info_packet(new_con_state, &hdr_packet);
7841 stream_update.hdr_static_metadata = &hdr_packet;
7844 status = dc_stream_get_status(dm_new_crtc_state->stream);
7846 WARN_ON(!status->plane_count);
7849 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7850 * Here we create an empty update on each plane.
7851 * To fix this, DC should permit updating only stream properties.
7853 for (j = 0; j < status->plane_count; j++)
7854 dummy_updates[j].surface = status->plane_states[0];
7857 mutex_lock(&dm->dc_lock);
7858 dc_commit_updates_for_stream(dm->dc,
7860 status->plane_count,
7861 dm_new_crtc_state->stream,
7864 mutex_unlock(&dm->dc_lock);
7867 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7868 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7869 new_crtc_state, i) {
7870 if (old_crtc_state->active && !new_crtc_state->active)
7871 crtc_disable_count++;
7873 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7874 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7876 /* For freesync config update on crtc state and params for irq */
7877 update_stream_irq_parameters(dm, dm_new_crtc_state);
7879 /* Handle vrr on->off / off->on transitions */
7880 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
* Enable interrupts for CRTCs that are newly enabled or went through
* a modeset. This is intentionally deferred until after the front-end
* state has been modified, so that the OTG is already on and the IRQ
* handlers do not access stale or invalid state.
7890 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7891 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7893 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7895 if (new_crtc_state->active &&
7896 (!old_crtc_state->active ||
7897 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7898 dc_stream_retain(dm_new_crtc_state->stream);
7899 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7900 manage_dm_interrupts(adev, acrtc, true);
7902 #ifdef CONFIG_DEBUG_FS
7904 * Frontend may have changed so reapply the CRC capture
7905 * settings for the stream.
7907 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7909 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7910 amdgpu_dm_crtc_configure_crc_source(
7911 crtc, dm_new_crtc_state,
7912 dm_new_crtc_state->crc_src);
7918 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7919 if (new_crtc_state->async_flip)
7920 wait_for_vblank = false;
7922 /* update planes when needed per crtc*/
7923 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7924 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7926 if (dm_new_crtc_state->stream)
7927 amdgpu_dm_commit_planes(state, dc_state, dev,
7928 dm, crtc, wait_for_vblank);
7931 /* Update audio instances for each connector. */
7932 amdgpu_dm_commit_audio(dev, state);
* Send a vblank event for all events not handled in flip, and
* mark the event as consumed for drm_atomic_helper_commit_hw_done().
7938 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7939 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7941 if (new_crtc_state->event)
7942 drm_send_event_locked(dev, &new_crtc_state->event->base);
7944 new_crtc_state->event = NULL;
7946 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7948 /* Signal HW programming completion */
7949 drm_atomic_helper_commit_hw_done(state);
7951 if (wait_for_vblank)
7952 drm_atomic_helper_wait_for_flip_done(dev, state);
7954 drm_atomic_helper_cleanup_planes(dev, state);
* Finally, drop a runtime PM reference for each newly disabled CRTC,
* so we can put the GPU into runtime suspend if we're not driving any
* displays anymore.
7961 for (i = 0; i < crtc_disable_count; i++)
7962 pm_runtime_put_autosuspend(dev->dev);
7963 pm_runtime_mark_last_busy(dev->dev);
7966 dc_release_state(dc_state_temp);
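/*
 * Build a minimal atomic state for the connector's CRTC and primary plane
 * with mode_changed forced, then commit it to restore the previous display
 * configuration.
 */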
7970 static int dm_force_atomic_commit(struct drm_connector *connector)
7973 struct drm_device *ddev = connector->dev;
7974 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7975 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7976 struct drm_plane *plane = disconnected_acrtc->base.primary;
7977 struct drm_connector_state *conn_state;
7978 struct drm_crtc_state *crtc_state;
7979 struct drm_plane_state *plane_state;
7984 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7986 /* Construct an atomic state to restore previous display setting */
7989 * Attach connectors to drm_atomic_state
7991 conn_state = drm_atomic_get_connector_state(state, connector);
7993 ret = PTR_ERR_OR_ZERO(conn_state);
7997 /* Attach crtc to drm_atomic_state*/
7998 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8000 ret = PTR_ERR_OR_ZERO(crtc_state);
8004 /* force a restore */
8005 crtc_state->mode_changed = true;
8007 /* Attach plane to drm_atomic_state */
8008 plane_state = drm_atomic_get_plane_state(state, plane);
8010 ret = PTR_ERR_OR_ZERO(plane_state);
8015 /* Call commit internally with the state we just constructed */
8016 ret = drm_atomic_commit(state);
8021 DRM_ERROR("Restoring old state failed with %i\n", ret);
8022 drm_atomic_state_put(state);
* This function handles all cases when a set mode does not come upon hotplug.
* This includes when a display is unplugged and then plugged back into the
* same port, and when running without usermode desktop manager support.
8032 void dm_restore_drm_connector_state(struct drm_device *dev,
8033 struct drm_connector *connector)
8035 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8036 struct amdgpu_crtc *disconnected_acrtc;
8037 struct dm_crtc_state *acrtc_state;
8039 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8042 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8043 if (!disconnected_acrtc)
8046 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8047 if (!acrtc_state->stream)
* If the previous sink is not released and is different from the current
* one, we deduce we are in a state where we cannot rely on a usermode
* call to turn on the display, so we do it here.
8055 if (acrtc_state->stream->sink != aconnector->dc_sink)
8056 dm_force_atomic_commit(&aconnector->base);
* Grabs all modesetting locks to serialize against any blocking commits,
* and waits for completion of all non-blocking commits.
8063 static int do_aquire_global_lock(struct drm_device *dev,
8064 struct drm_atomic_state *state)
8066 struct drm_crtc *crtc;
8067 struct drm_crtc_commit *commit;
* Adding all modeset locks to acquire_ctx will
* ensure that when the framework releases it, the
* extra locks we are taking here will get released too.
8075 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8079 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8080 spin_lock(&crtc->commit_lock);
8081 commit = list_first_entry_or_null(&crtc->commit_list,
8082 struct drm_crtc_commit, commit_entry);
8084 drm_crtc_commit_get(commit);
8085 spin_unlock(&crtc->commit_lock);
* Make sure all pending HW programming has completed and all page flips are done.
8094 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8097 ret = wait_for_completion_interruptible_timeout(
8098 &commit->flip_done, 10*HZ);
8101 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8102 "timed out\n", crtc->base.id, crtc->name);
8104 drm_crtc_commit_put(commit);
8107 return ret < 0 ? ret : 0;
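/*
 * Derive the freesync/VRR config for a CRTC from the connector's reported
 * min/max refresh range and the requested mode.
 */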
8110 static void get_freesync_config_for_crtc(
8111 struct dm_crtc_state *new_crtc_state,
8112 struct dm_connector_state *new_con_state)
8114 struct mod_freesync_config config = {0};
8115 struct amdgpu_dm_connector *aconnector =
8116 to_amdgpu_dm_connector(new_con_state->base.connector);
8117 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8118 int vrefresh = drm_mode_vrefresh(mode);
8120 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8121 vrefresh >= aconnector->min_vfreq &&
8122 vrefresh <= aconnector->max_vfreq;
8124 if (new_crtc_state->vrr_supported) {
8125 new_crtc_state->stream->ignore_msa_timing_param = true;
8126 config.state = new_crtc_state->base.vrr_enabled ?
8127 VRR_STATE_ACTIVE_VARIABLE :
8129 config.min_refresh_in_uhz =
8130 aconnector->min_vfreq * 1000000;
8131 config.max_refresh_in_uhz =
8132 aconnector->max_vfreq * 1000000;
8133 config.vsif_supported = true;
8137 new_crtc_state->freesync_config = config;
8140 static void reset_freesync_config_for_crtc(
8141 struct dm_crtc_state *new_crtc_state)
8143 new_crtc_state->vrr_supported = false;
8145 memset(&new_crtc_state->vrr_infopacket, 0,
8146 sizeof(new_crtc_state->vrr_infopacket));
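/*
 * Validate and (re)build the DC stream for one CRTC during atomic check:
 * create a stream for enabled CRTCs, remove streams for disabled ones, and
 * update scaling, ABM and freesync settings that don't require a full
 * modeset.
 */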
8149 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8150 struct drm_atomic_state *state,
8151 struct drm_crtc *crtc,
8152 struct drm_crtc_state *old_crtc_state,
8153 struct drm_crtc_state *new_crtc_state,
8155 bool *lock_and_validation_needed)
8157 struct dm_atomic_state *dm_state = NULL;
8158 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8159 struct dc_stream_state *new_stream;
8163 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8164 * update changed items
8166 struct amdgpu_crtc *acrtc = NULL;
8167 struct amdgpu_dm_connector *aconnector = NULL;
8168 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8169 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8173 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8174 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8175 acrtc = to_amdgpu_crtc(crtc);
8176 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8178 /* TODO This hack should go away */
8179 if (aconnector && enable) {
8180 /* Make sure fake sink is created in plug-in scenario */
8181 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8183 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8186 if (IS_ERR(drm_new_conn_state)) {
8187 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8191 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8192 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8194 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8197 new_stream = create_validate_stream_for_sink(aconnector,
8198 &new_crtc_state->mode,
8200 dm_old_crtc_state->stream);
* We can have no stream on ACTION_SET if a display
* was disconnected during S3; in this case it is not an
* error, the OS will be updated after detection and
* will do the right thing on the next atomic commit.
8210 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8211 __func__, acrtc->base.base.id);
8217 * TODO: Check VSDB bits to decide whether this should
8218 * be enabled or not.
8220 new_stream->triggered_crtc_reset.enabled =
8221 dm->force_timing_sync;
8223 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8225 ret = fill_hdr_info_packet(drm_new_conn_state,
8226 &new_stream->hdr_static_metadata);
8231 * If we already removed the old stream from the context
8232 * (and set the new stream to NULL) then we can't reuse
8233 * the old stream even if the stream and scaling are unchanged.
* Doing so would hit the BUG_ON below and cause a black screen.
8236 * TODO: Refactor this function to allow this check to work
8237 * in all conditions.
8239 if (dm_new_crtc_state->stream &&
8240 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8241 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8242 new_crtc_state->mode_changed = false;
8243 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8244 new_crtc_state->mode_changed);
8248 /* mode_changed flag may get updated above, need to check again */
8249 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d, active_changed:%d, "
"connectors_changed:%d\n",
8257 new_crtc_state->enable,
8258 new_crtc_state->active,
8259 new_crtc_state->planes_changed,
8260 new_crtc_state->mode_changed,
8261 new_crtc_state->active_changed,
8262 new_crtc_state->connectors_changed);
8264 /* Remove stream for any changed/disabled CRTC */
8267 if (!dm_old_crtc_state->stream)
8270 ret = dm_atomic_get_state(state, &dm_state);
8274 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8277 /* i.e. reset mode */
8278 if (dc_remove_stream_from_ctx(
8281 dm_old_crtc_state->stream) != DC_OK) {
8286 dc_stream_release(dm_old_crtc_state->stream);
8287 dm_new_crtc_state->stream = NULL;
8289 reset_freesync_config_for_crtc(dm_new_crtc_state);
8291 *lock_and_validation_needed = true;
8293 } else { /* Add stream for any updated/enabled CRTC */
8295  * Quick fix to prevent a NULL pointer dereference on new_stream when
8296  * newly added MST connectors are not found in the existing crtc_state in chained mode.
8297 * TODO: need to dig out the root cause of that
8299 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8302 if (modereset_required(new_crtc_state))
8305 if (modeset_required(new_crtc_state, new_stream,
8306 dm_old_crtc_state->stream)) {
8308 WARN_ON(dm_new_crtc_state->stream);
8310 ret = dm_atomic_get_state(state, &dm_state);
8314 dm_new_crtc_state->stream = new_stream;
8316 dc_stream_retain(new_stream);
8318 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8321 if (dc_add_stream_to_ctx(
8324 dm_new_crtc_state->stream) != DC_OK) {
8329 *lock_and_validation_needed = true;
8334 /* Release extra reference */
8336 dc_stream_release(new_stream);
8339  * Below we want to do dc stream updates that do not require a
8340  * full modeset.
8342 if (!(enable && aconnector && new_crtc_state->active))
8345  * Given the above conditions, the dc stream state cannot be NULL because:
8346  * 1. The CRTC is being enabled (the stream has just been added
8347  * to the dc context, or is already in the context),
8348  * 2. it has a valid connector attached, and
8349  * 3. it is currently active and enabled.
8350 * => The dc stream state currently exists.
8352 BUG_ON(dm_new_crtc_state->stream == NULL);
8354 /* Scaling or underscan settings */
8355 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8356 update_stream_scaling_settings(
8357 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8360 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8363 * Color management settings. We also update color properties
8364  * when a modeset is needed, to ensure they get reprogrammed.
8366 if (dm_new_crtc_state->base.color_mgmt_changed ||
8367 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8368 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8373 /* Update Freesync settings. */
8374 get_freesync_config_for_crtc(dm_new_crtc_state,
8381 dc_stream_release(new_stream);
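/*
 * Decide whether this plane change requires all planes on the CRTC to be
 * removed from the DC context and re-added (a full plane reset).
 */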
8385 static bool should_reset_plane(struct drm_atomic_state *state,
8386 struct drm_plane *plane,
8387 struct drm_plane_state *old_plane_state,
8388 struct drm_plane_state *new_plane_state)
8390 struct drm_plane *other;
8391 struct drm_plane_state *old_other_state, *new_other_state;
8392 struct drm_crtc_state *new_crtc_state;
8396  * TODO: Remove this hack once the checks below are sufficient
8397  * to determine when we need to reset all the planes on
8400 if (state->allow_modeset)
8403 /* Exit early if we know that we're adding or removing the plane. */
8404 if (old_plane_state->crtc != new_plane_state->crtc)
8407 /* old crtc == new_crtc == NULL, plane not in context. */
8408 if (!new_plane_state->crtc)
8412 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8414 if (!new_crtc_state)
8417 /* CRTC Degamma changes currently require us to recreate planes. */
8418 if (new_crtc_state->color_mgmt_changed)
8421 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8425 * If there are any new primary or overlay planes being added or
8426 * removed then the z-order can potentially change. To ensure
8427  * correct z-order and pipe acquisition, the current DC architecture
8428 * requires us to remove and recreate all existing planes.
8430 * TODO: Come up with a more elegant solution for this.
8432 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8433 struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8435 if (other->type == DRM_PLANE_TYPE_CURSOR)
8438 if (old_other_state->crtc != new_plane_state->crtc &&
8439 new_other_state->crtc != new_plane_state->crtc)
8442 if (old_other_state->crtc != new_other_state->crtc)
8445 /* Src/dst size and scaling updates. */
8446 if (old_other_state->src_w != new_other_state->src_w ||
8447 old_other_state->src_h != new_other_state->src_h ||
8448 old_other_state->crtc_w != new_other_state->crtc_w ||
8449 old_other_state->crtc_h != new_other_state->crtc_h)
8452 /* Rotation / mirroring updates. */
8453 if (old_other_state->rotation != new_other_state->rotation)
8456 /* Blending updates. */
8457 if (old_other_state->pixel_blend_mode !=
8458 new_other_state->pixel_blend_mode)
8461 /* Alpha updates. */
8462 if (old_other_state->alpha != new_other_state->alpha)
8465 /* Colorspace changes. */
8466 if (old_other_state->color_range != new_other_state->color_range ||
8467 old_other_state->color_encoding != new_other_state->color_encoding)
8470 /* Framebuffer checks fall at the end. */
8471 if (!old_other_state->fb || !new_other_state->fb)
8474 /* Pixel format changes can require bandwidth updates. */
8475 if (old_other_state->fb->format != new_other_state->fb->format)
8478 old_dm_plane_state = to_dm_plane_state(old_other_state);
8479 new_dm_plane_state = to_dm_plane_state(new_other_state);
8481 /* Tiling and DCC changes also require bandwidth updates. */
8482 if (old_dm_plane_state->tiling_flags !=
8483 new_dm_plane_state->tiling_flags)
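/*
 * Add or remove the DC plane state backing this DRM plane in the DC context
 * being built for this atomic update.
 */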
8490 static int dm_update_plane_state(struct dc *dc,
8491 struct drm_atomic_state *state,
8492 struct drm_plane *plane,
8493 struct drm_plane_state *old_plane_state,
8494 struct drm_plane_state *new_plane_state,
8496 bool *lock_and_validation_needed)
8499 struct dm_atomic_state *dm_state = NULL;
8500 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8501 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8502 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8503 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8504 struct amdgpu_crtc *new_acrtc;
8509 new_plane_crtc = new_plane_state->crtc;
8510 old_plane_crtc = old_plane_state->crtc;
8511 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8512 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8514 /* TODO: Implement a better atomic check for the cursor plane */
8515 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8516 if (!enable || !new_plane_crtc ||
8517 drm_atomic_plane_disabling(plane->state, new_plane_state))
8520 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
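/* Reject cursor sizes larger than the hardware cursor limits. */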
8522 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8523 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8524 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8525 new_plane_state->crtc_w, new_plane_state->crtc_h);
8532 needs_reset = should_reset_plane(state, plane, old_plane_state,
8535 /* Remove any changed/removed planes */
8540 if (!old_plane_crtc)
8543 old_crtc_state = drm_atomic_get_old_crtc_state(
8544 state, old_plane_crtc);
8545 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8547 if (!dm_old_crtc_state->stream)
8550 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8551 plane->base.id, old_plane_crtc->base.id);
8553 ret = dm_atomic_get_state(state, &dm_state);
8557 if (!dc_remove_plane_from_context(
8559 dm_old_crtc_state->stream,
8560 dm_old_plane_state->dc_state,
8561 dm_state->context)) {
8567 dc_plane_state_release(dm_old_plane_state->dc_state);
8568 dm_new_plane_state->dc_state = NULL;
8570 *lock_and_validation_needed = true;
8572 } else { /* Add new planes */
8573 struct dc_plane_state *dc_new_plane_state;
8575 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8578 if (!new_plane_crtc)
8581 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8582 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8584 if (!dm_new_crtc_state->stream)
8590 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8594 WARN_ON(dm_new_plane_state->dc_state);
8596 dc_new_plane_state = dc_create_plane_state(dc);
8597 if (!dc_new_plane_state)
8600 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8601 plane->base.id, new_plane_crtc->base.id);
8603 ret = fill_dc_plane_attributes(
8604 drm_to_adev(new_plane_crtc->dev),
8609 dc_plane_state_release(dc_new_plane_state);
8613 ret = dm_atomic_get_state(state, &dm_state);
8615 dc_plane_state_release(dc_new_plane_state);
8620 * Any atomic check errors that occur after this will
8621 * not need a release. The plane state will be attached
8622 * to the stream, and therefore part of the atomic
8623 * state. It'll be released when the atomic state is
8626 if (!dc_add_plane_to_context(
8628 dm_new_crtc_state->stream,
8630 dm_state->context)) {
8632 dc_plane_state_release(dc_new_plane_state);
8636 dm_new_plane_state->dc_state = dc_new_plane_state;
8638 /* Tell DC to do a full surface update every time there
8639 * is a plane change. Inefficient, but works for now.
8641 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8643 *lock_and_validation_needed = true;
8650 #if defined(CONFIG_DRM_AMD_DC_DCN)
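/*
 * Pull every CRTC that shares this CRTC's MST topology into the atomic
 * state so the DSC configuration can be recomputed for the whole topology.
 */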
8651 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8653 struct drm_connector *connector;
8654 struct drm_connector_state *conn_state;
8655 struct amdgpu_dm_connector *aconnector = NULL;
8657 for_each_new_connector_in_state(state, connector, conn_state, i) {
8658 if (conn_state->crtc != crtc)
8661 aconnector = to_amdgpu_dm_connector(connector);
8662 if (!aconnector->port || !aconnector->mst_port)
8671 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8676 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8677 * @dev: The DRM device
8678 * @state: The atomic state to commit
8680 * Validate that the given atomic state is programmable by DC into hardware.
8681 * This involves constructing a &struct dc_state reflecting the new hardware
8682 * state we wish to commit, then querying DC to see if it is programmable. It's
8683 * important not to modify the existing DC state. Otherwise, atomic_check
8684 * may unexpectedly commit hardware changes.
8686 * When validating the DC state, it's important that the right locks are
8687  * acquired. For the full update case, which removes/adds/updates streams on one
8688  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
8689  * that any such full update commit will wait for the completion of any outstanding
8690  * flip using DRM's synchronization events.
8692  * Note that DM adds the affected connectors for all CRTCs in the state, even when that
8693 * might not seem necessary. This is because DC stream creation requires the
8694 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8695 * be possible but non-trivial - a possible TODO item.
8697  * Return: 0 on success, or a negative error code if validation failed.
8699 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8700 struct drm_atomic_state *state)
8702 struct amdgpu_device *adev = drm_to_adev(dev);
8703 struct dm_atomic_state *dm_state = NULL;
8704 struct dc *dc = adev->dm.dc;
8705 struct drm_connector *connector;
8706 struct drm_connector_state *old_con_state, *new_con_state;
8707 struct drm_crtc *crtc;
8708 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8709 struct drm_plane *plane;
8710 struct drm_plane_state *old_plane_state, *new_plane_state;
8711 enum dc_status status;
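/* Set when this update must go through DC global validation under the global lock. */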
8713 bool lock_and_validation_needed = false;
8715 amdgpu_check_debugfs_connector_property_change(adev, state);
8717 ret = drm_atomic_helper_check_modeset(dev, state);
8721 /* Check connector changes */
8722 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8723 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8724 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8726 /* Skip connectors that are disabled or part of modeset already. */
8727 if (!old_con_state->crtc && !new_con_state->crtc)
8730 if (!new_con_state->crtc)
8733 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8734 if (IS_ERR(new_crtc_state)) {
8735 ret = PTR_ERR(new_crtc_state);
8739 if (dm_old_con_state->abm_level !=
8740 dm_new_con_state->abm_level)
8741 new_crtc_state->connectors_changed = true;
8744 #if defined(CONFIG_DRM_AMD_DC_DCN)
8745 if (adev->asic_type >= CHIP_NAVI10) {
8746 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8747 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8748 ret = add_affected_mst_dsc_crtcs(state, crtc);
8755 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8756 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8757 !new_crtc_state->color_mgmt_changed &&
8758 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8761 if (!new_crtc_state->enable)
8764 ret = drm_atomic_add_affected_connectors(state, crtc);
8768 ret = drm_atomic_add_affected_planes(state, crtc);
8774 * Add all primary and overlay planes on the CRTC to the state
8775 * whenever a plane is enabled to maintain correct z-ordering
8776 * and to enable fast surface updates.
8778 drm_for_each_crtc(crtc, dev) {
8779 bool modified = false;
8781 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8782 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8785 if (new_plane_state->crtc == crtc ||
8786 old_plane_state->crtc == crtc) {
8795 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8796 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8800 drm_atomic_get_plane_state(state, plane);
8802 if (IS_ERR(new_plane_state)) {
8803 ret = PTR_ERR(new_plane_state);
8809 /* Prepass for updating tiling flags on new planes. */
8810 for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8811 struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8812 struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8814 ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8815 &new_dm_plane_state->tmz_surface);
8820 /* Remove existing planes if they are modified */
8821 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8822 ret = dm_update_plane_state(dc, state, plane,
8826 &lock_and_validation_needed);
8831 /* Disable all CRTCs which require a disable */
8832 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8833 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8837 &lock_and_validation_needed);
8842 /* Enable all CRTCs which require an enable */
8843 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8844 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8848 &lock_and_validation_needed);
8853 /* Add new/modified planes */
8854 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8855 ret = dm_update_plane_state(dc, state, plane,
8859 &lock_and_validation_needed);
8864 /* Run this here since we want to validate the streams we created */
8865 ret = drm_atomic_helper_check_planes(dev, state);
8869 if (state->legacy_cursor_update) {
8871 * This is a fast cursor update coming from the plane update
8872  * helper; check if it can be done asynchronously for better
8875 state->async_update =
8876 !drm_atomic_helper_async_check(dev, state);
8879 * Skip the remaining global validation if this is an async
8880 * update. Cursor updates can be done without affecting
8881 * state or bandwidth calcs and this avoids the performance
8882 * penalty of locking the private state object and
8883 * allocating a new dc_state.
8885 if (state->async_update)
8889 /* Check scaling and underscan changes */
8890 /* TODO: Scaling changes validation was removed due to the inability to commit
8891  * a new stream into the context w/o causing a full reset. Need to
8892  * decide how to handle this.
8894 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8895 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8896 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8897 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8899 /* Skip any modesets/resets */
8900 if (!acrtc || drm_atomic_crtc_needs_modeset(
8901 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8904 /* Skip anything that is not a scaling or underscan change */
8905 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8908 lock_and_validation_needed = true;
8912 * Streams and planes are reset when there are changes that affect
8913 * bandwidth. Anything that affects bandwidth needs to go through
8914 * DC global validation to ensure that the configuration can be applied
8917  * We currently have to stall out here in atomic_check for outstanding
8918 * commits to finish in this case because our IRQ handlers reference
8919 * DRM state directly - we can end up disabling interrupts too early
8922 * TODO: Remove this stall and drop DM state private objects.
8924 if (lock_and_validation_needed) {
8925 ret = dm_atomic_get_state(state, &dm_state);
8929 ret = do_aquire_global_lock(dev, state);
8933 #if defined(CONFIG_DRM_AMD_DC_DCN)
8934 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8937 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8943 * Perform validation of MST topology in the state:
8944 * We need to perform MST atomic check before calling
8945  * dc_validate_global_state(), or we risk getting stuck
8946  * in an infinite loop and eventually hanging.
8948 ret = drm_dp_mst_atomic_check(state);
8951 status = dc_validate_global_state(dc, dm_state->context, false);
8952 if (status != DC_OK) {
8953 DC_LOG_WARNING("DC global validation failure: %s (%d)",
8954 dc_status_to_str(status), status);
8960 * The commit is a fast update. Fast updates shouldn't change
8961  * the DC context or affect global validation, and their
8962  * commit work can be done in parallel with other commits not touching
8963 * the same resource. If we have a new DC context as part of
8964 * the DM atomic state from validation we need to free it and
8965 * retain the existing one instead.
8967 * Furthermore, since the DM atomic state only contains the DC
8968 * context and can safely be annulled, we can free the state
8969 * and clear the associated private object now to free
8970 * some memory and avoid a possible use-after-free later.
8973 for (i = 0; i < state->num_private_objs; i++) {
8974 struct drm_private_obj *obj = state->private_objs[i].ptr;
8976 if (obj->funcs == adev->dm.atomic_obj.funcs) {
8977 int j = state->num_private_objs-1;
8979 dm_atomic_destroy_state(obj,
8980 state->private_objs[i].state);
8982 /* If i is not at the end of the array then the
8983 * last element needs to be moved to where i was
8984 * before the array can safely be truncated.
8987 state->private_objs[i] =
8988 state->private_objs[j];
8990 state->private_objs[j].ptr = NULL;
8991 state->private_objs[j].state = NULL;
8992 state->private_objs[j].old_state = NULL;
8993 state->private_objs[j].new_state = NULL;
8995 state->num_private_objs = j;
9001 /* Store the overall update type for use later in atomic check. */
9002 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9003 struct dm_crtc_state *dm_new_crtc_state =
9004 to_dm_crtc_state(new_crtc_state);
9006 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9011 /* Success; all checks passed */
9016 if (ret == -EDEADLK)
9017 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9018 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9019 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9021 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9026 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9027 struct amdgpu_dm_connector *amdgpu_dm_connector)
9030 bool capable = false;
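/*
 * A sink that sets DP_MSA_TIMING_PAR_IGNORED can derive timing from the video
 * stream itself, which is required for variable refresh (FreeSync) over DP.
 */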
9032 if (amdgpu_dm_connector->dc_link &&
9033 dm_helpers_dp_read_dpcd(
9035 amdgpu_dm_connector->dc_link,
9036 DP_DOWN_STREAM_PORT_COUNT,
9038 sizeof(dpcd_data))) {
9039 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9044 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9048 bool edid_check_required;
9049 struct detailed_timing *timing;
9050 struct detailed_non_pixel *data;
9051 struct detailed_data_monitor_range *range;
9052 struct amdgpu_dm_connector *amdgpu_dm_connector =
9053 to_amdgpu_dm_connector(connector);
9054 struct dm_connector_state *dm_con_state = NULL;
9056 struct drm_device *dev = connector->dev;
9057 struct amdgpu_device *adev = drm_to_adev(dev);
9058 bool freesync_capable = false;
9060 if (!connector->state) {
9061 DRM_ERROR("%s - Connector has no state\n", __func__);
9066 dm_con_state = to_dm_connector_state(connector->state);
9068 amdgpu_dm_connector->min_vfreq = 0;
9069 amdgpu_dm_connector->max_vfreq = 0;
9070 amdgpu_dm_connector->pixel_clock_mhz = 0;
9075 dm_con_state = to_dm_connector_state(connector->state);
9077 edid_check_required = false;
9078 if (!amdgpu_dm_connector->dc_sink) {
9079 DRM_ERROR("dc_sink NULL, could not add freesync module.\n");
9082 if (!adev->dm.freesync_module)
9085  * If the EDID is non-zero, restrict FreeSync support to DP and eDP only.
9088 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9089 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9090 edid_check_required = is_dp_capable_without_timing_msa(
9092 amdgpu_dm_connector);
9095 if (edid_check_required && (edid->version > 1 ||
9096 (edid->version == 1 && edid->revision > 1))) {
9097 for (i = 0; i < 4; i++) {
9099 timing = &edid->detailed_timings[i];
9100 data = &timing->data.other_data;
9101 range = &data->data.range;
9103 * Check if monitor has continuous frequency mode
9105 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9108 * Check for flag range limits only. If flag == 1 then
9109  * no additional timing information is provided.
9110 * Default GTF, GTF Secondary curve and CVT are not
9113 if (range->flags != 1)
9116 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9117 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9118 amdgpu_dm_connector->pixel_clock_mhz =
9119 range->pixel_clock_mhz * 10;
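/* Only advertise FreeSync when the monitor's refresh range is wider than 10 Hz. */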
9123 if (amdgpu_dm_connector->max_vfreq -
9124 amdgpu_dm_connector->min_vfreq > 10) {
9126 freesync_capable = true;
9132 dm_con_state->freesync_capable = freesync_capable;
9134 if (connector->vrr_capable_property)
9135 drm_connector_set_vrr_capable_property(connector,
9139 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9141 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9143 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9145 if (link->type == dc_connection_none)
9147 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9148 dpcd_data, sizeof(dpcd_data))) {
9149 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9151 if (dpcd_data[0] == 0) {
9152 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9153 link->psr_settings.psr_feature_enabled = false;
9155 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9156 link->psr_settings.psr_feature_enabled = true;
9159 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9164 * amdgpu_dm_link_setup_psr() - configure psr link
9165 * @stream: stream state
9167  * Return: true on success
9169 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9171 struct dc_link *link = NULL;
9172 struct psr_config psr_config = {0};
9173 struct psr_context psr_context = {0};
9179 link = stream->link;
9181 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
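/* Only program PSR when the sink reports a non-zero PSR version. */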
9183 if (psr_config.psr_version > 0) {
9184 psr_config.psr_exit_link_training_required = 0x1;
9185 psr_config.psr_frame_capture_indication_req = 0;
9186 psr_config.psr_rfb_setup_time = 0x37;
9187 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9188 psr_config.allow_smu_optimizations = 0x0;
9190 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9193 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9199 * amdgpu_dm_psr_enable() - enable psr f/w
9200 * @stream: stream state
9202  * Return: true on success
9204 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9206 struct dc_link *link = stream->link;
9207 unsigned int vsync_rate_hz = 0;
9208 struct dc_static_screen_params params = {0};
9209 /* Calculate number of static frames before generating interrupt to
9212 /* Init a fail-safe of 2 static frames */
9213 unsigned int num_frames_static = 2;
9215 DRM_DEBUG_DRIVER("Enabling psr...\n");
9217 vsync_rate_hz = div64_u64(div64_u64((
9218 stream->timing.pix_clk_100hz * 100),
9219 stream->timing.v_total),
9220 stream->timing.h_total);
9223 * Calculate number of frames such that at least 30 ms of time has
9226 if (vsync_rate_hz != 0) {
9227 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9228 num_frames_static = (30000 / frame_time_microsec) + 1;
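/* Treat cursor, overlay and surface updates as activity for static-screen detection. */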
9231 params.triggers.cursor_update = true;
9232 params.triggers.overlay_update = true;
9233 params.triggers.surface_update = true;
9234 params.num_frames = num_frames_static;
9236 dc_stream_set_static_screen_params(link->ctx->dc,
9240 return dc_link_set_psr_allow_active(link, true, false);
9244 * amdgpu_dm_psr_disable() - disable psr f/w
9245 * @stream: stream state
9247  * Return: true on success
9249 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9252 DRM_DEBUG_DRIVER("Disabling psr...\n");
9254 return dc_link_set_psr_allow_active(stream->link, false, true);
9258  * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams
9259  * if psr is enabled on any stream
9261  * Return: true on success
9263 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9265 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9266 return dc_set_psr_allow_active(dm->dc, false);
9269 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9271 struct amdgpu_device *adev = drm_to_adev(dev);
9272 struct dc *dc = adev->dm.dc;
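/* Propagate force_timing_sync to every existing stream and retrigger sync, all under the DC lock. */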
9275 mutex_lock(&adev->dm.dc_lock);
9276 if (dc->current_state) {
9277 for (i = 0; i < dc->current_state->stream_count; ++i)
9278 dc->current_state->streams[i]
9279 ->triggered_crtc_reset.enabled =
9280 adev->dm.force_timing_sync;
9282 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9283 dc_trigger_sync(dc, dc->current_state);
9285 mutex_unlock(&adev->dm.dc_lock);