2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
49 #include "amdgpu_pm.h"
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
59 #include "ivsrcid/ivsrcid_vislands30.h"
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/version.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 #include <drm/drm_hdcp.h>
81 #if defined(CONFIG_DRM_AMD_DC_DCN)
82 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
84 #include "dcn/dcn_1_0_offset.h"
85 #include "dcn/dcn_1_0_sh_mask.h"
86 #include "soc15_hw_ip.h"
87 #include "vega10_ip_offset.h"
89 #include "soc15_common.h"
92 #include "modules/inc/mod_freesync.h"
93 #include "modules/power/power_helpers.h"
94 #include "modules/inc/mod_info_packet.h"
96 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
97 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
104 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
106 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
109 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
112 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
115 /* Number of bytes in PSP header for firmware. */
116 #define PSP_HEADER_BYTES 0x100
118 /* Number of bytes in PSP footer for firmware. */
119 #define PSP_FOOTER_BYTES 0x100
124 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
125 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
126 * requests into DC requests, and DC responses into DRM responses.
128 * The root control structure is &struct amdgpu_display_manager.
131 /* basic init/fini API */
132 static int amdgpu_dm_init(struct amdgpu_device *adev);
133 static void amdgpu_dm_fini(struct amdgpu_device *adev);
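/* Map the DC-reported DP dongle type to the matching DRM subconnector type. */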
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
137 switch (link->dpcd_caps.dongle_type) {
138 case DISPLAY_DONGLE_NONE:
139 return DRM_MODE_SUBCONNECTOR_Native;
140 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 return DRM_MODE_SUBCONNECTOR_VGA;
142 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_DVID;
145 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 return DRM_MODE_SUBCONNECTOR_HDMIA;
148 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
150 return DRM_MODE_SUBCONNECTOR_Unknown;
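/*
 * Push the detected subconnector type into the connector's
 * dp_subconnector_property. Only DisplayPort connectors are updated, and
 * DRM_MODE_SUBCONNECTOR_Unknown is reported when no sink is attached.
 */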
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
156 struct dc_link *link = aconnector->dc_link;
157 struct drm_connector *connector = &aconnector->base;
158 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
160 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
163 if (aconnector->dc_sink)
164 subconnector = get_subconnector_type(link);
166 drm_object_property_set_value(&connector->base,
167 connector->dev->mode_config.dp_subconnector_property,
172 * initializes drm_device display related structures, based on the information
173 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
174 * drm_encoder, drm_mode_config
176 * Returns 0 on success
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 struct drm_plane *plane,
184 unsigned long possible_crtcs,
185 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 struct amdgpu_dm_connector *amdgpu_dm_connector,
192 struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 struct amdgpu_encoder *aencoder,
195 uint32_t link_index);
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 struct drm_atomic_state *state);
204 static void handle_cursor_update(struct drm_plane *plane,
205 struct drm_plane_state *old_plane_state);
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 * dm_vblank_get_counter
220 * Get counter for number of vertical blanks
223 * struct amdgpu_device *adev - [in] desired amdgpu device
224 * int crtc - [in] which CRTC to get the counter from
227 * Counter for vertical blanks
229 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
231 if (crtc >= adev->mode_info.num_crtc)
234 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
236 if (acrtc->dm_irq_params.stream == NULL) {
237 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
242 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
246 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
247 u32 *vbl, u32 *position)
249 uint32_t v_blank_start, v_blank_end, h_position, v_position;
251 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
254 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
256 if (acrtc->dm_irq_params.stream == NULL) {
257 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 * TODO rework base driver to use values directly.
264 * for now parse it back into reg-format
266 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 *position = v_position | (h_position << 16);
273 *vbl = v_blank_start | (v_blank_end << 16);
279 static bool dm_is_idle(void *handle)
285 static int dm_wait_for_idle(void *handle)
291 static bool dm_check_soft_reset(void *handle)
296 static int dm_soft_reset(void *handle)
302 static struct amdgpu_crtc *
303 get_crtc_by_otg_inst(struct amdgpu_device *adev,
306 struct drm_device *dev = adev_to_drm(adev);
307 struct drm_crtc *crtc;
308 struct amdgpu_crtc *amdgpu_crtc;
310 if (otg_inst == -1) {
312 return adev->mode_info.crtcs[0];
315 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
316 amdgpu_crtc = to_amdgpu_crtc(crtc);
318 if (amdgpu_crtc->otg_inst == otg_inst)
325 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
327 return acrtc->dm_irq_params.freesync_config.state ==
328 VRR_STATE_ACTIVE_VARIABLE ||
329 acrtc->dm_irq_params.freesync_config.state ==
330 VRR_STATE_ACTIVE_FIXED;
333 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
335 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
336 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 * dm_pflip_high_irq() - Handle pageflip interrupt
341 * @interrupt_params: ignored
343 * Handles the pageflip interrupt by notifying all interested parties
344 * that the pageflip has been completed.
346 static void dm_pflip_high_irq(void *interrupt_params)
348 struct amdgpu_crtc *amdgpu_crtc;
349 struct common_irq_params *irq_params = interrupt_params;
350 struct amdgpu_device *adev = irq_params->adev;
352 struct drm_pending_vblank_event *e;
353 uint32_t vpos, hpos, v_blank_start, v_blank_end;
356 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
358 /* IRQ could occur when in initial stage */
359 /* TODO work and BO cleanup */
360 if (amdgpu_crtc == NULL) {
361 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
365 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
367 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
368 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
369 amdgpu_crtc->pflip_status,
370 AMDGPU_FLIP_SUBMITTED,
371 amdgpu_crtc->crtc_id,
373 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
377 /* page flip completed. */
378 e = amdgpu_crtc->event;
379 amdgpu_crtc->event = NULL;
384 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
386 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
388 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
389 &v_blank_end, &hpos, &vpos) ||
390 (vpos < v_blank_start)) {
391 /* Update to correct count and vblank timestamp if racing with
392 * vblank irq. This also updates to the correct vblank timestamp
393 * even in VRR mode, as scanout is past the front-porch atm.
395 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
397 /* Wake up userspace by sending the pageflip event with proper
398 * count and timestamp of vblank of flip completion.
401 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
403 /* Event sent, so done with vblank for this flip */
404 drm_crtc_vblank_put(&amdgpu_crtc->base);
407 /* VRR active and inside front-porch: vblank count and
408 * timestamp for pageflip event will only be up to date after
409 * drm_crtc_handle_vblank() has been executed from late vblank
410 * irq handler after start of back-porch (vline 0). We queue the
411 * pageflip event for send-out by drm_crtc_handle_vblank() with
412 * updated timestamp and count, once it runs after us.
414 * We need to open-code this instead of using the helper
415 * drm_crtc_arm_vblank_event(), as that helper would
416 * call drm_crtc_accurate_vblank_count(), which we must
417 * not call in VRR mode while we are in front-porch!
420 /* sequence will be replaced by real count during send-out. */
421 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
422 e->pipe = amdgpu_crtc->crtc_id;
424 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
428 /* Keep track of vblank of this flip for flip throttling. We use the
429 * cooked hw counter, as that one is incremented at the start of this vblank
430 * of pageflip completion, so last_flip_vblank is the forbidden count
431 * for queueing new pageflips if vsync + VRR is enabled.
433 amdgpu_crtc->dm_irq_params.last_flip_vblank =
434 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
436 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
437 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
439 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
440 amdgpu_crtc->crtc_id, amdgpu_crtc,
441 vrr_active, (int) !e);
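/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: common IRQ parameters holding the amdgpu device and
 *                    IRQ source.
 *
 * In VRR mode, core vblank handling (and delivery of queued pageflip events)
 * is deferred to this handler, which runs after the end of the front-porch.
 * It also performs BTR processing for pre-DCE12 (pre-AI family) ASICs.
 */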
444 static void dm_vupdate_high_irq(void *interrupt_params)
446 struct common_irq_params *irq_params = interrupt_params;
447 struct amdgpu_device *adev = irq_params->adev;
448 struct amdgpu_crtc *acrtc;
452 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
455 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
457 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
461 /* Core vblank handling is done here after end of front-porch in
462 * vrr mode, as vblank timestamping will give valid results
463 * now that it is done after the front-porch. This will also deliver
464 * page-flip completion events that have been queued to us
465 * if a pageflip happened inside front-porch.
468 drm_crtc_handle_vblank(&acrtc->base);
470 /* BTR processing for pre-DCE12 ASICs */
471 if (acrtc->dm_irq_params.stream &&
472 adev->family < AMDGPU_FAMILY_AI) {
473 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
474 mod_freesync_handle_v_update(
475 adev->dm.freesync_module,
476 acrtc->dm_irq_params.stream,
477 &acrtc->dm_irq_params.vrr_params);
479 dc_stream_adjust_vmin_vmax(
481 acrtc->dm_irq_params.stream,
482 &acrtc->dm_irq_params.vrr_params.adjust);
483 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
490 * dm_crtc_high_irq() - Handles CRTC interrupt
491 * @interrupt_params: used for determining the CRTC instance
493 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
496 static void dm_crtc_high_irq(void *interrupt_params)
498 struct common_irq_params *irq_params = interrupt_params;
499 struct amdgpu_device *adev = irq_params->adev;
500 struct amdgpu_crtc *acrtc;
504 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
508 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
510 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
511 vrr_active, acrtc->dm_irq_params.active_planes);
514 * Core vblank handling at the start of the front-porch is only possible
515 * in non-VRR mode, as only then does vblank timestamping give
516 * valid results while done in the front-porch. Otherwise defer it
517 * to dm_vupdate_high_irq after the end of the front-porch.
520 drm_crtc_handle_vblank(&acrtc->base);
523 * The following must happen at the start of vblank, for CRC
524 * computation and below-the-range (BTR) support in VRR mode.
526 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
528 /* BTR updates need to happen before VUPDATE on Vega and above. */
529 if (adev->family < AMDGPU_FAMILY_AI)
532 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
534 if (acrtc->dm_irq_params.stream &&
535 acrtc->dm_irq_params.vrr_params.supported &&
536 acrtc->dm_irq_params.freesync_config.state ==
537 VRR_STATE_ACTIVE_VARIABLE) {
538 mod_freesync_handle_v_update(adev->dm.freesync_module,
539 acrtc->dm_irq_params.stream,
540 &acrtc->dm_irq_params.vrr_params);
542 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
543 &acrtc->dm_irq_params.vrr_params.adjust);
547 * If there aren't any active_planes then DCH HUBP may be clock-gated.
548 * In that case, pageflip completion interrupts won't fire and pageflip
549 * completion events won't get delivered. Prevent this by sending
550 * pending pageflip events from here if a flip is still pending.
552 * If any planes are enabled, use dm_pflip_high_irq() instead, to
553 * avoid race conditions between flip programming and completion,
554 * which could cause too early flip completion events.
556 if (adev->family >= AMDGPU_FAMILY_RV &&
557 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
558 acrtc->dm_irq_params.active_planes == 0) {
560 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
562 drm_crtc_vblank_put(&acrtc->base);
564 acrtc->pflip_status = AMDGPU_FLIP_NONE;
567 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
570 static int dm_set_clockgating_state(void *handle,
571 enum amd_clockgating_state state)
576 static int dm_set_powergating_state(void *handle,
577 enum amd_powergating_state state)
582 /* Prototypes of private functions */
583 static int dm_early_init(void* handle);
585 /* Allocate memory for FBC compressed data */
586 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
588 struct drm_device *dev = connector->dev;
589 struct amdgpu_device *adev = drm_to_adev(dev);
590 struct dm_compressor_info *compressor = &adev->dm.compressor;
591 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
592 struct drm_display_mode *mode;
593 unsigned long max_size = 0;
595 if (adev->dm.dc->fbc_compressor == NULL)
598 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
601 if (compressor->bo_ptr)
605 list_for_each_entry(mode, &connector->modes, head) {
606 if (max_size < mode->htotal * mode->vtotal)
607 max_size = mode->htotal * mode->vtotal;
611 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
612 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
613 &compressor->gpu_addr, &compressor->cpu_addr);
616 DRM_ERROR("DM: Failed to initialize FBC\n");
618 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
619 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
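/*
 * Audio component .get_eld callback: look up the DM connector bound to the
 * given audio pin, report whether audio is enabled and copy its cached ELD
 * into @buf.
 */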
626 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
627 int pipe, bool *enabled,
628 unsigned char *buf, int max_bytes)
630 struct drm_device *dev = dev_get_drvdata(kdev);
631 struct amdgpu_device *adev = drm_to_adev(dev);
632 struct drm_connector *connector;
633 struct drm_connector_list_iter conn_iter;
634 struct amdgpu_dm_connector *aconnector;
639 mutex_lock(&adev->dm.audio_lock);
641 drm_connector_list_iter_begin(dev, &conn_iter);
642 drm_for_each_connector_iter(connector, &conn_iter) {
643 aconnector = to_amdgpu_dm_connector(connector);
644 if (aconnector->audio_inst != port)
648 ret = drm_eld_size(connector->eld);
649 memcpy(buf, connector->eld, min(max_bytes, ret));
653 drm_connector_list_iter_end(&conn_iter);
655 mutex_unlock(&adev->dm.audio_lock);
657 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
662 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
663 .get_eld = amdgpu_dm_audio_component_get_eld,
666 static int amdgpu_dm_audio_component_bind(struct device *kdev,
667 struct device *hda_kdev, void *data)
669 struct drm_device *dev = dev_get_drvdata(kdev);
670 struct amdgpu_device *adev = drm_to_adev(dev);
671 struct drm_audio_component *acomp = data;
673 acomp->ops = &amdgpu_dm_audio_component_ops;
675 adev->dm.audio_component = acomp;
680 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
681 struct device *hda_kdev, void *data)
683 struct drm_device *dev = dev_get_drvdata(kdev);
684 struct amdgpu_device *adev = drm_to_adev(dev);
685 struct drm_audio_component *acomp = data;
689 adev->dm.audio_component = NULL;
692 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
693 .bind = amdgpu_dm_audio_component_bind,
694 .unbind = amdgpu_dm_audio_component_unbind,
697 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
704 adev->mode_info.audio.enabled = true;
706 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
708 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
709 adev->mode_info.audio.pin[i].channels = -1;
710 adev->mode_info.audio.pin[i].rate = -1;
711 adev->mode_info.audio.pin[i].bits_per_sample = -1;
712 adev->mode_info.audio.pin[i].status_bits = 0;
713 adev->mode_info.audio.pin[i].category_code = 0;
714 adev->mode_info.audio.pin[i].connected = false;
715 adev->mode_info.audio.pin[i].id =
716 adev->dm.dc->res_pool->audios[i]->inst;
717 adev->mode_info.audio.pin[i].offset = 0;
720 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724 adev->dm.audio_registered = true;
729 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
734 if (!adev->mode_info.audio.enabled)
737 if (adev->dm.audio_registered) {
738 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
739 adev->dm.audio_registered = false;
742 /* TODO: Disable audio? */
744 adev->mode_info.audio.enabled = false;
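/* Notify the bound audio component (HDA driver) that the ELD for @pin changed. */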
747 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
749 struct drm_audio_component *acomp = adev->dm.audio_component;
751 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
752 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
754 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
759 static int dm_dmub_hw_init(struct amdgpu_device *adev)
761 const struct dmcub_firmware_header_v1_0 *hdr;
762 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
763 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
764 const struct firmware *dmub_fw = adev->dm.dmub_fw;
765 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
766 struct abm *abm = adev->dm.dc->res_pool->abm;
767 struct dmub_srv_hw_params hw_params;
768 enum dmub_status status;
769 const unsigned char *fw_inst_const, *fw_bss_data;
770 uint32_t i, fw_inst_const_size, fw_bss_data_size;
774 /* DMUB isn't supported on the ASIC. */
778 DRM_ERROR("No framebuffer info for DMUB service.\n");
783 /* Firmware required for DMUB support. */
784 DRM_ERROR("No firmware provided for DMUB.\n");
788 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
789 if (status != DMUB_STATUS_OK) {
790 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
794 if (!has_hw_support) {
795 DRM_INFO("DMUB unsupported on ASIC\n");
799 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
801 fw_inst_const = dmub_fw->data +
802 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
805 fw_bss_data = dmub_fw->data +
806 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
807 le32_to_cpu(hdr->inst_const_bytes);
809 /* Copy firmware and bios info into FB memory. */
810 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
811 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
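/*
 * inst_const as packaged for PSP includes a header and footer that are
 * excluded from the copy into the DMUB instruction-constant window.
 */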
813 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
815 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
816 * amdgpu_ucode_init_single_fw will load dmub firmware
817 * fw_inst_const part to cw0; otherwise, the firmware back door load
818 * will be done by dm_dmub_hw_init
820 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
821 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
825 if (fw_bss_data_size)
826 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
827 fw_bss_data, fw_bss_data_size);
829 /* Copy firmware bios info into FB memory. */
830 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
833 /* Reset regions that need to be reset. */
834 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
835 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
837 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
838 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
840 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
841 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
843 /* Initialize hardware. */
844 memset(&hw_params, 0, sizeof(hw_params));
845 hw_params.fb_base = adev->gmc.fb_start;
846 hw_params.fb_offset = adev->gmc.aper_base;
848 /* backdoor load firmware and trigger dmub running */
849 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
850 hw_params.load_inst_const = true;
853 hw_params.psp_version = dmcu->psp_version;
855 for (i = 0; i < fb_info->num_fb; ++i)
856 hw_params.fb[i] = &fb_info->fb[i];
858 status = dmub_srv_hw_init(dmub_srv, &hw_params);
859 if (status != DMUB_STATUS_OK) {
860 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
864 /* Wait for firmware load to finish. */
865 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
866 if (status != DMUB_STATUS_OK)
867 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
869 /* Init DMCU and ABM if available. */
871 dmcu->funcs->dmcu_init(dmcu);
872 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
875 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
876 if (!adev->dm.dc->ctx->dmub_srv) {
877 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
881 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
882 adev->dm.dmcub_fw_version);
887 #if defined(CONFIG_DRM_AMD_DC_DCN)
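/*
 * Fill a dc_phy_addr_space_config from the GMC framebuffer/AGP apertures and
 * the GART page-table addresses, for DC to program the MMHUB system context
 * (DCN ASICs only).
 */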
888 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
891 uint32_t logical_addr_low;
892 uint32_t logical_addr_high;
893 uint32_t agp_base, agp_bot, agp_top;
894 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
896 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
897 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
899 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
901 * Raven2 has a HW issue that prevents it from using the vram which
902 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
903 * workaround that increases the system aperture high address (add 1)
904 * to get rid of the VM fault and hardware hang.
906 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
908 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
911 agp_bot = adev->gmc.agp_start >> 24;
912 agp_top = adev->gmc.agp_end >> 24;
915 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
916 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
917 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
918 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
919 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
920 page_table_base.low_part = lower_32_bits(pt_base);
922 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
923 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
925 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
926 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
927 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
929 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
930 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
931 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
933 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
934 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
935 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
937 pa_config->is_hvm_enabled = 0;
942 #ifdef CONFIG_DEBUG_FS
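/*
 * Create the atomic range properties describing the CRC calculation window
 * (x/y start and end), exposed when CONFIG_DEBUG_FS is enabled.
 */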
943 static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
945 dm->crc_win_x_start_property =
946 drm_property_create_range(adev_to_drm(dm->adev),
947 DRM_MODE_PROP_ATOMIC,
948 "AMD_CRC_WIN_X_START", 0, U16_MAX);
949 if (!dm->crc_win_x_start_property)
952 dm->crc_win_y_start_property =
953 drm_property_create_range(adev_to_drm(dm->adev),
954 DRM_MODE_PROP_ATOMIC,
955 "AMD_CRC_WIN_Y_START", 0, U16_MAX);
956 if (!dm->crc_win_y_start_property)
959 dm->crc_win_x_end_property =
960 drm_property_create_range(adev_to_drm(dm->adev),
961 DRM_MODE_PROP_ATOMIC,
962 "AMD_CRC_WIN_X_END", 0, U16_MAX);
963 if (!dm->crc_win_x_end_property)
966 dm->crc_win_y_end_property =
967 drm_property_create_range(adev_to_drm(dm->adev),
968 DRM_MODE_PROP_ATOMIC,
969 "AMD_CRC_WIN_Y_END", 0, U16_MAX);
970 if (!dm->crc_win_y_end_property)
977 static int amdgpu_dm_init(struct amdgpu_device *adev)
979 struct dc_init_data init_data;
980 #ifdef CONFIG_DRM_AMD_DC_HDCP
981 struct dc_callback_init init_params;
985 adev->dm.ddev = adev_to_drm(adev);
986 adev->dm.adev = adev;
988 /* Zero all the fields */
989 memset(&init_data, 0, sizeof(init_data));
990 #ifdef CONFIG_DRM_AMD_DC_HDCP
991 memset(&init_params, 0, sizeof(init_params));
994 mutex_init(&adev->dm.dc_lock);
995 mutex_init(&adev->dm.audio_lock);
997 if (amdgpu_dm_irq_init(adev)) {
998 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1002 init_data.asic_id.chip_family = adev->family;
1004 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1005 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1007 init_data.asic_id.vram_width = adev->gmc.vram_width;
1008 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1009 init_data.asic_id.atombios_base_address =
1010 adev->mode_info.atom_context->bios;
1012 init_data.driver = adev;
1014 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1016 if (!adev->dm.cgs_device) {
1017 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1021 init_data.cgs_device = adev->dm.cgs_device;
1023 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1025 switch (adev->asic_type) {
1030 init_data.flags.gpu_vm_support = true;
1031 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1032 init_data.flags.disable_dmcu = true;
1034 #if defined(CONFIG_DRM_AMD_DC_DCN)
1036 init_data.flags.gpu_vm_support = true;
1043 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1044 init_data.flags.fbc_support = true;
1046 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1047 init_data.flags.multi_mon_pp_mclk_switch = true;
1049 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1050 init_data.flags.disable_fractional_pwm = true;
1052 init_data.flags.power_down_display_on_boot = true;
1054 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1056 /* Display Core create. */
1057 adev->dm.dc = dc_create(&init_data);
1060 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1062 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1066 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1067 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1068 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1071 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1072 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1074 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1075 adev->dm.dc->debug.disable_stutter = true;
1077 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1078 adev->dm.dc->debug.disable_dsc = true;
1080 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1081 adev->dm.dc->debug.disable_clock_gate = true;
1083 r = dm_dmub_hw_init(adev);
1085 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1089 dc_hardware_init(adev->dm.dc);
1091 #if defined(CONFIG_DRM_AMD_DC_DCN)
1092 if (adev->apu_flags) {
1093 struct dc_phy_addr_space_config pa_config;
1095 mmhub_read_system_context(adev, &pa_config);
1097 // Call the DC init_memory func
1098 dc_setup_system_context(adev->dm.dc, &pa_config);
1102 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1103 if (!adev->dm.freesync_module) {
1105 "amdgpu: failed to initialize freesync_module.\n");
1107 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1108 adev->dm.freesync_module);
1110 amdgpu_dm_init_color_mod();
1112 #ifdef CONFIG_DRM_AMD_DC_HDCP
1113 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1114 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1116 if (!adev->dm.hdcp_workqueue)
1117 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1119 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1121 dc_init_callbacks(adev->dm.dc, &init_params);
1124 #ifdef CONFIG_DEBUG_FS
1125 if (create_crtc_crc_properties(&adev->dm))
1126 DRM_ERROR("amdgpu: failed to create crc property.\n");
1128 if (amdgpu_dm_initialize_drm_device(adev)) {
1130 "amdgpu: failed to initialize sw for display support.\n");
1134 /* create fake encoders for MST */
1135 dm_dp_create_fake_mst_encoders(adev);
1137 /* TODO: Add_display_info? */
1139 /* TODO use dynamic cursor width */
1140 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1141 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1143 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1145 "amdgpu: failed to initialize sw for display support.\n");
1150 DRM_DEBUG_DRIVER("KMS initialized.\n");
1154 amdgpu_dm_fini(adev);
1159 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1163 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1164 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1167 amdgpu_dm_audio_fini(adev);
1169 amdgpu_dm_destroy_drm_device(&adev->dm);
1171 #ifdef CONFIG_DRM_AMD_DC_HDCP
1172 if (adev->dm.hdcp_workqueue) {
1173 hdcp_destroy(adev->dm.hdcp_workqueue);
1174 adev->dm.hdcp_workqueue = NULL;
1178 dc_deinit_callbacks(adev->dm.dc);
1180 if (adev->dm.dc->ctx->dmub_srv) {
1181 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1182 adev->dm.dc->ctx->dmub_srv = NULL;
1185 if (adev->dm.dmub_bo)
1186 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1187 &adev->dm.dmub_bo_gpu_addr,
1188 &adev->dm.dmub_bo_cpu_addr);
1190 /* DC Destroy TODO: Replace destroy DAL */
1192 dc_destroy(&adev->dm.dc);
1194 * TODO: pageflip, vlank interrupt
1196 * amdgpu_dm_irq_fini(adev);
1199 if (adev->dm.cgs_device) {
1200 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1201 adev->dm.cgs_device = NULL;
1203 if (adev->dm.freesync_module) {
1204 mod_freesync_destroy(adev->dm.freesync_module);
1205 adev->dm.freesync_module = NULL;
1208 mutex_destroy(&adev->dm.audio_lock);
1209 mutex_destroy(&adev->dm.dc_lock);
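/*
 * Request and validate DMCU firmware for ASICs that carry one. When the PSP
 * loads the firmware, the ERAM and INTV sections are registered in the ucode
 * list; otherwise DMCU firmware loading is skipped.
 */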
1214 static int load_dmcu_fw(struct amdgpu_device *adev)
1216 const char *fw_name_dmcu = NULL;
1218 const struct dmcu_firmware_header_v1_0 *hdr;
1220 switch (adev->asic_type) {
1221 #if defined(CONFIG_DRM_AMD_DC_SI)
1236 case CHIP_POLARIS11:
1237 case CHIP_POLARIS10:
1238 case CHIP_POLARIS12:
1246 case CHIP_SIENNA_CICHLID:
1247 case CHIP_NAVY_FLOUNDER:
1248 case CHIP_DIMGREY_CAVEFISH:
1252 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1255 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1256 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1257 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1258 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1263 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1267 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1268 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1272 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1274 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1275 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1276 adev->dm.fw_dmcu = NULL;
1280 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1285 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1287 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1289 release_firmware(adev->dm.fw_dmcu);
1290 adev->dm.fw_dmcu = NULL;
1294 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1295 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1296 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1297 adev->firmware.fw_size +=
1298 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1300 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1301 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1302 adev->firmware.fw_size +=
1303 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1305 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1307 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1312 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1314 struct amdgpu_device *adev = ctx;
1316 return dm_read_reg(adev->dm.dc->ctx, address);
1319 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1322 struct amdgpu_device *adev = ctx;
1324 return dm_write_reg(adev->dm.dc->ctx, address, value);
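/*
 * Software-side DMUB setup: select the per-ASIC DMUB firmware, create the
 * DMUB service, compute the sizes of its regions and allocate the VRAM
 * buffer that backs them.
 */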
1327 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1329 struct dmub_srv_create_params create_params;
1330 struct dmub_srv_region_params region_params;
1331 struct dmub_srv_region_info region_info;
1332 struct dmub_srv_fb_params fb_params;
1333 struct dmub_srv_fb_info *fb_info;
1334 struct dmub_srv *dmub_srv;
1335 const struct dmcub_firmware_header_v1_0 *hdr;
1336 const char *fw_name_dmub;
1337 enum dmub_asic dmub_asic;
1338 enum dmub_status status;
1341 switch (adev->asic_type) {
1343 dmub_asic = DMUB_ASIC_DCN21;
1344 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1345 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1346 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1348 case CHIP_SIENNA_CICHLID:
1349 dmub_asic = DMUB_ASIC_DCN30;
1350 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1352 case CHIP_NAVY_FLOUNDER:
1353 dmub_asic = DMUB_ASIC_DCN30;
1354 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1357 dmub_asic = DMUB_ASIC_DCN301;
1358 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1360 case CHIP_DIMGREY_CAVEFISH:
1361 dmub_asic = DMUB_ASIC_DCN302;
1362 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1366 /* ASIC doesn't support DMUB. */
1370 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1372 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1376 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1378 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1382 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1384 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1385 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1386 AMDGPU_UCODE_ID_DMCUB;
1387 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1389 adev->firmware.fw_size +=
1390 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1392 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1393 adev->dm.dmcub_fw_version);
1396 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1398 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1399 dmub_srv = adev->dm.dmub_srv;
1402 DRM_ERROR("Failed to allocate DMUB service!\n");
1406 memset(&create_params, 0, sizeof(create_params));
1407 create_params.user_ctx = adev;
1408 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1409 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1410 create_params.asic = dmub_asic;
1412 /* Create the DMUB service. */
1413 status = dmub_srv_create(dmub_srv, &create_params);
1414 if (status != DMUB_STATUS_OK) {
1415 DRM_ERROR("Error creating DMUB service: %d\n", status);
1419 /* Calculate the size of all the regions for the DMUB service. */
1420 memset(&region_params, 0, sizeof(region_params));
1422 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1423 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1424 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1425 region_params.vbios_size = adev->bios_size;
1426 region_params.fw_bss_data = region_params.bss_data_size ?
1427 adev->dm.dmub_fw->data +
1428 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1429 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1430 region_params.fw_inst_const =
1431 adev->dm.dmub_fw->data +
1432 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1435 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1438 if (status != DMUB_STATUS_OK) {
1439 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1444 * Allocate a framebuffer based on the total size of all the regions.
1445 * TODO: Move this into GART.
1447 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1448 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1449 &adev->dm.dmub_bo_gpu_addr,
1450 &adev->dm.dmub_bo_cpu_addr);
1454 /* Rebase the regions on the framebuffer address. */
1455 memset(&fb_params, 0, sizeof(fb_params));
1456 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1457 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1458 fb_params.region_info = &region_info;
1460 adev->dm.dmub_fb_info =
1461 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1462 fb_info = adev->dm.dmub_fb_info;
1466 "Failed to allocate framebuffer info for DMUB service!\n");
1470 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1471 if (status != DMUB_STATUS_OK) {
1472 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1479 static int dm_sw_init(void *handle)
1481 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1484 r = dm_dmub_sw_init(adev);
1488 return load_dmcu_fw(adev);
1491 static int dm_sw_fini(void *handle)
1493 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1495 kfree(adev->dm.dmub_fb_info);
1496 adev->dm.dmub_fb_info = NULL;
1498 if (adev->dm.dmub_srv) {
1499 dmub_srv_destroy(adev->dm.dmub_srv);
1500 adev->dm.dmub_srv = NULL;
1503 release_firmware(adev->dm.dmub_fw);
1504 adev->dm.dmub_fw = NULL;
1506 release_firmware(adev->dm.fw_dmcu);
1507 adev->dm.fw_dmcu = NULL;
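/* Start MST topology management on every connector backed by an MST branch link. */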
1512 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1514 struct amdgpu_dm_connector *aconnector;
1515 struct drm_connector *connector;
1516 struct drm_connector_list_iter iter;
1519 drm_connector_list_iter_begin(dev, &iter);
1520 drm_for_each_connector_iter(connector, &iter) {
1521 aconnector = to_amdgpu_dm_connector(connector);
1522 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1523 aconnector->mst_mgr.aux) {
1524 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1526 aconnector->base.base.id);
1528 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1530 DRM_ERROR("DM_MST: Failed to start MST\n");
1531 aconnector->dc_link->type =
1532 dc_connection_single;
1537 drm_connector_list_iter_end(&iter);
1542 static int dm_late_init(void *handle)
1544 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1546 struct dmcu_iram_parameters params;
1547 unsigned int linear_lut[16];
1549 struct dmcu *dmcu = NULL;
1552 dmcu = adev->dm.dc->res_pool->dmcu;
1554 for (i = 0; i < 16; i++)
1555 linear_lut[i] = 0xFFFF * i / 15;
1558 params.backlight_ramping_start = 0xCCCC;
1559 params.backlight_ramping_reduction = 0xCCCCCCCC;
1560 params.backlight_lut_array_size = 16;
1561 params.backlight_lut_array = linear_lut;
1563 /* Min backlight level after ABM reduction; don't allow below 1%:
1564 * 0xFFFF x 0.01 = 0x28F
1566 params.min_abm_backlight = 0x28F;
1568 /* In the case where ABM is implemented on dmcub,
1569 * the dmcu object will be NULL.
1570 * ABM 2.4 and up are implemented on dmcub.
1573 ret = dmcu_load_iram(dmcu, params);
1574 else if (adev->dm.dc->ctx->dmub_srv)
1575 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1580 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
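/*
 * Suspend or resume the MST topology managers across S3. If a manager fails
 * to resume, MST is disabled on that connector and a hotplug event is
 * generated.
 */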
1583 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1585 struct amdgpu_dm_connector *aconnector;
1586 struct drm_connector *connector;
1587 struct drm_connector_list_iter iter;
1588 struct drm_dp_mst_topology_mgr *mgr;
1590 bool need_hotplug = false;
1592 drm_connector_list_iter_begin(dev, &iter);
1593 drm_for_each_connector_iter(connector, &iter) {
1594 aconnector = to_amdgpu_dm_connector(connector);
1595 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1596 aconnector->mst_port)
1599 mgr = &aconnector->mst_mgr;
1602 drm_dp_mst_topology_mgr_suspend(mgr);
1604 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1606 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1607 need_hotplug = true;
1611 drm_connector_list_iter_end(&iter);
1614 drm_kms_helper_hotplug_event(dev);
1617 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1619 struct smu_context *smu = &adev->smu;
1622 if (!is_support_sw_smu(adev))
1625 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1626 * on the Windows driver dc implementation.
1627 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1628 * should be passed to smu during boot up and resume from s3.
1629 * boot up: dc calculate dcn watermark clock settings within dc_create,
1630 * dcn20_resource_construct
1631 * then call pplib functions below to pass the settings to smu:
1632 * smu_set_watermarks_for_clock_ranges
1633 * smu_set_watermarks_table
1634 * navi10_set_watermarks_table
1635 * smu_write_watermarks_table
1637 * For Renoir, clock settings of dcn watermark are also fixed values.
1638 * dc has implemented different flow for window driver:
1639 * dc_hardware_init / dc_set_power_state
1644 * smu_set_watermarks_for_clock_ranges
1645 * renoir_set_watermarks_table
1646 * smu_write_watermarks_table
1649 * dc_hardware_init -> amdgpu_dm_init
1650 * dc_set_power_state --> dm_resume
1652 * therefore, this function applies to navi10/12/14 but not Renoir
1655 switch (adev->asic_type) {
1664 ret = smu_write_watermarks_table(smu);
1666 DRM_ERROR("Failed to update WMTABLE!\n");
1674 * dm_hw_init() - Initialize DC device
1675 * @handle: The base driver device containing the amdgpu_dm device.
1677 * Initialize the &struct amdgpu_display_manager device. This involves calling
1678 * the initializers of each DM component, then populating the struct with them.
1680 * Although the function implies hardware initialization, both hardware and
1681 * software are initialized here. Splitting them out to their relevant init
1682 * hooks is a future TODO item.
1684 * Some notable things that are initialized here:
1686 * - Display Core, both software and hardware
1687 * - DC modules that we need (freesync and color management)
1688 * - DRM software states
1689 * - Interrupt sources and handlers
1691 * - Debug FS entries, if enabled
1693 static int dm_hw_init(void *handle)
1695 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1696 /* Create DAL display manager */
1697 amdgpu_dm_init(adev);
1698 amdgpu_dm_hpd_init(adev);
1704 * dm_hw_fini() - Teardown DC device
1705 * @handle: The base driver device containing the amdgpu_dm device.
1707 * Teardown components within &struct amdgpu_display_manager that require
1708 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1709 * were loaded. Also flush IRQ workqueues and disable them.
1711 static int dm_hw_fini(void *handle)
1713 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1715 amdgpu_dm_hpd_fini(adev);
1717 amdgpu_dm_irq_fini(adev);
1718 amdgpu_dm_fini(adev);
1723 static int dm_enable_vblank(struct drm_crtc *crtc);
1724 static void dm_disable_vblank(struct drm_crtc *crtc);
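/*
 * Enable or disable pageflip and vblank interrupts for every stream in
 * @state. Used to quiesce and restore interrupts around GPU reset.
 */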
1726 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1727 struct dc_state *state, bool enable)
1729 enum dc_irq_source irq_source;
1730 struct amdgpu_crtc *acrtc;
1734 for (i = 0; i < state->stream_count; i++) {
1735 acrtc = get_crtc_by_otg_inst(
1736 adev, state->stream_status[i].primary_otg_inst);
1738 if (acrtc && state->stream_status[i].plane_count != 0) {
1739 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1740 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1741 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1742 acrtc->crtc_id, enable ? "en" : "dis", rc);
1744 DRM_WARN("Failed to %s pflip interrupts\n",
1745 enable ? "enable" : "disable");
1748 rc = dm_enable_vblank(&acrtc->base);
1750 DRM_WARN("Failed to enable vblank interrupts\n");
1752 dm_disable_vblank(&acrtc->base);
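/*
 * Commit a DC state with no streams: copy the current state, detach all
 * planes, remove every stream, validate and commit the result.
 */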
1760 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1762 struct dc_state *context = NULL;
1763 enum dc_status res = DC_ERROR_UNEXPECTED;
1765 struct dc_stream_state *del_streams[MAX_PIPES];
1766 int del_streams_count = 0;
1768 memset(del_streams, 0, sizeof(del_streams));
1770 context = dc_create_state(dc);
1771 if (context == NULL)
1772 goto context_alloc_fail;
1774 dc_resource_state_copy_construct_current(dc, context);
1776 /* First remove from context all streams */
1777 for (i = 0; i < context->stream_count; i++) {
1778 struct dc_stream_state *stream = context->streams[i];
1780 del_streams[del_streams_count++] = stream;
1783 /* Remove all planes for removed streams and then remove the streams */
1784 for (i = 0; i < del_streams_count; i++) {
1785 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1786 res = DC_FAIL_DETACH_SURFACES;
1790 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1796 res = dc_validate_global_state(dc, context, false);
1799 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1803 res = dc_commit_state(dc, context);
1806 dc_release_state(context);
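/*
 * Suspend handler. During GPU reset the current DC state is cached and zero
 * streams are committed; otherwise the atomic state is saved, MST and IRQs
 * are suspended and DC is put into D3.
 */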
1812 static int dm_suspend(void *handle)
1814 struct amdgpu_device *adev = handle;
1815 struct amdgpu_display_manager *dm = &adev->dm;
1818 if (amdgpu_in_reset(adev)) {
1819 mutex_lock(&dm->dc_lock);
1820 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1822 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1824 amdgpu_dm_commit_zero_streams(dm->dc);
1826 amdgpu_dm_irq_suspend(adev);
1831 WARN_ON(adev->dm.cached_state);
1832 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1834 s3_handle_mst(adev_to_drm(adev), true);
1836 amdgpu_dm_irq_suspend(adev);
1839 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1844 static struct amdgpu_dm_connector *
1845 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1846 struct drm_crtc *crtc)
1849 struct drm_connector_state *new_con_state;
1850 struct drm_connector *connector;
1851 struct drm_crtc *crtc_from_state;
1853 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1854 crtc_from_state = new_con_state->crtc;
1856 if (crtc_from_state == crtc)
1857 return to_amdgpu_dm_connector(connector);
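/*
 * Emulate link detection for a forced connector with no physically detected
 * sink: create a dc_sink matching the connector signal type and attempt to
 * read a local EDID.
 */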
1863 static void emulated_link_detect(struct dc_link *link)
1865 struct dc_sink_init_data sink_init_data = { 0 };
1866 struct display_sink_capability sink_caps = { 0 };
1867 enum dc_edid_status edid_status;
1868 struct dc_context *dc_ctx = link->ctx;
1869 struct dc_sink *sink = NULL;
1870 struct dc_sink *prev_sink = NULL;
1872 link->type = dc_connection_none;
1873 prev_sink = link->local_sink;
1875 if (prev_sink != NULL)
1876 dc_sink_retain(prev_sink);
1878 switch (link->connector_signal) {
1879 case SIGNAL_TYPE_HDMI_TYPE_A: {
1880 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1881 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1885 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1886 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1887 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1891 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1892 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1893 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1897 case SIGNAL_TYPE_LVDS: {
1898 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1899 sink_caps.signal = SIGNAL_TYPE_LVDS;
1903 case SIGNAL_TYPE_EDP: {
1904 sink_caps.transaction_type =
1905 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1906 sink_caps.signal = SIGNAL_TYPE_EDP;
1910 case SIGNAL_TYPE_DISPLAY_PORT: {
1911 sink_caps.transaction_type =
1912 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1913 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1918 DC_ERROR("Invalid connector type! signal:%d\n",
1919 link->connector_signal);
1923 sink_init_data.link = link;
1924 sink_init_data.sink_signal = sink_caps.signal;
1926 sink = dc_sink_create(&sink_init_data);
1928 DC_ERROR("Failed to create sink!\n");
1932 /* dc_sink_create returns a new reference */
1933 link->local_sink = sink;
1935 edid_status = dm_helpers_read_local_edid(
1940 if (edid_status != EDID_OK)
1941 DC_ERROR("Failed to read EDID");
1945 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1946 struct amdgpu_display_manager *dm)
1949 struct dc_surface_update surface_updates[MAX_SURFACES];
1950 struct dc_plane_info plane_infos[MAX_SURFACES];
1951 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1952 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1953 struct dc_stream_update stream_update;
1957 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1960 dm_error("Failed to allocate update bundle\n");
1964 for (k = 0; k < dc_state->stream_count; k++) {
1965 bundle->stream_update.stream = dc_state->streams[k];
1967 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1968 bundle->surface_updates[m].surface =
1969 dc_state->stream_status->plane_states[m];
1970 bundle->surface_updates[m].surface->force_full_update =
1973 dc_commit_updates_for_stream(
1974 dm->dc, bundle->surface_updates,
1975 dc_state->stream_status->plane_count,
1976 dc_state->streams[k], &bundle->stream_update, dc_state);
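/*
 * Find the stream currently driven by @link and commit a stream update with
 * dpms_off set, turning the output off.
 */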
1985 static void dm_set_dpms_off(struct dc_link *link)
1987 struct dc_stream_state *stream_state;
1988 struct amdgpu_dm_connector *aconnector = link->priv;
1989 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1990 struct dc_stream_update stream_update;
1991 bool dpms_off = true;
1993 memset(&stream_update, 0, sizeof(stream_update));
1994 stream_update.dpms_off = &dpms_off;
1996 mutex_lock(&adev->dm.dc_lock);
1997 stream_state = dc_stream_find_from_link(link);
1999 if (stream_state == NULL) {
2000 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2001 mutex_unlock(&adev->dm.dc_lock);
2005 stream_update.stream = stream_state;
2006 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2007 stream_state, &stream_update,
2008 stream_state->ctx->dc->current_state);
2009 mutex_unlock(&adev->dm.dc_lock);
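/*
 * Resume handler. During GPU reset the cached DC state is re-committed and
 * interrupts re-enabled; otherwise DMUB is re-initialized, DC is powered to
 * D0, MST and HPD are restored, every link is re-detected and the saved
 * atomic state is resumed.
 */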
2012 static int dm_resume(void *handle)
2014 struct amdgpu_device *adev = handle;
2015 struct drm_device *ddev = adev_to_drm(adev);
2016 struct amdgpu_display_manager *dm = &adev->dm;
2017 struct amdgpu_dm_connector *aconnector;
2018 struct drm_connector *connector;
2019 struct drm_connector_list_iter iter;
2020 struct drm_crtc *crtc;
2021 struct drm_crtc_state *new_crtc_state;
2022 struct dm_crtc_state *dm_new_crtc_state;
2023 struct drm_plane *plane;
2024 struct drm_plane_state *new_plane_state;
2025 struct dm_plane_state *dm_new_plane_state;
2026 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2027 enum dc_connection_type new_connection_type = dc_connection_none;
2028 struct dc_state *dc_state;
2031 if (amdgpu_in_reset(adev)) {
2032 dc_state = dm->cached_dc_state;
2034 r = dm_dmub_hw_init(adev);
2036 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2038 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2041 amdgpu_dm_irq_resume_early(adev);
2043 for (i = 0; i < dc_state->stream_count; i++) {
2044 dc_state->streams[i]->mode_changed = true;
2045 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2046 dc_state->stream_status->plane_states[j]->update_flags.raw
2051 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2053 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2055 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2057 dc_release_state(dm->cached_dc_state);
2058 dm->cached_dc_state = NULL;
2060 amdgpu_dm_irq_resume_late(adev);
2062 mutex_unlock(&dm->dc_lock);
2066 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2067 dc_release_state(dm_state->context);
2068 dm_state->context = dc_create_state(dm->dc);
2069 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2070 dc_resource_state_construct(dm->dc, dm_state->context);
2072 /* Before powering on DC we need to re-initialize DMUB. */
2073 r = dm_dmub_hw_init(adev);
2075 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2077 /* power on hardware */
2078 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2080 /* program HPD filter */
2084 * early enable HPD Rx IRQ, should be done before set mode as short
2085 * pulse interrupts are used for MST
2087 amdgpu_dm_irq_resume_early(adev);
2089 /* On resume we need to rewrite the MSTM control bits to enable MST */
2090 s3_handle_mst(ddev, false);
2093 drm_connector_list_iter_begin(ddev, &iter);
2094 drm_for_each_connector_iter(connector, &iter) {
2095 aconnector = to_amdgpu_dm_connector(connector);
2098 * this is the case when traversing through already created
2099 * MST connectors, should be skipped
2101 if (aconnector->mst_port)
2104 mutex_lock(&aconnector->hpd_lock);
2105 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2106 DRM_ERROR("KMS: Failed to detect connector\n");
2108 if (aconnector->base.force && new_connection_type == dc_connection_none)
2109 emulated_link_detect(aconnector->dc_link);
2111 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2113 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2114 aconnector->fake_enable = false;
2116 if (aconnector->dc_sink)
2117 dc_sink_release(aconnector->dc_sink);
2118 aconnector->dc_sink = NULL;
2119 amdgpu_dm_update_connector_after_detect(aconnector);
2120 mutex_unlock(&aconnector->hpd_lock);
2122 drm_connector_list_iter_end(&iter);
2124 /* Force mode set in atomic commit */
2125 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2126 new_crtc_state->active_changed = true;
	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
2133 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2134 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2135 if (dm_new_crtc_state->stream) {
2136 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2137 dc_stream_release(dm_new_crtc_state->stream);
2138 dm_new_crtc_state->stream = NULL;
2142 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2143 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2144 if (dm_new_plane_state->dc_state) {
2145 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2146 dc_plane_state_release(dm_new_plane_state->dc_state);
2147 dm_new_plane_state->dc_state = NULL;
2151 drm_atomic_helper_resume(ddev, dm->cached_state);
2153 dm->cached_state = NULL;
2155 amdgpu_dm_irq_resume_late(adev);
	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2167 * the base driver's device list to be initialized and torn down accordingly.
2169 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2172 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2174 .early_init = dm_early_init,
2175 .late_init = dm_late_init,
2176 .sw_init = dm_sw_init,
2177 .sw_fini = dm_sw_fini,
2178 .hw_init = dm_hw_init,
2179 .hw_fini = dm_hw_fini,
2180 .suspend = dm_suspend,
2181 .resume = dm_resume,
2182 .is_idle = dm_is_idle,
2183 .wait_for_idle = dm_wait_for_idle,
2184 .check_soft_reset = dm_check_soft_reset,
2185 .soft_reset = dm_soft_reset,
2186 .set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
2190 const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.funcs = &amdgpu_dm_funcs,
};
2206 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2207 .fb_create = amdgpu_display_user_framebuffer_create,
2208 .get_format_info = amd_get_format_info,
2209 .output_poll_changed = drm_fb_helper_output_poll_changed,
2210 .atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2214 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
2218 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2220 u32 max_cll, min_cll, max, min, q, r;
2221 struct amdgpu_dm_backlight_caps *caps;
2222 struct amdgpu_display_manager *dm;
2223 struct drm_connector *conn_base;
2224 struct amdgpu_device *adev;
2225 struct dc_link *link = NULL;
2226 static const u8 pre_computed_values[] = {
2227 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2228 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;
2237 conn_base = &aconnector->base;
2238 adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
2241 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2242 caps->aux_support = false;
2243 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2244 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2246 if (caps->ext_caps->bits.oled == 1 ||
2247 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2248 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2249 caps->aux_support = true;
2251 /* From the specification (CTA-861-G), for calculating the maximum
2252 * luminance we need to use:
2253 * Luminance = 50*2**(CV/32)
2254 * Where CV is a one-byte value.
 * Evaluating this expression directly would require floating-point
 * precision; to avoid that complexity we exploit the fact that CV is
 * divided by a constant. From Euclid's division algorithm, CV can be
 * written as CV = 32*q + r. Substituting CV in the luminance
 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
 * pre-compute the values of 50*2**(r/32). The table was generated
 * with the following Ruby line:
2262 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2263 * The results of the above expressions can be verified at
2264 * pre_computed_values.
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];
2270 // min luminance: maxLum * (CV/255)^2 / 100
2271 q = DIV_ROUND_CLOSEST(min_cll, 255);
2272 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2274 caps->aux_max_input_signal = max;
2275 caps->aux_min_input_signal = min;
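	/*
	 * Illustrative worked example (numbers are assumptions picked for
	 * clarity, not taken from hardware): for max_cll = 70,
	 * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which
	 * approximates 50 * 2**(70/32) ~= 227.8 nits.
	 */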
2278 void amdgpu_dm_update_connector_after_detect(
2279 struct amdgpu_dm_connector *aconnector)
2281 struct drm_connector *connector = &aconnector->base;
2282 struct drm_device *dev = connector->dev;
2283 struct dc_sink *sink;
2285 /* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;
2289 sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);
	 * An EDID-managed connector gets its first update only in the mode_valid
	 * hook, after which the connector sink is set to either the fake or the
	 * physical sink, depending on link status.
	 * Skip if this was already done during boot.
2298 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2299 && aconnector->dc_em_sink) {
		/*
		 * For S3 resume with headless, use the emulated sink (dc_em_sink)
		 * to fake the stream, because on resume connector->sink is set
		 * to NULL.
		 */
2305 mutex_lock(&dev->mode_config.mutex);
2308 if (aconnector->dc_sink) {
2309 amdgpu_dm_update_freesync_caps(connector, NULL);
2311 * retain and release below are used to
2312 * bump up refcount for sink because the link doesn't point
2313 * to it anymore after disconnect, so on next crtc to connector
2314 * reshuffle by UMD we will get into unwanted dc_sink release
2316 dc_sink_release(aconnector->dc_sink);
2318 aconnector->dc_sink = sink;
2319 dc_sink_retain(aconnector->dc_sink);
2320 amdgpu_dm_update_freesync_caps(connector,
2323 amdgpu_dm_update_freesync_caps(connector, NULL);
2324 if (!aconnector->dc_sink) {
2325 aconnector->dc_sink = aconnector->dc_em_sink;
2326 dc_sink_retain(aconnector->dc_sink);
2330 mutex_unlock(&dev->mode_config.mutex);
2333 dc_sink_release(sink);
	/*
	 * TODO: temporary guard while looking for a proper fix;
	 * if this sink is an MST sink, we should not do anything here.
	 */
2341 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2342 dc_sink_release(sink);
2346 if (aconnector->dc_sink == sink) {
2348 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2351 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2352 aconnector->connector_id);
2354 dc_sink_release(sink);
2358 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2359 aconnector->connector_id, aconnector->dc_sink, sink);
2361 mutex_lock(&dev->mode_config.mutex);
2364 * 1. Update status of the drm connector
2365 * 2. Send an event and let userspace tell us what to do
2369 * TODO: check if we still need the S3 mode update workaround.
2370 * If yes, put it here.
2372 if (aconnector->dc_sink)
2373 amdgpu_dm_update_freesync_caps(connector, NULL);
2375 aconnector->dc_sink = sink;
2376 dc_sink_retain(aconnector->dc_sink);
2377 if (sink->dc_edid.length == 0) {
2378 aconnector->edid = NULL;
2379 if (aconnector->dc_link->aux_mode) {
2380 drm_dp_cec_unset_edid(
2381 &aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;
2387 drm_connector_update_edid_property(connector,
2389 aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
2390 drm_connector_list_update(connector);
2392 if (aconnector->dc_link->aux_mode)
2393 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2397 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2398 update_connector_ext_caps(aconnector);
2400 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2401 amdgpu_dm_update_freesync_caps(connector, NULL);
2402 drm_connector_update_edid_property(connector, NULL);
2403 aconnector->num_modes = 0;
2404 dc_sink_release(aconnector->dc_sink);
2405 aconnector->dc_sink = NULL;
2406 aconnector->edid = NULL;
2407 #ifdef CONFIG_DRM_AMD_DC_HDCP
2408 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2409 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2410 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2414 mutex_unlock(&dev->mode_config.mutex);
2416 update_subconnector_property(aconnector);
2419 dc_sink_release(sink);
2422 static void handle_hpd_irq(void *param)
2424 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2425 struct drm_connector *connector = &aconnector->base;
2426 struct drm_device *dev = connector->dev;
2427 enum dc_connection_type new_connection_type = dc_connection_none;
2428 #ifdef CONFIG_DRM_AMD_DC_HDCP
2429 struct amdgpu_device *adev = drm_to_adev(dev);
2430 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
2437 mutex_lock(&aconnector->hpd_lock);
2439 #ifdef CONFIG_DRM_AMD_DC_HDCP
2440 if (adev->dm.hdcp_workqueue) {
2441 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2442 dm_con_state->update_hdcp = true;
2445 if (aconnector->fake_enable)
2446 aconnector->fake_enable = false;
2448 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2449 DRM_ERROR("KMS: Failed to detect connector\n");
2451 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2452 emulated_link_detect(aconnector->dc_link);
2455 drm_modeset_lock_all(dev);
2456 dm_restore_drm_connector_state(dev, connector);
2457 drm_modeset_unlock_all(dev);
2459 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2460 drm_kms_helper_hotplug_event(dev);
2462 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2463 if (new_connection_type == dc_connection_none &&
2464 aconnector->dc_link->type == dc_connection_none)
2465 dm_set_dpms_off(aconnector->dc_link);
2467 amdgpu_dm_update_connector_after_detect(aconnector);
2469 drm_modeset_lock_all(dev);
2470 dm_restore_drm_connector_state(dev, connector);
2471 drm_modeset_unlock_all(dev);
2473 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2474 drm_kms_helper_hotplug_event(dev);
2476 mutex_unlock(&aconnector->hpd_lock);
2480 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2482 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;
2488 const int max_process_count = 30;
2489 int process_count = 0;
2491 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2493 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2494 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2495 /* DPCD 0x200 - 0x201 for downstream IRQ */
2496 dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2499 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}
2503 dret = drm_dp_dpcd_read(
2504 &aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);
2509 while (dret == dpcd_bytes_to_read &&
2510 process_count < max_process_count) {
2516 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2517 /* handle HPD short pulse irq */
2518 if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);
2524 if (new_irq_handled) {
2525 /* ACK at DPCD to notify down stream */
2526 const int ack_dpcd_bytes_to_write =
2527 dpcd_bytes_to_read - 1;
2529 for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}
2541 /* check if there is new irq to be handled */
2542 dret = drm_dp_dpcd_read(
2543 &aconnector->dm_dp_aux.aux,
			dpcd_addr,
			esi,
			dpcd_bytes_to_read);
2548 new_irq_handled = false;
2554 if (process_count == max_process_count)
2555 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
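/*
 * Informal summary of the loop above (based only on the code in this
 * function): the ESI bytes are read either from DPCD 0x200 (DP_SINK_COUNT,
 * sinks older than DPCD 1.2) or 0x2002 (DP_SINK_COUNT_ESI, 1.2+ sinks),
 * handed to the MST manager, acknowledged back to the sink, and re-read
 * until no new IRQ is reported or max_process_count (30) iterations have
 * elapsed.
 */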
2558 static void handle_hpd_rx_irq(void *param)
2560 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2561 struct drm_connector *connector = &aconnector->base;
2562 struct drm_device *dev = connector->dev;
2563 struct dc_link *dc_link = aconnector->dc_link;
2564 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2565 bool result = false;
2566 enum dc_connection_type new_connection_type = dc_connection_none;
2567 struct amdgpu_device *adev = drm_to_adev(dev);
2568 union hpd_irq_data hpd_irq_data;
2570 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
	 * TODO: Temporarily add a mutex to protect the HPD interrupt from GPIO
	 * conflicts; once an i2c helper is implemented, this mutex should be
	 * retired.
	 */
2577 if (dc_link->type != dc_connection_mst_branch)
2578 mutex_lock(&aconnector->hpd_lock);
2580 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2582 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2583 (dc_link->type == dc_connection_mst_branch)) {
2584 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2586 dm_handle_hpd_rx_irq(aconnector);
2588 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2590 dm_handle_hpd_rx_irq(aconnector);
2595 mutex_lock(&adev->dm.dc_lock);
2596 #ifdef CONFIG_DRM_AMD_DC_HDCP
2597 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2599 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2601 mutex_unlock(&adev->dm.dc_lock);
2604 if (result && !is_mst_root_connector) {
2605 /* Downstream Port status changed. */
2606 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2607 DRM_ERROR("KMS: Failed to detect connector\n");
2609 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2610 emulated_link_detect(dc_link);
2612 if (aconnector->fake_enable)
2613 aconnector->fake_enable = false;
2615 amdgpu_dm_update_connector_after_detect(aconnector);
2618 drm_modeset_lock_all(dev);
2619 dm_restore_drm_connector_state(dev, connector);
2620 drm_modeset_unlock_all(dev);
2622 drm_kms_helper_hotplug_event(dev);
2623 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2625 if (aconnector->fake_enable)
2626 aconnector->fake_enable = false;
2628 amdgpu_dm_update_connector_after_detect(aconnector);
2631 drm_modeset_lock_all(dev);
2632 dm_restore_drm_connector_state(dev, connector);
2633 drm_modeset_unlock_all(dev);
2635 drm_kms_helper_hotplug_event(dev);
2638 #ifdef CONFIG_DRM_AMD_DC_HDCP
2639 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2640 if (adev->dm.hdcp_workqueue)
2641 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2645 if (dc_link->type != dc_connection_mst_branch) {
2646 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2647 mutex_unlock(&aconnector->hpd_lock);
2651 static void register_hpd_handlers(struct amdgpu_device *adev)
2653 struct drm_device *dev = adev_to_drm(adev);
2654 struct drm_connector *connector;
2655 struct amdgpu_dm_connector *aconnector;
2656 const struct dc_link *dc_link;
2657 struct dc_interrupt_params int_params = {0};
2659 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2660 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2662 list_for_each_entry(connector,
2663 &dev->mode_config.connector_list, head) {
2665 aconnector = to_amdgpu_dm_connector(connector);
2666 dc_link = aconnector->dc_link;
2668 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2669 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2670 int_params.irq_source = dc_link->irq_source_hpd;
2672 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2674 (void *) aconnector);
2677 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2679 /* Also register for DP short pulse (hpd_rx). */
2680 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2681 int_params.irq_source = dc_link->irq_source_hpd_rx;
2683 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2685 (void *) aconnector);
2690 #if defined(CONFIG_DRM_AMD_DC_SI)
2691 /* Register IRQ sources and initialize IRQ callbacks */
2692 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2694 struct dc *dc = adev->dm.dc;
2695 struct common_irq_params *c_irq_params;
2696 struct dc_interrupt_params int_params = {0};
2699 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2701 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2702 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2705 * Actions of amdgpu_irq_add_id():
2706 * 1. Register a set() function with base driver.
2707 * Base driver will call set() function to enable/disable an
2708 * interrupt in DC hardware.
2709 * 2. Register amdgpu_dm_irq_handler().
2710 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2711 * coming from DC hardware.
2712 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2713 * for acknowledging and handling. */
2715 /* Use VBLANK interrupt */
2716 for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2719 DRM_ERROR("Failed to add crtc irq id!\n");
2723 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2724 int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2727 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2729 c_irq_params->adev = adev;
2730 c_irq_params->irq_src = int_params.irq_source;
2732 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2733 dm_crtc_high_irq, c_irq_params);
2736 /* Use GRPH_PFLIP interrupt */
2737 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2738 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2739 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2741 DRM_ERROR("Failed to add page flip irq id!\n");
2745 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2746 int_params.irq_source =
2747 dc_interrupt_to_irq_source(dc, i, 0);
2749 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2751 c_irq_params->adev = adev;
2752 c_irq_params->irq_src = int_params.irq_source;
2754 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2755 dm_pflip_high_irq, c_irq_params);
2760 r = amdgpu_irq_add_id(adev, client_id,
2761 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2763 DRM_ERROR("Failed to add hpd irq id!\n");
2767 register_hpd_handlers(adev);
2773 /* Register IRQ sources and initialize IRQ callbacks */
2774 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2776 struct dc *dc = adev->dm.dc;
2777 struct common_irq_params *c_irq_params;
2778 struct dc_interrupt_params int_params = {0};
2781 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2783 if (adev->asic_type >= CHIP_VEGA10)
2784 client_id = SOC15_IH_CLIENTID_DCE;
2786 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2790 * Actions of amdgpu_irq_add_id():
2791 * 1. Register a set() function with base driver.
2792 * Base driver will call set() function to enable/disable an
2793 * interrupt in DC hardware.
2794 * 2. Register amdgpu_dm_irq_handler().
2795 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2796 * coming from DC hardware.
2797 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2798 * for acknowledging and handling. */
2800 /* Use VBLANK interrupt */
2801 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2802 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2804 DRM_ERROR("Failed to add crtc irq id!\n");
2808 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2809 int_params.irq_source =
2810 dc_interrupt_to_irq_source(dc, i, 0);
2812 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2814 c_irq_params->adev = adev;
2815 c_irq_params->irq_src = int_params.irq_source;
2817 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2818 dm_crtc_high_irq, c_irq_params);
2821 /* Use VUPDATE interrupt */
2822 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2823 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2825 DRM_ERROR("Failed to add vupdate irq id!\n");
2829 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2830 int_params.irq_source =
2831 dc_interrupt_to_irq_source(dc, i, 0);
2833 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2835 c_irq_params->adev = adev;
2836 c_irq_params->irq_src = int_params.irq_source;
2838 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2839 dm_vupdate_high_irq, c_irq_params);
2842 /* Use GRPH_PFLIP interrupt */
2843 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2844 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2845 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2847 DRM_ERROR("Failed to add page flip irq id!\n");
2851 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2852 int_params.irq_source =
2853 dc_interrupt_to_irq_source(dc, i, 0);
2855 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2857 c_irq_params->adev = adev;
2858 c_irq_params->irq_src = int_params.irq_source;
2860 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2861 dm_pflip_high_irq, c_irq_params);
2866 r = amdgpu_irq_add_id(adev, client_id,
2867 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2869 DRM_ERROR("Failed to add hpd irq id!\n");
2873 register_hpd_handlers(adev);
2878 #if defined(CONFIG_DRM_AMD_DC_DCN)
2879 /* Register IRQ sources and initialize IRQ callbacks */
2880 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2882 struct dc *dc = adev->dm.dc;
2883 struct common_irq_params *c_irq_params;
2884 struct dc_interrupt_params int_params = {0};
2888 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2889 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2892 * Actions of amdgpu_irq_add_id():
2893 * 1. Register a set() function with base driver.
2894 * Base driver will call set() function to enable/disable an
2895 * interrupt in DC hardware.
2896 * 2. Register amdgpu_dm_irq_handler().
2897 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2898 * coming from DC hardware.
2899 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2900 * for acknowledging and handling.
2903 /* Use VSTARTUP interrupt */
2904 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2905 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2907 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2910 DRM_ERROR("Failed to add crtc irq id!\n");
2914 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2915 int_params.irq_source =
2916 dc_interrupt_to_irq_source(dc, i, 0);
2918 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2920 c_irq_params->adev = adev;
2921 c_irq_params->irq_src = int_params.irq_source;
2923 amdgpu_dm_irq_register_interrupt(
2924 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2927 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2928 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2929 * to trigger at end of each vblank, regardless of state of the lock,
2930 * matching DCE behaviour.
2932 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2933 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2935 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2938 DRM_ERROR("Failed to add vupdate irq id!\n");
2942 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2943 int_params.irq_source =
2944 dc_interrupt_to_irq_source(dc, i, 0);
2946 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2948 c_irq_params->adev = adev;
2949 c_irq_params->irq_src = int_params.irq_source;
2951 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2952 dm_vupdate_high_irq, c_irq_params);
2955 /* Use GRPH_PFLIP interrupt */
2956 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2957 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2959 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2961 DRM_ERROR("Failed to add page flip irq id!\n");
2965 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966 int_params.irq_source =
2967 dc_interrupt_to_irq_source(dc, i, 0);
2969 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2971 c_irq_params->adev = adev;
2972 c_irq_params->irq_src = int_params.irq_source;
2974 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2975 dm_pflip_high_irq, c_irq_params);
2980 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2983 DRM_ERROR("Failed to add hpd irq id!\n");
2987 register_hpd_handlers(adev);
2994 * Acquires the lock for the atomic state object and returns
2995 * the new atomic state.
2997 * This should only be called during atomic check.
2999 static int dm_atomic_get_state(struct drm_atomic_state *state,
3000 struct dm_atomic_state **dm_state)
3002 struct drm_device *dev = state->dev;
3003 struct amdgpu_device *adev = drm_to_adev(dev);
3004 struct amdgpu_display_manager *dm = &adev->dm;
3005 struct drm_private_state *priv_state;
3010 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3011 if (IS_ERR(priv_state))
3012 return PTR_ERR(priv_state);
3014 *dm_state = to_dm_atomic_state(priv_state);
3019 static struct dm_atomic_state *
3020 dm_atomic_get_new_state(struct drm_atomic_state *state)
3022 struct drm_device *dev = state->dev;
3023 struct amdgpu_device *adev = drm_to_adev(dev);
3024 struct amdgpu_display_manager *dm = &adev->dm;
3025 struct drm_private_obj *obj;
3026 struct drm_private_state *new_obj_state;
3029 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3030 if (obj->funcs == dm->atomic_obj.funcs)
3031 return to_dm_atomic_state(new_obj_state);
3037 static struct drm_private_state *
3038 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3040 struct dm_atomic_state *old_state, *new_state;
	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;
3046 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3048 old_state = to_dm_atomic_state(obj->state);
3050 if (old_state && old_state->context)
3051 new_state->context = dc_copy_state(old_state->context);
	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}
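/*
 * Lifecycle note (editorial summary, not new driver logic): the DRM
 * private-object helpers invoke the duplicate/destroy callbacks whenever an
 * atomic state that references dm->atomic_obj is duplicated or freed, so
 * every in-flight atomic check owns its own dc_state copy made by
 * dc_copy_state() above and released again in dm_atomic_destroy_state()
 * below.
 */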
3061 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3062 struct drm_private_state *state)
3064 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3066 if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}
3072 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3073 .atomic_duplicate_state = dm_atomic_duplicate_state,
3074 .atomic_destroy_state = dm_atomic_destroy_state,
3077 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3079 struct dm_atomic_state *state;
3082 adev->mode_info.mode_config_initialized = true;
3084 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3085 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3087 adev_to_drm(adev)->mode_config.max_width = 16384;
3088 adev_to_drm(adev)->mode_config.max_height = 16384;
3090 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3091 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3092 /* indicates support for immediate flip */
3093 adev_to_drm(adev)->mode_config.async_page_flip = true;
3095 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3097 state = kzalloc(sizeof(*state), GFP_KERNEL);
3101 state->context = dc_create_state(adev->dm.dc);
3102 if (!state->context) {
3107 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3109 drm_atomic_private_obj_init(adev_to_drm(adev),
3110 &adev->dm.atomic_obj,
3112 &dm_atomic_state_funcs);
3114 r = amdgpu_display_modeset_create_props(adev);
3116 dc_release_state(state->context);
3121 r = amdgpu_dm_audio_init(adev);
3123 dc_release_state(state->context);
3131 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3132 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3133 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3135 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3136 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3138 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3140 #if defined(CONFIG_ACPI)
3141 struct amdgpu_dm_backlight_caps caps;
3143 memset(&caps, 0, sizeof(caps));
3145 if (dm->backlight_caps.caps_valid)
3148 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3149 if (caps.caps_valid) {
3150 dm->backlight_caps.caps_valid = true;
3151 if (caps.aux_support)
3153 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3154 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3156 dm->backlight_caps.min_input_signal =
3157 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3158 dm->backlight_caps.max_input_signal =
3159 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3162 if (dm->backlight_caps.aux_support)
3165 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3166 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3170 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3177 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3178 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3183 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3184 unsigned *min, unsigned *max)
3189 if (caps->aux_support) {
3190 // Firmware limits are in nits, DC API wants millinits.
3191 *max = 1000 * caps->aux_max_input_signal;
3192 *min = 1000 * caps->aux_min_input_signal;
3194 // Firmware limits are 8-bit, PWM control is 16-bit.
3195 *max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}

	return true;
}
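/*
 * Worked example (illustrative, values assumed): on the PWM path with the
 * default firmware limits min_input_signal = 12 and max_input_signal = 255,
 * the 16-bit range becomes min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535, i.e. 0xFFFF, the full PWM scale.
 */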
3201 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3202 uint32_t brightness)
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

3209 // Rescale 0..255 to min..max
3210 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3211 AMDGPU_MAX_BL_LEVEL);
3214 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3215 uint32_t brightness)
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}
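/*
 * Worked example (illustrative, values assumed): with min = 3084 and
 * max = 65535, a user brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432 in
 * convert_brightness_from_user(), and convert_brightness_to_user() maps
 * 34432 back to 128, so the two helpers round-trip.
 */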
3229 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3231 struct amdgpu_display_manager *dm = bl_get_data(bd);
3232 struct amdgpu_dm_backlight_caps caps;
3233 struct dc_link *link = NULL;
3237 amdgpu_dm_update_backlight_caps(dm);
3238 caps = dm->backlight_caps;
3240 link = (struct dc_link *)dm->backlight_link;
3242 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3243 // Change brightness based on AUX property
3244 if (caps.aux_support)
3245 return set_backlight_via_aux(link, brightness);
3247 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3252 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3254 struct amdgpu_display_manager *dm = bl_get_data(bd);
3255 int ret = dc_link_get_backlight_level(dm->backlight_link);
3257 if (ret == DC_ERROR_UNEXPECTED)
3258 return bd->props.brightness;
3259 return convert_brightness_to_user(&dm->backlight_caps, ret);
3262 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3263 .options = BL_CORE_SUSPENDRESUME,
3264 .get_brightness = amdgpu_dm_backlight_get_brightness,
3265 .update_status = amdgpu_dm_backlight_update_status,
3269 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3272 struct backlight_properties props = { 0 };
3274 amdgpu_dm_update_backlight_caps(dm);
3276 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3277 props.brightness = AMDGPU_MAX_BL_LEVEL;
3278 props.type = BACKLIGHT_RAW;
3280 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3281 adev_to_drm(dm->adev)->primary->index);
3283 dm->backlight_dev = backlight_device_register(bl_name,
3284 adev_to_drm(dm->adev)->dev,
3286 &amdgpu_dm_backlight_ops,
3289 if (IS_ERR(dm->backlight_dev))
3290 DRM_ERROR("DM: Backlight registration failed!\n");
3292 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3297 static int initialize_plane(struct amdgpu_display_manager *dm,
3298 struct amdgpu_mode_info *mode_info, int plane_id,
3299 enum drm_plane_type plane_type,
3300 const struct dc_plane_cap *plane_cap)
3302 struct drm_plane *plane;
3303 unsigned long possible_crtcs;
3306 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3308 DRM_ERROR("KMS: Failed to allocate plane\n");
3311 plane->type = plane_type;
3314 * HACK: IGT tests expect that the primary plane for a CRTC
3315 * can only have one possible CRTC. Only expose support for
3316 * any CRTC if they're not going to be used as a primary plane
3317 * for a CRTC - like overlay or underlay planes.
3319 possible_crtcs = 1 << plane_id;
3320 if (plane_id >= dm->dc->caps.max_streams)
3321 possible_crtcs = 0xff;
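	/*
	 * Illustrative example (numbers assumed, not additional logic): with
	 * max_streams = 4, primary plane 2 gets possible_crtcs = 1 << 2 = 0x4
	 * and is therefore tied to CRTC 2 only, while an overlay plane with
	 * plane_id >= max_streams gets 0xff and may be placed on any CRTC.
	 */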
3323 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3326 DRM_ERROR("KMS: Failed to initialize plane\n");
3332 mode_info->planes[plane_id] = plane;
3338 static void register_backlight_device(struct amdgpu_display_manager *dm,
3339 struct dc_link *link)
3341 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3342 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3344 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3345 link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
3351 amdgpu_dm_register_backlight_device(dm);
3353 if (dm->backlight_dev)
3354 dm->backlight_link = link;
3361 * In this architecture, the association
3362 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
3366 * Returns 0 on success
3368 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3370 struct amdgpu_display_manager *dm = &adev->dm;
3372 struct amdgpu_dm_connector *aconnector = NULL;
3373 struct amdgpu_encoder *aencoder = NULL;
3374 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3376 int32_t primary_planes;
3377 enum dc_connection_type new_connection_type = dc_connection_none;
3378 const struct dc_plane_cap *plane;
3380 dm->display_indexes_num = dm->dc->caps.max_streams;
3381 /* Update the actual used number of crtc */
3382 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3384 link_cnt = dm->dc->caps.max_links;
3385 if (amdgpu_dm_mode_config_init(dm->adev)) {
3386 DRM_ERROR("DM: Failed to initialize mode config\n");
3390 /* There is one primary plane per CRTC */
3391 primary_planes = dm->dc->caps.max_streams;
3392 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3395 * Initialize primary planes, implicit planes for legacy IOCTLS.
3396 * Order is reversed to match iteration order in atomic check.
3398 for (i = (primary_planes - 1); i >= 0; i--) {
3399 plane = &dm->dc->caps.planes[i];
3401 if (initialize_plane(dm, mode_info, i,
3402 DRM_PLANE_TYPE_PRIMARY, plane)) {
3403 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3409 * Initialize overlay planes, index starting after primary planes.
3410 * These planes have a higher DRM index than the primary planes since
3411 * they should be considered as having a higher z-order.
3412 * Order is reversed to match iteration order in atomic check.
3414 * Only support DCN for now, and only expose one so we don't encourage
3415 * userspace to use up all the pipes.
3417 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3418 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3420 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3423 if (!plane->blends_with_above || !plane->blends_with_below)
3426 if (!plane->pixel_format_support.argb8888)
3429 if (initialize_plane(dm, NULL, primary_planes + i,
3430 DRM_PLANE_TYPE_OVERLAY, plane)) {
3431 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3435 /* Only create one overlay plane. */
3439 for (i = 0; i < dm->dc->caps.max_streams; i++)
3440 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3441 DRM_ERROR("KMS: Failed to initialize crtc\n");
3445 /* loops over all connectors on the board */
3446 for (i = 0; i < link_cnt; i++) {
3447 struct dc_link *link = NULL;
3449 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3451 "KMS: Cannot support more than %d display indexes\n",
3452 AMDGPU_DM_MAX_DISPLAY_INDEX);
3456 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3460 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3464 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3465 DRM_ERROR("KMS: Failed to initialize encoder\n");
3469 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3470 DRM_ERROR("KMS: Failed to initialize connector\n");
3474 link = dc_get_link_at_index(dm->dc, i);
3476 if (!dc_link_detect_sink(link, &new_connection_type))
3477 DRM_ERROR("KMS: Failed to detect connector\n");
3479 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3480 emulated_link_detect(link);
3481 amdgpu_dm_update_connector_after_detect(aconnector);
3483 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3484 amdgpu_dm_update_connector_after_detect(aconnector);
3485 register_backlight_device(dm, link);
3486 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3487 amdgpu_dm_set_psr_caps(link);
3493 /* Software is initialized. Now we can register interrupt handlers. */
3494 switch (adev->asic_type) {
3495 #if defined(CONFIG_DRM_AMD_DC_SI)
3500 if (dce60_register_irq_handlers(dm->adev)) {
3501 DRM_ERROR("DM: Failed to initialize IRQ\n");
3515 case CHIP_POLARIS11:
3516 case CHIP_POLARIS10:
3517 case CHIP_POLARIS12:
3522 if (dce110_register_irq_handlers(dm->adev)) {
3523 DRM_ERROR("DM: Failed to initialize IRQ\n");
3527 #if defined(CONFIG_DRM_AMD_DC_DCN)
3533 case CHIP_SIENNA_CICHLID:
3534 case CHIP_NAVY_FLOUNDER:
3535 case CHIP_DIMGREY_CAVEFISH:
3537 if (dcn10_register_irq_handlers(dm->adev)) {
3538 DRM_ERROR("DM: Failed to initialize IRQ\n");
3544 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3556 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3558 drm_mode_config_cleanup(dm->ddev);
3559 drm_atomic_private_obj_fini(&dm->atomic_obj);
3563 /******************************************************************************
3564 * amdgpu_display_funcs functions
3565 *****************************************************************************/
3568 * dm_bandwidth_update - program display watermarks
3570 * @adev: amdgpu_device pointer
3572 * Calculate and program the display watermarks and line buffer allocation.
3574 static void dm_bandwidth_update(struct amdgpu_device *adev)
3576 /* TODO: implement later */
3579 static const struct amdgpu_display_funcs dm_display_funcs = {
3580 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3581 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3582 .backlight_set_level = NULL, /* never called for DC */
3583 .backlight_get_level = NULL, /* never called for DC */
3584 .hpd_sense = NULL,/* called unconditionally */
3585 .hpd_set_polarity = NULL, /* called unconditionally */
3586 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3587 .page_flip_get_scanoutpos =
3588 dm_crtc_get_scanoutpos,/* called unconditionally */
3589 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3590 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3593 #if defined(CONFIG_DEBUG_KERNEL_DC)
3595 static ssize_t s3_debug_store(struct device *device,
3596 struct device_attribute *attr,
3602 struct drm_device *drm_dev = dev_get_drvdata(device);
3603 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3605 ret = kstrtoint(buf, 0, &s3_state);
3610 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3615 return ret == 0 ? count : 0;
3618 DEVICE_ATTR_WO(s3_debug);
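/*
 * Usage sketch (the sysfs path below is an assumption; the attribute is
 * created on the GPU's PCI device): writing a value such as
 *   echo 1 > /sys/class/drm/card0/device/s3_debug
 * is parsed by s3_debug_store() above and triggers
 * drm_kms_helper_hotplug_event(), which makes it a convenient way to poke
 * connector re-detection without a real S3 cycle.
 */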
3622 static int dm_early_init(void *handle)
3624 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3626 switch (adev->asic_type) {
3627 #if defined(CONFIG_DRM_AMD_DC_SI)
3631 adev->mode_info.num_crtc = 6;
3632 adev->mode_info.num_hpd = 6;
3633 adev->mode_info.num_dig = 6;
3636 adev->mode_info.num_crtc = 2;
3637 adev->mode_info.num_hpd = 2;
3638 adev->mode_info.num_dig = 2;
3643 adev->mode_info.num_crtc = 6;
3644 adev->mode_info.num_hpd = 6;
3645 adev->mode_info.num_dig = 6;
3648 adev->mode_info.num_crtc = 4;
3649 adev->mode_info.num_hpd = 6;
3650 adev->mode_info.num_dig = 7;
3654 adev->mode_info.num_crtc = 2;
3655 adev->mode_info.num_hpd = 6;
3656 adev->mode_info.num_dig = 6;
3660 adev->mode_info.num_crtc = 6;
3661 adev->mode_info.num_hpd = 6;
3662 adev->mode_info.num_dig = 7;
3665 adev->mode_info.num_crtc = 3;
3666 adev->mode_info.num_hpd = 6;
3667 adev->mode_info.num_dig = 9;
3670 adev->mode_info.num_crtc = 2;
3671 adev->mode_info.num_hpd = 6;
3672 adev->mode_info.num_dig = 9;
3674 case CHIP_POLARIS11:
3675 case CHIP_POLARIS12:
3676 adev->mode_info.num_crtc = 5;
3677 adev->mode_info.num_hpd = 5;
3678 adev->mode_info.num_dig = 5;
3680 case CHIP_POLARIS10:
3682 adev->mode_info.num_crtc = 6;
3683 adev->mode_info.num_hpd = 6;
3684 adev->mode_info.num_dig = 6;
3689 adev->mode_info.num_crtc = 6;
3690 adev->mode_info.num_hpd = 6;
3691 adev->mode_info.num_dig = 6;
3693 #if defined(CONFIG_DRM_AMD_DC_DCN)
3697 adev->mode_info.num_crtc = 4;
3698 adev->mode_info.num_hpd = 4;
3699 adev->mode_info.num_dig = 4;
3703 case CHIP_SIENNA_CICHLID:
3704 case CHIP_NAVY_FLOUNDER:
3705 adev->mode_info.num_crtc = 6;
3706 adev->mode_info.num_hpd = 6;
3707 adev->mode_info.num_dig = 6;
3710 case CHIP_DIMGREY_CAVEFISH:
3711 adev->mode_info.num_crtc = 5;
3712 adev->mode_info.num_hpd = 5;
3713 adev->mode_info.num_dig = 5;
3717 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3721 amdgpu_dm_set_irq_funcs(adev);
3723 if (adev->mode_info.funcs == NULL)
3724 adev->mode_info.funcs = &dm_display_funcs;
3727 * Note: Do NOT change adev->audio_endpt_rreg and
3728 * adev->audio_endpt_wreg because they are initialised in
3729 * amdgpu_device_init()
3731 #if defined(CONFIG_DEBUG_KERNEL_DC)
3733 adev_to_drm(adev)->dev,
3734 &dev_attr_s3_debug);
3740 static bool modeset_required(struct drm_crtc_state *crtc_state,
3741 struct dc_stream_state *new_stream,
3742 struct dc_stream_state *old_stream)
3744 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3747 static bool modereset_required(struct drm_crtc_state *crtc_state)
3749 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3752 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3754 drm_encoder_cleanup(encoder);
3758 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3759 .destroy = amdgpu_dm_encoder_destroy,
3763 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3764 struct dc_scaling_info *scaling_info)
3766 int scale_w, scale_h;
3768 memset(scaling_info, 0, sizeof(*scaling_info));
3770 /* Source is fixed 16.16 but we ignore mantissa for now... */
3771 scaling_info->src_rect.x = state->src_x >> 16;
3772 scaling_info->src_rect.y = state->src_y >> 16;
3774 scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

3778 scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

3782 scaling_info->dst_rect.x = state->crtc_x;
3783 scaling_info->dst_rect.y = state->crtc_y;
	if (state->crtc_w == 0)
		return -EINVAL;

3788 scaling_info->dst_rect.width = state->crtc_w;
	if (state->crtc_h == 0)
		return -EINVAL;

3793 scaling_info->dst_rect.height = state->crtc_h;
3795 /* DRM doesn't specify clipping on destination output. */
3796 scaling_info->clip_rect = scaling_info->dst_rect;
3798 /* TODO: Validate scaling per-format with DC plane caps */
3799 scale_w = scaling_info->dst_rect.width * 1000 /
3800 scaling_info->src_rect.width;
	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;
3805 scale_h = scaling_info->dst_rect.height * 1000 /
3806 scaling_info->src_rect.height;
	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;
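	/*
	 * Worked example (illustrative): scaling a 1920-wide source to a
	 * 3840-wide destination gives scale_w = 3840 * 1000 / 1920 = 2000,
	 * i.e. 2.0x, which sits comfortably inside the accepted
	 * 0.25x..16x (250..16000) window checked above.
	 */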
3812 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3813 * assume reasonable defaults based on the format.
3820 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3821 uint64_t tiling_flags)
3823 /* Fill GFX8 params */
3824 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3825 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3827 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3828 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3829 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3830 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3831 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3833 /* XXX fix me for VI */
3834 tiling_info->gfx8.num_banks = num_banks;
3835 tiling_info->gfx8.array_mode =
3836 DC_ARRAY_2D_TILED_THIN1;
3837 tiling_info->gfx8.tile_split = tile_split;
3838 tiling_info->gfx8.bank_width = bankw;
3839 tiling_info->gfx8.bank_height = bankh;
3840 tiling_info->gfx8.tile_aspect = mtaspect;
3841 tiling_info->gfx8.tile_mode =
3842 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3843 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3844 == DC_ARRAY_1D_TILED_THIN1) {
3845 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3848 tiling_info->gfx8.pipe_config =
3849 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3853 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3854 union dc_tiling_info *tiling_info)
3856 tiling_info->gfx9.num_pipes =
3857 adev->gfx.config.gb_addr_config_fields.num_pipes;
3858 tiling_info->gfx9.num_banks =
3859 adev->gfx.config.gb_addr_config_fields.num_banks;
3860 tiling_info->gfx9.pipe_interleave =
3861 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3862 tiling_info->gfx9.num_shader_engines =
3863 adev->gfx.config.gb_addr_config_fields.num_se;
3864 tiling_info->gfx9.max_compressed_frags =
3865 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3866 tiling_info->gfx9.num_rb_per_se =
3867 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3868 tiling_info->gfx9.shaderEnable = 1;
3869 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3870 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3871 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3872 adev->asic_type == CHIP_VANGOGH)
3873 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3877 validate_dcc(struct amdgpu_device *adev,
3878 const enum surface_pixel_format format,
3879 const enum dc_rotation_angle rotation,
3880 const union dc_tiling_info *tiling_info,
3881 const struct dc_plane_dcc_param *dcc,
3882 const struct dc_plane_address *address,
3883 const struct plane_size *plane_size)
3885 struct dc *dc = adev->dm.dc;
3886 struct dc_dcc_surface_param input;
3887 struct dc_surface_dcc_cap output;
3889 memset(&input, 0, sizeof(input));
3890 memset(&output, 0, sizeof(output));
3895 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3896 !dc->cap_funcs.get_dcc_compression_cap)
3899 input.format = format;
3900 input.surface_size.width = plane_size->surface_size.width;
3901 input.surface_size.height = plane_size->surface_size.height;
3902 input.swizzle_mode = tiling_info->gfx9.swizzle;
3904 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3905 input.scan = SCAN_DIRECTION_HORIZONTAL;
3906 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3907 input.scan = SCAN_DIRECTION_VERTICAL;
3909 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3912 if (!output.capable)
3915 if (dcc->independent_64b_blks == 0 &&
3916 output.grph.rgb.independent_64b_blks != 0)
3923 modifier_has_dcc(uint64_t modifier)
3925 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3929 modifier_gfx9_swizzle_mode(uint64_t modifier)
3931 if (modifier == DRM_FORMAT_MOD_LINEAR)
3934 return AMD_FMT_MOD_GET(TILE, modifier);
3937 static const struct drm_format_info *
3938 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3940 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3944 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3945 union dc_tiling_info *tiling_info,
3948 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3949 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3950 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3951 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3953 fill_gfx9_tiling_info_from_device(adev, tiling_info);
3955 if (!IS_AMD_FMT_MOD(modifier))
3958 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3959 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3961 if (adev->family >= AMDGPU_FAMILY_NV) {
3962 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3964 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3966 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
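/*
 * Worked example (illustrative, modifier bits assumed): a modifier encoding
 * PIPE_XOR_BITS = 5 gives pipes_log2 = min(4, 5) = 4, so
 * gfx9.num_pipes = 1 << 4 = 16 and
 * gfx9.num_shader_engines = 1 << (5 - 4) = 2.
 */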
3970 enum dm_micro_swizzle {
3971 MICRO_SWIZZLE_Z = 0,
3972 MICRO_SWIZZLE_S = 1,
3973 MICRO_SWIZZLE_D = 2,
3977 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3981 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3982 const struct drm_format_info *info = drm_format_info(format);
3984 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
	/*
	 * We always have to allow this modifier, because core DRM still
	 * checks LINEAR support if userspace does not provide modifiers.
	 */
	if (modifier == DRM_FORMAT_MOD_LINEAR)
		return true;

	/*
	 * The arbitrary tiling support for multi-plane formats has not been
	 * hooked up yet.
	 */
	if (info->num_planes > 1)
		return false;

	/*
	 * For D swizzle the canonical modifier depends on the bpp, so check
	 * it here.
	 */
4007 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4008 adev->family >= AMDGPU_FAMILY_NV) {
		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
			return false;
	}

	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
	    info->cpp[0] < 8)
		return false;

4017 if (modifier_has_dcc(modifier)) {
4018 /* Per radeonsi comments 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
	}

	return true;
}
4027 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4032 if (*cap - *size < 1) {
4033 uint64_t new_cap = *cap * 2;
4034 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4042 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4048 (*mods)[*size] = mod;
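/*
 * Usage note (hedged summary of the helper above): get_plane_modifiers()
 * below seeds the list with a capacity of 128 entries; if a caller such as
 * add_gfx9_modifiers() would overflow that, the branch above doubles the
 * capacity and copies the existing entries before appending, so callers do
 * not need to size the array up front.
 */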
4053 add_gfx9_modifiers(const struct amdgpu_device *adev,
4054 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4056 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4057 int pipe_xor_bits = min(8, pipes +
4058 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4059 int bank_xor_bits = min(8 - pipe_xor_bits,
4060 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4061 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4062 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4065 if (adev->family == AMDGPU_FAMILY_RV) {
4066 /* Raven2 and later */
4067 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4070 * No _D DCC swizzles yet because we only allow 32bpp, which
4071 * doesn't support _D on DCN
4074 if (has_constant_encode) {
4075 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4076 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4077 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4078 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4079 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4080 AMD_FMT_MOD_SET(DCC, 1) |
4081 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4082 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4083 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4086 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4087 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4088 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4089 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4090 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4091 AMD_FMT_MOD_SET(DCC, 1) |
4092 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4093 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4094 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4096 if (has_constant_encode) {
4097 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4098 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4099 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4100 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4101 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4102 AMD_FMT_MOD_SET(DCC, 1) |
4103 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4104 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4105 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4107 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4108 AMD_FMT_MOD_SET(RB, rb) |
4109 AMD_FMT_MOD_SET(PIPE, pipes));
4112 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4113 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4114 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4115 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4116 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4117 AMD_FMT_MOD_SET(DCC, 1) |
4118 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4119 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4120 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4121 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4122 AMD_FMT_MOD_SET(RB, rb) |
4123 AMD_FMT_MOD_SET(PIPE, pipes));
4127 * Only supported for 64bpp on Raven, will be filtered on format in
4128 * dm_plane_format_mod_supported.
4130 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4131 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4132 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4133 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4134 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4136 if (adev->family == AMDGPU_FAMILY_RV) {
4137 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4138 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4139 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4140 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4141 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4145 * Only supported for 64bpp on Raven, will be filtered on format in
4146 * dm_plane_format_mod_supported.
4148 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4149 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4150 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4152 if (adev->family == AMDGPU_FAMILY_RV) {
4153 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4154 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4155 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4160 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4161 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4163 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4165 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4166 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4167 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4168 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4169 AMD_FMT_MOD_SET(DCC, 1) |
4170 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4171 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4172 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4174 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4175 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4176 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4177 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4178 AMD_FMT_MOD_SET(DCC, 1) |
4179 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4180 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4181 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4182 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4184 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4185 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4186 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4187 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4189 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4190 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4191 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4192 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4195 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4196 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4197 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4198 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4200 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4201 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4202 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4206 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4207 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4209 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4210 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4212 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4213 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4214 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4215 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4216 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4217 AMD_FMT_MOD_SET(DCC, 1) |
4218 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4219 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4220 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4221 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4223 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4224 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4225 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4226 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4227 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4228 AMD_FMT_MOD_SET(DCC, 1) |
4229 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4230 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4231 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4232 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4233 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4235 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4236 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4237 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4238 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4239 AMD_FMT_MOD_SET(PACKERS, pkrs));
4241 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4242 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4243 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4244 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4245 AMD_FMT_MOD_SET(PACKERS, pkrs));
4247 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4248 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4249 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4250 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4252 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4253 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4254 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
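/*
 * Build the plane's modifier list to expose to userspace. Cursor planes only
 * get LINEAR; other planes get a per-family list (GFX9, GFX10.1 or GFX10.3)
 * followed by LINEAR. The list is always terminated by DRM_FORMAT_MOD_INVALID.
 */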
4258 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4260 uint64_t size = 0, capacity = 128;
4263 /* We have not hooked up any pre-GFX9 modifiers. */
4264 if (adev->family < AMDGPU_FAMILY_AI)
4267 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4269 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4270 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4271 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4272 return *mods ? 0 : -ENOMEM;
4275 switch (adev->family) {
4276 case AMDGPU_FAMILY_AI:
4277 case AMDGPU_FAMILY_RV:
4278 add_gfx9_modifiers(adev, mods, &size, &capacity);
4280 case AMDGPU_FAMILY_NV:
4281 case AMDGPU_FAMILY_VGH:
4282 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4283 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4285 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4289 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4291 /* INVALID marks the end of the list. */
4292 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
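/*
 * Translate the framebuffer's modifier into DC tiling info and swizzle mode.
 * If the modifier carries DCC, also fill the DCC meta pitch and address from
 * the second plane of the framebuffer and validate the resulting setup.
 */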
4301 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4302 const struct amdgpu_framebuffer *afb,
4303 const enum surface_pixel_format format,
4304 const enum dc_rotation_angle rotation,
4305 const struct plane_size *plane_size,
4306 union dc_tiling_info *tiling_info,
4307 struct dc_plane_dcc_param *dcc,
4308 struct dc_plane_address *address,
4309 const bool force_disable_dcc)
4311 const uint64_t modifier = afb->base.modifier;
4314 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4315 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4317 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4318 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4321 dcc->meta_pitch = afb->base.pitches[1];
4322 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4324 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4325 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4328 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
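/*
 * Fill DC's plane size, tiling and address structures from the framebuffer.
 * RGB surfaces use a single GRAPHICS address; video (YUV) surfaces use split
 * luma/chroma addresses and sizes. GFX9 and newer derive tiling from the
 * modifier, older families from the legacy tiling flags.
 */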
4336 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4337 const struct amdgpu_framebuffer *afb,
4338 const enum surface_pixel_format format,
4339 const enum dc_rotation_angle rotation,
4340 const uint64_t tiling_flags,
4341 union dc_tiling_info *tiling_info,
4342 struct plane_size *plane_size,
4343 struct dc_plane_dcc_param *dcc,
4344 struct dc_plane_address *address,
4346 bool force_disable_dcc)
4348 const struct drm_framebuffer *fb = &afb->base;
4351 memset(tiling_info, 0, sizeof(*tiling_info));
4352 memset(plane_size, 0, sizeof(*plane_size));
4353 memset(dcc, 0, sizeof(*dcc));
4354 memset(address, 0, sizeof(*address));
4356 address->tmz_surface = tmz_surface;
4358 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4359 uint64_t addr = afb->address + fb->offsets[0];
4361 plane_size->surface_size.x = 0;
4362 plane_size->surface_size.y = 0;
4363 plane_size->surface_size.width = fb->width;
4364 plane_size->surface_size.height = fb->height;
4365 plane_size->surface_pitch =
4366 fb->pitches[0] / fb->format->cpp[0];
4368 address->type = PLN_ADDR_TYPE_GRAPHICS;
4369 address->grph.addr.low_part = lower_32_bits(addr);
4370 address->grph.addr.high_part = upper_32_bits(addr);
4371 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4372 uint64_t luma_addr = afb->address + fb->offsets[0];
4373 uint64_t chroma_addr = afb->address + fb->offsets[1];
4375 plane_size->surface_size.x = 0;
4376 plane_size->surface_size.y = 0;
4377 plane_size->surface_size.width = fb->width;
4378 plane_size->surface_size.height = fb->height;
4379 plane_size->surface_pitch =
4380 fb->pitches[0] / fb->format->cpp[0];
4382 plane_size->chroma_size.x = 0;
4383 plane_size->chroma_size.y = 0;
4384 /* TODO: set these based on surface format */
4385 plane_size->chroma_size.width = fb->width / 2;
4386 plane_size->chroma_size.height = fb->height / 2;
4388 plane_size->chroma_pitch =
4389 fb->pitches[1] / fb->format->cpp[1];
4391 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4392 address->video_progressive.luma_addr.low_part =
4393 lower_32_bits(luma_addr);
4394 address->video_progressive.luma_addr.high_part =
4395 upper_32_bits(luma_addr);
4396 address->video_progressive.chroma_addr.low_part =
4397 lower_32_bits(chroma_addr);
4398 address->video_progressive.chroma_addr.high_part =
4399 upper_32_bits(chroma_addr);
4402 if (adev->family >= AMDGPU_FAMILY_AI) {
4403 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4404 rotation, plane_size,
4411 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
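/*
 * Derive blending settings for an overlay plane: per-pixel alpha is enabled
 * only for ARGB-style formats in the PREMULTI blend mode, and the 16-bit DRM
 * plane alpha is scaled down to the 8-bit global alpha value DC expects.
 */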
4418 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4419 bool *per_pixel_alpha, bool *global_alpha,
4420 int *global_alpha_value)
4422 *per_pixel_alpha = false;
4423 *global_alpha = false;
4424 *global_alpha_value = 0xff;
4426 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4429 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4430 static const uint32_t alpha_formats[] = {
4431 DRM_FORMAT_ARGB8888,
4432 DRM_FORMAT_RGBA8888,
4433 DRM_FORMAT_ABGR8888,
4435 uint32_t format = plane_state->fb->format->format;
4438 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4439 if (format == alpha_formats[i]) {
4440 *per_pixel_alpha = true;
4446 if (plane_state->alpha < 0xffff) {
4447 *global_alpha = true;
4448 *global_alpha_value = plane_state->alpha >> 8;
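/*
 * Map the DRM colour encoding/range plane properties to a DC colour space.
 * RGB formats always use sRGB; YCbCr formats pick BT.601/BT.709 (full or
 * limited range) or BT.2020 based on the plane state.
 */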
4453 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4454 const enum surface_pixel_format format,
4455 enum dc_color_space *color_space)
4459 *color_space = COLOR_SPACE_SRGB;
4461 /* DRM color properties only affect non-RGB formats. */
4462 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4465 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4467 switch (plane_state->color_encoding) {
4468 case DRM_COLOR_YCBCR_BT601:
4470 *color_space = COLOR_SPACE_YCBCR601;
4472 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4475 case DRM_COLOR_YCBCR_BT709:
4477 *color_space = COLOR_SPACE_YCBCR709;
4479 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4482 case DRM_COLOR_YCBCR_BT2020:
4484 *color_space = COLOR_SPACE_2020_YCBCR;
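/*
 * Populate a dc_plane_info from the DRM plane state: surface pixel format,
 * rotation, colour space, buffer/tiling/DCC attributes and blending, plus
 * the surface address used for the flip.
 */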
4497 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4498 const struct drm_plane_state *plane_state,
4499 const uint64_t tiling_flags,
4500 struct dc_plane_info *plane_info,
4501 struct dc_plane_address *address,
4503 bool force_disable_dcc)
4505 const struct drm_framebuffer *fb = plane_state->fb;
4506 const struct amdgpu_framebuffer *afb =
4507 to_amdgpu_framebuffer(plane_state->fb);
4508 struct drm_format_name_buf format_name;
4511 memset(plane_info, 0, sizeof(*plane_info));
4513 switch (fb->format->format) {
4515 plane_info->format =
4516 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4518 case DRM_FORMAT_RGB565:
4519 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4521 case DRM_FORMAT_XRGB8888:
4522 case DRM_FORMAT_ARGB8888:
4523 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4525 case DRM_FORMAT_XRGB2101010:
4526 case DRM_FORMAT_ARGB2101010:
4527 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4529 case DRM_FORMAT_XBGR2101010:
4530 case DRM_FORMAT_ABGR2101010:
4531 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4533 case DRM_FORMAT_XBGR8888:
4534 case DRM_FORMAT_ABGR8888:
4535 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4537 case DRM_FORMAT_NV21:
4538 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4540 case DRM_FORMAT_NV12:
4541 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4543 case DRM_FORMAT_P010:
4544 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4546 case DRM_FORMAT_XRGB16161616F:
4547 case DRM_FORMAT_ARGB16161616F:
4548 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4550 case DRM_FORMAT_XBGR16161616F:
4551 case DRM_FORMAT_ABGR16161616F:
4552 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4556 "Unsupported screen format %s\n",
4557 drm_get_format_name(fb->format->format, &format_name));
4561 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4562 case DRM_MODE_ROTATE_0:
4563 plane_info->rotation = ROTATION_ANGLE_0;
4565 case DRM_MODE_ROTATE_90:
4566 plane_info->rotation = ROTATION_ANGLE_90;
4568 case DRM_MODE_ROTATE_180:
4569 plane_info->rotation = ROTATION_ANGLE_180;
4571 case DRM_MODE_ROTATE_270:
4572 plane_info->rotation = ROTATION_ANGLE_270;
4575 plane_info->rotation = ROTATION_ANGLE_0;
4579 plane_info->visible = true;
4580 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4582 plane_info->layer_index = 0;
4584 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4585 &plane_info->color_space);
4589 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4590 plane_info->rotation, tiling_flags,
4591 &plane_info->tiling_info,
4592 &plane_info->plane_size,
4593 &plane_info->dcc, address, tmz_surface,
4598 fill_blending_from_plane_state(
4599 plane_state, &plane_info->per_pixel_alpha,
4600 &plane_info->global_alpha, &plane_info->global_alpha_value);
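/*
 * Copy the computed scaling and plane info into the dc_plane_state and
 * refresh the input transfer function. DCC is force-disabled on Raven
 * while the device is suspending.
 */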
4605 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4606 struct dc_plane_state *dc_plane_state,
4607 struct drm_plane_state *plane_state,
4608 struct drm_crtc_state *crtc_state)
4610 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4611 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4612 struct dc_scaling_info scaling_info;
4613 struct dc_plane_info plane_info;
4615 bool force_disable_dcc = false;
4617 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4621 dc_plane_state->src_rect = scaling_info.src_rect;
4622 dc_plane_state->dst_rect = scaling_info.dst_rect;
4623 dc_plane_state->clip_rect = scaling_info.clip_rect;
4624 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4626 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4627 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4630 &dc_plane_state->address,
4636 dc_plane_state->format = plane_info.format;
4637 dc_plane_state->color_space = plane_info.color_space;
4639 dc_plane_state->plane_size = plane_info.plane_size;
4640 dc_plane_state->rotation = plane_info.rotation;
4641 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4642 dc_plane_state->stereo_format = plane_info.stereo_format;
4643 dc_plane_state->tiling_info = plane_info.tiling_info;
4644 dc_plane_state->visible = plane_info.visible;
4645 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4646 dc_plane_state->global_alpha = plane_info.global_alpha;
4647 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4648 dc_plane_state->dcc = plane_info.dcc;
4649 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4652 * Always set input transfer function, since plane state is refreshed
4655 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4662 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4663 const struct dm_connector_state *dm_state,
4664 struct dc_stream_state *stream)
4666 enum amdgpu_rmx_type rmx_type;
4668 struct rect src = { 0 }; /* viewport in composition space */
4669 struct rect dst = { 0 }; /* stream addressable area */
4671 /* no mode. nothing to be done */
4675 /* Full screen scaling by default */
4676 src.width = mode->hdisplay;
4677 src.height = mode->vdisplay;
4678 dst.width = stream->timing.h_addressable;
4679 dst.height = stream->timing.v_addressable;
4682 rmx_type = dm_state->scaling;
4683 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4684 if (src.width * dst.height <
4685 src.height * dst.width) {
4686 /* height needs less upscaling/more downscaling */
4687 dst.width = src.width *
4688 dst.height / src.height;
4690 /* width needs less upscaling/more downscaling */
4691 dst.height = src.height *
4692 dst.width / src.width;
4694 } else if (rmx_type == RMX_CENTER) {
4698 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4699 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4701 if (dm_state->underscan_enable) {
4702 dst.x += dm_state->underscan_hborder / 2;
4703 dst.y += dm_state->underscan_vborder / 2;
4704 dst.width -= dm_state->underscan_hborder;
4705 dst.height -= dm_state->underscan_vborder;
4712 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4713 dst.x, dst.y, dst.width, dst.height);
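/*
 * Pick the stream colour depth from the sink's EDID-reported bpc, capped by
 * the HDMI HF-VSDB deep-colour bits for YCbCr 4:2:0 modes and by the
 * requested max bpc, then rounded down to an even value.
 */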
4717 static enum dc_color_depth
4718 convert_color_depth_from_display_info(const struct drm_connector *connector,
4719 bool is_y420, int requested_bpc)
4726 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4727 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4729 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4731 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4734 bpc = (uint8_t)connector->display_info.bpc;
4735 /* Assume 8 bpc by default if no bpc is specified. */
4736 bpc = bpc ? bpc : 8;
4739 if (requested_bpc > 0) {
4741 * Cap display bpc based on the user requested value.
4743 * The value for state->max_bpc may not be correctly updated
4744 * depending on when the connector gets added to the state
4745 * or if this was called outside of atomic check, so it
4746 * can't be used directly.
4748 bpc = min_t(u8, bpc, requested_bpc);
4750 /* Round down to the nearest even number. */
4751 bpc = bpc - (bpc & 1);
4757 * Temporary workaround: DRM doesn't parse color depth for
4758 * EDID revisions before 1.4
4759 * TODO: Fix edid parsing
4761 return COLOR_DEPTH_888;
4763 return COLOR_DEPTH_666;
4765 return COLOR_DEPTH_888;
4767 return COLOR_DEPTH_101010;
4769 return COLOR_DEPTH_121212;
4771 return COLOR_DEPTH_141414;
4773 return COLOR_DEPTH_161616;
4775 return COLOR_DEPTH_UNDEFINED;
4779 static enum dc_aspect_ratio
4780 get_aspect_ratio(const struct drm_display_mode *mode_in)
4782 /* 1-1 mapping, since both enums follow the HDMI spec. */
4783 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4786 static enum dc_color_space
4787 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4789 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4791 switch (dc_crtc_timing->pixel_encoding) {
4792 case PIXEL_ENCODING_YCBCR422:
4793 case PIXEL_ENCODING_YCBCR444:
4794 case PIXEL_ENCODING_YCBCR420:
4797 * 27030 kHz is the separation point between HDTV and SDTV
4798 * according to HDMI spec, we use YCbCr709 and YCbCr601
4801 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4802 if (dc_crtc_timing->flags.Y_ONLY)
4804 COLOR_SPACE_YCBCR709_LIMITED;
4806 color_space = COLOR_SPACE_YCBCR709;
4808 if (dc_crtc_timing->flags.Y_ONLY)
4810 COLOR_SPACE_YCBCR601_LIMITED;
4812 color_space = COLOR_SPACE_YCBCR601;
4817 case PIXEL_ENCODING_RGB:
4818 color_space = COLOR_SPACE_SRGB;
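/*
 * Lower the colour depth until the required TMDS clock fits within the
 * sink's max_tmds_clock. Deep colour scales the pixel clock (e.g. 10 bpc
 * needs 30/24 of the 8 bpc clock); YCbCr 4:2:0 halves it first.
 */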
4829 static bool adjust_colour_depth_from_display_info(
4830 struct dc_crtc_timing *timing_out,
4831 const struct drm_display_info *info)
4833 enum dc_color_depth depth = timing_out->display_color_depth;
4836 normalized_clk = timing_out->pix_clk_100hz / 10;
4837 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4838 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4839 normalized_clk /= 2;
4840 /* Adjust the pixel clock per the HDMI spec based on colour depth */
4842 case COLOR_DEPTH_888:
4844 case COLOR_DEPTH_101010:
4845 normalized_clk = (normalized_clk * 30) / 24;
4847 case COLOR_DEPTH_121212:
4848 normalized_clk = (normalized_clk * 36) / 24;
4850 case COLOR_DEPTH_161616:
4851 normalized_clk = (normalized_clk * 48) / 24;
4854 /* The above depths are the only ones valid for HDMI. */
4857 if (normalized_clk <= info->max_tmds_clock) {
4858 timing_out->display_color_depth = depth;
4861 } while (--depth > COLOR_DEPTH_666);
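/*
 * Convert a drm_display_mode plus connector info into DC's CRTC timing:
 * pixel encoding, colour depth, VIC, sync polarities and the h/v timing
 * fields. When reusing an old stream (scaling with an unchanged refresh
 * rate), the VIC and sync polarities are carried over from it.
 */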
4865 static void fill_stream_properties_from_drm_display_mode(
4866 struct dc_stream_state *stream,
4867 const struct drm_display_mode *mode_in,
4868 const struct drm_connector *connector,
4869 const struct drm_connector_state *connector_state,
4870 const struct dc_stream_state *old_stream,
4873 struct dc_crtc_timing *timing_out = &stream->timing;
4874 const struct drm_display_info *info = &connector->display_info;
4875 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4876 struct hdmi_vendor_infoframe hv_frame;
4877 struct hdmi_avi_infoframe avi_frame;
4879 memset(&hv_frame, 0, sizeof(hv_frame));
4880 memset(&avi_frame, 0, sizeof(avi_frame));
4882 timing_out->h_border_left = 0;
4883 timing_out->h_border_right = 0;
4884 timing_out->v_border_top = 0;
4885 timing_out->v_border_bottom = 0;
4886 /* TODO: un-hardcode */
4887 if (drm_mode_is_420_only(info, mode_in)
4888 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4889 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4890 else if (drm_mode_is_420_also(info, mode_in)
4891 && aconnector->force_yuv420_output)
4892 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4893 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4894 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4895 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4897 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4899 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4900 timing_out->display_color_depth = convert_color_depth_from_display_info(
4902 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4904 timing_out->scan_type = SCANNING_TYPE_NODATA;
4905 timing_out->hdmi_vic = 0;
4908 timing_out->vic = old_stream->timing.vic;
4909 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4910 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4912 timing_out->vic = drm_match_cea_mode(mode_in);
4913 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4914 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4915 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4916 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4919 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4920 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4921 timing_out->vic = avi_frame.video_code;
4922 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4923 timing_out->hdmi_vic = hv_frame.vic;
4926 timing_out->h_addressable = mode_in->crtc_hdisplay;
4927 timing_out->h_total = mode_in->crtc_htotal;
4928 timing_out->h_sync_width =
4929 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4930 timing_out->h_front_porch =
4931 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4932 timing_out->v_total = mode_in->crtc_vtotal;
4933 timing_out->v_addressable = mode_in->crtc_vdisplay;
4934 timing_out->v_front_porch =
4935 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4936 timing_out->v_sync_width =
4937 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4938 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4939 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4941 stream->output_color_space = get_output_color_space(timing_out);
4943 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4944 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4945 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4946 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4947 drm_mode_is_420_also(info, mode_in) &&
4948 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4949 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4950 adjust_colour_depth_from_display_info(timing_out, info);
4955 static void fill_audio_info(struct audio_info *audio_info,
4956 const struct drm_connector *drm_connector,
4957 const struct dc_sink *dc_sink)
4960 int cea_revision = 0;
4961 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4963 audio_info->manufacture_id = edid_caps->manufacturer_id;
4964 audio_info->product_id = edid_caps->product_id;
4966 cea_revision = drm_connector->display_info.cea_rev;
4968 strscpy(audio_info->display_name,
4969 edid_caps->display_name,
4970 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4972 if (cea_revision >= 3) {
4973 audio_info->mode_count = edid_caps->audio_mode_count;
4975 for (i = 0; i < audio_info->mode_count; ++i) {
4976 audio_info->modes[i].format_code =
4977 (enum audio_format_code)
4978 (edid_caps->audio_modes[i].format_code);
4979 audio_info->modes[i].channel_count =
4980 edid_caps->audio_modes[i].channel_count;
4981 audio_info->modes[i].sample_rates.all =
4982 edid_caps->audio_modes[i].sample_rate;
4983 audio_info->modes[i].sample_size =
4984 edid_caps->audio_modes[i].sample_size;
4988 audio_info->flags.all = edid_caps->speaker_flags;
4990 /* TODO: We only check for the progressive mode, check for interlace mode too */
4991 if (drm_connector->latency_present[0]) {
4992 audio_info->video_latency = drm_connector->video_latency[0];
4993 audio_info->audio_latency = drm_connector->audio_latency[0];
4996 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5001 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5002 struct drm_display_mode *dst_mode)
5004 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5005 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5006 dst_mode->crtc_clock = src_mode->crtc_clock;
5007 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5008 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5009 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5010 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5011 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5012 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5013 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5014 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5015 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5016 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5017 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5021 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5022 const struct drm_display_mode *native_mode,
5025 if (scale_enabled) {
5026 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5027 } else if (native_mode->clock == drm_mode->clock &&
5028 native_mode->htotal == drm_mode->htotal &&
5029 native_mode->vtotal == drm_mode->vtotal) {
5030 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5032 /* neither scaling nor an amdgpu-inserted mode, no need to patch */
5036 static struct dc_sink *
5037 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5039 struct dc_sink_init_data sink_init_data = { 0 };
5040 struct dc_sink *sink = NULL;
5041 sink_init_data.link = aconnector->dc_link;
5042 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5044 sink = dc_sink_create(&sink_init_data);
5046 DRM_ERROR("Failed to create sink!\n");
5049 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5054 static void set_multisync_trigger_params(
5055 struct dc_stream_state *stream)
5057 if (stream->triggered_crtc_reset.enabled) {
5058 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5059 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5063 static void set_master_stream(struct dc_stream_state *stream_set[],
5066 int j, highest_rfr = 0, master_stream = 0;
5068 for (j = 0; j < stream_count; j++) {
5069 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5070 int refresh_rate = 0;
5072 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5073 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5074 if (refresh_rate > highest_rfr) {
5075 highest_rfr = refresh_rate;
5080 for (j = 0; j < stream_count; j++) {
5082 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5086 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5090 if (context->stream_count < 2)
5092 for (i = 0; i < context->stream_count ; i++) {
5093 if (!context->streams[i])
5096 * TODO: add a function to read AMD VSDB bits and set
5097 * crtc_sync_master.multi_sync_enabled flag
5098 * For now it's set to false
5100 set_multisync_trigger_params(context->streams[i]);
5102 set_master_stream(context->streams, context->stream_count);
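/*
 * Create and fill a dc_stream_state for the connector: derive the timing
 * from the requested mode (optionally patched against the preferred mode),
 * fill scaling, audio and infopacket data, and enable DSC on DP sinks when
 * supported or forced through debugfs.
 */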
5105 static struct dc_stream_state *
5106 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5107 const struct drm_display_mode *drm_mode,
5108 const struct dm_connector_state *dm_state,
5109 const struct dc_stream_state *old_stream,
5112 struct drm_display_mode *preferred_mode = NULL;
5113 struct drm_connector *drm_connector;
5114 const struct drm_connector_state *con_state =
5115 dm_state ? &dm_state->base : NULL;
5116 struct dc_stream_state *stream = NULL;
5117 struct drm_display_mode mode = *drm_mode;
5118 bool native_mode_found = false;
5119 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5121 int preferred_refresh = 0;
5122 #if defined(CONFIG_DRM_AMD_DC_DCN)
5123 struct dsc_dec_dpcd_caps dsc_caps;
5124 uint32_t link_bandwidth_kbps;
5126 struct dc_sink *sink = NULL;
5127 if (aconnector == NULL) {
5128 DRM_ERROR("aconnector is NULL!\n");
5132 drm_connector = &aconnector->base;
5134 if (!aconnector->dc_sink) {
5135 sink = create_fake_sink(aconnector);
5139 sink = aconnector->dc_sink;
5140 dc_sink_retain(sink);
5143 stream = dc_create_stream_for_sink(sink);
5145 if (stream == NULL) {
5146 DRM_ERROR("Failed to create stream for sink!\n");
5150 stream->dm_stream_context = aconnector;
5152 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5153 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5155 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5156 /* Search for preferred mode */
5157 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5158 native_mode_found = true;
5162 if (!native_mode_found)
5163 preferred_mode = list_first_entry_or_null(
5164 &aconnector->base.modes,
5165 struct drm_display_mode,
5168 mode_refresh = drm_mode_vrefresh(&mode);
5170 if (preferred_mode == NULL) {
5172 * This may not be an error: the use case is when we have no
5173 * usermode calls to reset and set mode upon hotplug. In this
5174 * case, we call set mode ourselves to restore the previous mode,
5175 * and the mode list may not have been filled in yet.
5177 DRM_DEBUG_DRIVER("No preferred mode found\n");
5179 decide_crtc_timing_for_drm_display_mode(
5180 &mode, preferred_mode,
5181 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5182 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5186 drm_mode_set_crtcinfo(&mode, 0);
5189 * If scaling is enabled and refresh rate didn't change
5190 * we copy the vic and polarities of the old timings
5192 if (!scale || mode_refresh != preferred_refresh)
5193 fill_stream_properties_from_drm_display_mode(stream,
5194 &mode, &aconnector->base, con_state, NULL, requested_bpc);
5196 fill_stream_properties_from_drm_display_mode(stream,
5197 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5199 stream->timing.flags.DSC = 0;
5201 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5202 #if defined(CONFIG_DRM_AMD_DC_DCN)
5203 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5204 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5205 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5207 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5208 dc_link_get_link_cap(aconnector->dc_link));
5210 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5211 /* Set DSC policy according to dsc_clock_en */
5212 dc_dsc_policy_set_enable_dsc_when_not_needed(
5213 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5215 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5217 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5219 link_bandwidth_kbps,
5221 &stream->timing.dsc_cfg))
5222 stream->timing.flags.DSC = 1;
5223 /* Overwrite the stream flag if DSC is enabled through debugfs */
5224 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5225 stream->timing.flags.DSC = 1;
5227 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5228 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5230 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5231 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5233 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5234 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5239 update_stream_scaling_settings(&mode, dm_state, stream);
5242 &stream->audio_info,
5246 update_stream_signal(stream, sink);
5248 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5249 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5251 if (stream->link->psr_settings.psr_feature_enabled) {
5253 // decide whether the stream supports VSC SDP colorimetry
5254 // before building the VSC info packet
5256 stream->use_vsc_sdp_for_colorimetry = false;
5257 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5258 stream->use_vsc_sdp_for_colorimetry =
5259 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5261 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5262 stream->use_vsc_sdp_for_colorimetry = true;
5264 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5267 dc_sink_release(sink);
5272 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5274 drm_crtc_cleanup(crtc);
5278 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5279 struct drm_crtc_state *state)
5281 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5283 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5285 dc_stream_release(cur->stream);
5288 __drm_atomic_helper_crtc_destroy_state(state);
5294 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5296 struct dm_crtc_state *state;
5299 dm_crtc_destroy_state(crtc, crtc->state);
5301 state = kzalloc(sizeof(*state), GFP_KERNEL);
5302 if (WARN_ON(!state))
5305 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5308 static struct drm_crtc_state *
5309 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5311 struct dm_crtc_state *state, *cur;
5313 cur = to_dm_crtc_state(crtc->state);
5315 if (WARN_ON(!crtc->state))
5318 state = kzalloc(sizeof(*state), GFP_KERNEL);
5322 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5325 state->stream = cur->stream;
5326 dc_stream_retain(state->stream);
5329 state->active_planes = cur->active_planes;
5330 state->vrr_infopacket = cur->vrr_infopacket;
5331 state->abm_level = cur->abm_level;
5332 state->vrr_supported = cur->vrr_supported;
5333 state->freesync_config = cur->freesync_config;
5334 state->crc_src = cur->crc_src;
5335 state->cm_has_degamma = cur->cm_has_degamma;
5336 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5337 #ifdef CONFIG_DEBUG_FS
5338 state->crc_window = cur->crc_window;
5340 /* TODO: Duplicate dc_stream once the stream object is flattened */
5342 return &state->base;
5345 #ifdef CONFIG_DEBUG_FS
5346 static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5347 struct drm_crtc_state *crtc_state,
5348 struct drm_property *property,
5351 struct drm_device *dev = crtc->dev;
5352 struct amdgpu_device *adev = drm_to_adev(dev);
5353 struct dm_crtc_state *dm_new_state =
5354 to_dm_crtc_state(crtc_state);
5356 if (property == adev->dm.crc_win_x_start_property)
5357 dm_new_state->crc_window.x_start = val;
5358 else if (property == adev->dm.crc_win_y_start_property)
5359 dm_new_state->crc_window.y_start = val;
5360 else if (property == adev->dm.crc_win_x_end_property)
5361 dm_new_state->crc_window.x_end = val;
5362 else if (property == adev->dm.crc_win_y_end_property)
5363 dm_new_state->crc_window.y_end = val;
5370 static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5371 const struct drm_crtc_state *state,
5372 struct drm_property *property,
5375 struct drm_device *dev = crtc->dev;
5376 struct amdgpu_device *adev = drm_to_adev(dev);
5377 struct dm_crtc_state *dm_state =
5378 to_dm_crtc_state(state);
5380 if (property == adev->dm.crc_win_x_start_property)
5381 *val = dm_state->crc_window.x_start;
5382 else if (property == adev->dm.crc_win_y_start_property)
5383 *val = dm_state->crc_window.y_start;
5384 else if (property == adev->dm.crc_win_x_end_property)
5385 *val = dm_state->crc_window.x_end;
5386 else if (property == adev->dm.crc_win_y_end_property)
5387 *val = dm_state->crc_window.y_end;
5395 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5397 enum dc_irq_source irq_source;
5398 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5399 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5402 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5404 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5406 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5407 acrtc->crtc_id, enable ? "en" : "dis", rc);
5411 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5413 enum dc_irq_source irq_source;
5414 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5415 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5416 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5420 /* vblank irq on -> Only need vupdate irq in vrr mode */
5421 if (amdgpu_dm_vrr_active(acrtc_state))
5422 rc = dm_set_vupdate_irq(crtc, true);
5424 /* vblank irq off -> vupdate irq off */
5425 rc = dm_set_vupdate_irq(crtc, false);
5431 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5432 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5435 static int dm_enable_vblank(struct drm_crtc *crtc)
5437 return dm_set_vblank(crtc, true);
5440 static void dm_disable_vblank(struct drm_crtc *crtc)
5442 dm_set_vblank(crtc, false);
5445 /* Only the options currently available to the driver are implemented */
5446 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5447 .reset = dm_crtc_reset_state,
5448 .destroy = amdgpu_dm_crtc_destroy,
5449 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5450 .set_config = drm_atomic_helper_set_config,
5451 .page_flip = drm_atomic_helper_page_flip,
5452 .atomic_duplicate_state = dm_crtc_duplicate_state,
5453 .atomic_destroy_state = dm_crtc_destroy_state,
5454 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5455 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5456 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5457 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5458 .enable_vblank = dm_enable_vblank,
5459 .disable_vblank = dm_disable_vblank,
5460 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5461 #ifdef CONFIG_DEBUG_FS
5462 .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5463 .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5467 static enum drm_connector_status
5468 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5471 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5475 * 1. This interface is NOT called in the context of an HPD irq.
5476 * 2. This interface *is called* in the context of a user-mode ioctl, which
5477 * makes it a bad place for *any* MST-related activity.
5480 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5481 !aconnector->fake_enable)
5482 connected = (aconnector->dc_sink != NULL);
5484 connected = (aconnector->base.force == DRM_FORCE_ON);
5486 update_subconnector_property(aconnector);
5488 return (connected ? connector_status_connected :
5489 connector_status_disconnected);
5492 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5493 struct drm_connector_state *connector_state,
5494 struct drm_property *property,
5497 struct drm_device *dev = connector->dev;
5498 struct amdgpu_device *adev = drm_to_adev(dev);
5499 struct dm_connector_state *dm_old_state =
5500 to_dm_connector_state(connector->state);
5501 struct dm_connector_state *dm_new_state =
5502 to_dm_connector_state(connector_state);
5506 if (property == dev->mode_config.scaling_mode_property) {
5507 enum amdgpu_rmx_type rmx_type;
5510 case DRM_MODE_SCALE_CENTER:
5511 rmx_type = RMX_CENTER;
5513 case DRM_MODE_SCALE_ASPECT:
5514 rmx_type = RMX_ASPECT;
5516 case DRM_MODE_SCALE_FULLSCREEN:
5517 rmx_type = RMX_FULL;
5519 case DRM_MODE_SCALE_NONE:
5525 if (dm_old_state->scaling == rmx_type)
5528 dm_new_state->scaling = rmx_type;
5530 } else if (property == adev->mode_info.underscan_hborder_property) {
5531 dm_new_state->underscan_hborder = val;
5533 } else if (property == adev->mode_info.underscan_vborder_property) {
5534 dm_new_state->underscan_vborder = val;
5536 } else if (property == adev->mode_info.underscan_property) {
5537 dm_new_state->underscan_enable = val;
5539 } else if (property == adev->mode_info.abm_level_property) {
5540 dm_new_state->abm_level = val;
5547 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5548 const struct drm_connector_state *state,
5549 struct drm_property *property,
5552 struct drm_device *dev = connector->dev;
5553 struct amdgpu_device *adev = drm_to_adev(dev);
5554 struct dm_connector_state *dm_state =
5555 to_dm_connector_state(state);
5558 if (property == dev->mode_config.scaling_mode_property) {
5559 switch (dm_state->scaling) {
5561 *val = DRM_MODE_SCALE_CENTER;
5564 *val = DRM_MODE_SCALE_ASPECT;
5567 *val = DRM_MODE_SCALE_FULLSCREEN;
5571 *val = DRM_MODE_SCALE_NONE;
5575 } else if (property == adev->mode_info.underscan_hborder_property) {
5576 *val = dm_state->underscan_hborder;
5578 } else if (property == adev->mode_info.underscan_vborder_property) {
5579 *val = dm_state->underscan_vborder;
5581 } else if (property == adev->mode_info.underscan_property) {
5582 *val = dm_state->underscan_enable;
5584 } else if (property == adev->mode_info.abm_level_property) {
5585 *val = dm_state->abm_level;
5592 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5594 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5596 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5599 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5601 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5602 const struct dc_link *link = aconnector->dc_link;
5603 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5604 struct amdgpu_display_manager *dm = &adev->dm;
5607 * Call only if mst_mgr was initialized before, since it's not done
5608 * for all connector types.
5610 if (aconnector->mst_mgr.dev)
5611 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5613 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5614 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5616 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5617 link->type != dc_connection_none &&
5618 dm->backlight_dev) {
5619 backlight_device_unregister(dm->backlight_dev);
5620 dm->backlight_dev = NULL;
5624 if (aconnector->dc_em_sink)
5625 dc_sink_release(aconnector->dc_em_sink);
5626 aconnector->dc_em_sink = NULL;
5627 if (aconnector->dc_sink)
5628 dc_sink_release(aconnector->dc_sink);
5629 aconnector->dc_sink = NULL;
5631 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5632 drm_connector_unregister(connector);
5633 drm_connector_cleanup(connector);
5634 if (aconnector->i2c) {
5635 i2c_del_adapter(&aconnector->i2c->base);
5636 kfree(aconnector->i2c);
5638 kfree(aconnector->dm_dp_aux.aux.name);
5643 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5645 struct dm_connector_state *state =
5646 to_dm_connector_state(connector->state);
5648 if (connector->state)
5649 __drm_atomic_helper_connector_destroy_state(connector->state);
5653 state = kzalloc(sizeof(*state), GFP_KERNEL);
5656 state->scaling = RMX_OFF;
5657 state->underscan_enable = false;
5658 state->underscan_hborder = 0;
5659 state->underscan_vborder = 0;
5660 state->base.max_requested_bpc = 8;
5661 state->vcpi_slots = 0;
5663 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5664 state->abm_level = amdgpu_dm_abm_level;
5666 __drm_atomic_helper_connector_reset(connector, &state->base);
5670 struct drm_connector_state *
5671 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5673 struct dm_connector_state *state =
5674 to_dm_connector_state(connector->state);
5676 struct dm_connector_state *new_state =
5677 kmemdup(state, sizeof(*state), GFP_KERNEL);
5682 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5684 new_state->freesync_capable = state->freesync_capable;
5685 new_state->abm_level = state->abm_level;
5686 new_state->scaling = state->scaling;
5687 new_state->underscan_enable = state->underscan_enable;
5688 new_state->underscan_hborder = state->underscan_hborder;
5689 new_state->underscan_vborder = state->underscan_vborder;
5690 new_state->vcpi_slots = state->vcpi_slots;
5691 new_state->pbn = state->pbn;
5692 return &new_state->base;
5696 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5698 struct amdgpu_dm_connector *amdgpu_dm_connector =
5699 to_amdgpu_dm_connector(connector);
5702 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5703 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5704 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5705 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5710 #if defined(CONFIG_DEBUG_FS)
5711 connector_debugfs_init(amdgpu_dm_connector);
5717 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5718 .reset = amdgpu_dm_connector_funcs_reset,
5719 .detect = amdgpu_dm_connector_detect,
5720 .fill_modes = drm_helper_probe_single_connector_modes,
5721 .destroy = amdgpu_dm_connector_destroy,
5722 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5723 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5724 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5725 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5726 .late_register = amdgpu_dm_connector_late_register,
5727 .early_unregister = amdgpu_dm_connector_unregister
5730 static int get_modes(struct drm_connector *connector)
5732 return amdgpu_dm_connector_get_modes(connector);
5735 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5737 struct dc_sink_init_data init_params = {
5738 .link = aconnector->dc_link,
5739 .sink_signal = SIGNAL_TYPE_VIRTUAL
5743 if (!aconnector->base.edid_blob_ptr) {
5744 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5745 aconnector->base.name);
5747 aconnector->base.force = DRM_FORCE_OFF;
5748 aconnector->base.override_edid = false;
5752 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5754 aconnector->edid = edid;
5756 aconnector->dc_em_sink = dc_link_add_remote_sink(
5757 aconnector->dc_link,
5759 (edid->extensions + 1) * EDID_LENGTH,
5762 if (aconnector->base.force == DRM_FORCE_ON) {
5763 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5764 aconnector->dc_link->local_sink :
5765 aconnector->dc_em_sink;
5766 dc_sink_retain(aconnector->dc_sink);
5770 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5772 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5775 * In case of a headless boot with force-on for a DP managed connector,
5776 * these settings have to be != 0 to get an initial modeset
5778 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5779 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5780 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5784 aconnector->base.override_edid = true;
5785 create_eml_sink(aconnector);
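/*
 * Create a stream and validate it with DC. If validation fails at the
 * requested colour depth, retry with progressively lower bpc (down to 6)
 * before giving up.
 */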
5788 static struct dc_stream_state *
5789 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5790 const struct drm_display_mode *drm_mode,
5791 const struct dm_connector_state *dm_state,
5792 const struct dc_stream_state *old_stream)
5794 struct drm_connector *connector = &aconnector->base;
5795 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5796 struct dc_stream_state *stream;
5797 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5798 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5799 enum dc_status dc_result = DC_OK;
5802 stream = create_stream_for_sink(aconnector, drm_mode,
5803 dm_state, old_stream,
5805 if (stream == NULL) {
5806 DRM_ERROR("Failed to create stream for sink!\n");
5810 dc_result = dc_validate_stream(adev->dm.dc, stream);
5812 if (dc_result != DC_OK) {
5813 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5818 dc_status_to_str(dc_result));
5820 dc_stream_release(stream);
5822 requested_bpc -= 2; /* lower bpc to retry validation */
5825 } while (stream == NULL && requested_bpc >= 6);
5830 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5831 struct drm_display_mode *mode)
5833 int result = MODE_ERROR;
5834 struct dc_sink *dc_sink;
5835 /* TODO: Unhardcode stream count */
5836 struct dc_stream_state *stream;
5837 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5839 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5840 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5844 * Only run this the first time mode_valid is called to initialize
5847 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5848 !aconnector->dc_em_sink)
5849 handle_edid_mgmt(aconnector);
5851 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5853 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5854 aconnector->base.force != DRM_FORCE_ON) {
5855 DRM_ERROR("dc_sink is NULL!\n");
5859 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5861 dc_stream_release(stream);
5866 /* TODO: error handling */
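/*
 * Pack the connector's hdr_output_metadata into an HDR static metadata
 * infopacket: an HDMI DRM (Dynamic Range and Mastering) infoframe for HDMI
 * sinks, or the equivalent SDP wrapper for DP/eDP sinks.
 */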
5870 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5871 struct dc_info_packet *out)
5873 struct hdmi_drm_infoframe frame;
5874 unsigned char buf[30]; /* 26 + 4 */
5878 memset(out, 0, sizeof(*out));
5880 if (!state->hdr_output_metadata)
5883 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5887 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5891 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5895 /* Prepare the infopacket for DC. */
5896 switch (state->connector->connector_type) {
5897 case DRM_MODE_CONNECTOR_HDMIA:
5898 out->hb0 = 0x87; /* type */
5899 out->hb1 = 0x01; /* version */
5900 out->hb2 = 0x1A; /* length */
5901 out->sb[0] = buf[3]; /* checksum */
5905 case DRM_MODE_CONNECTOR_DisplayPort:
5906 case DRM_MODE_CONNECTOR_eDP:
5907 out->hb0 = 0x00; /* sdp id, zero */
5908 out->hb1 = 0x87; /* type */
5909 out->hb2 = 0x1D; /* payload len - 1 */
5910 out->hb3 = (0x13 << 2); /* sdp version */
5911 out->sb[0] = 0x01; /* version */
5912 out->sb[1] = 0x1A; /* length */
5920 memcpy(&out->sb[i], &buf[4], 26);
5923 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5924 sizeof(out->sb), false);
5930 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5931 const struct drm_connector_state *new_state)
5933 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5934 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5936 if (old_blob != new_blob) {
5937 if (old_blob && new_blob &&
5938 old_blob->length == new_blob->length)
5939 return memcmp(old_blob->data, new_blob->data,
5949 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5950 struct drm_atomic_state *state)
5952 struct drm_connector_state *new_con_state =
5953 drm_atomic_get_new_connector_state(state, conn);
5954 struct drm_connector_state *old_con_state =
5955 drm_atomic_get_old_connector_state(state, conn);
5956 struct drm_crtc *crtc = new_con_state->crtc;
5957 struct drm_crtc_state *new_crtc_state;
5960 trace_amdgpu_dm_connector_atomic_check(new_con_state);
5965 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5966 struct dc_info_packet hdr_infopacket;
5968 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5972 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5973 if (IS_ERR(new_crtc_state))
5974 return PTR_ERR(new_crtc_state);
5977 * DC considers the stream backends changed if the
5978 * static metadata changes. Forcing the modeset also
5979 * gives a simple way for userspace to switch from
5980 * 8bpc to 10bpc when setting the metadata to enter
5983 * Changing the static metadata after it's been
5984 * set is permissible, however. So only force a
5985 * modeset if we're entering or exiting HDR.
5987 new_crtc_state->mode_changed =
5988 !old_con_state->hdr_output_metadata ||
5989 !new_con_state->hdr_output_metadata;
5995 static const struct drm_connector_helper_funcs
5996 amdgpu_dm_connector_helper_funcs = {
5998 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5999 * modes will be filtered by drm_mode_validate_size(), and those modes
6000 * are missing after the user starts lightdm. So we need to renew the modes
6001 * list in the get_modes callback, not just return the modes count
6003 .get_modes = get_modes,
6004 .mode_valid = amdgpu_dm_connector_mode_valid,
6005 .atomic_check = amdgpu_dm_connector_atomic_check,
6008 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6012 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6014 struct drm_atomic_state *state = new_crtc_state->state;
6015 struct drm_plane *plane;
6018 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6019 struct drm_plane_state *new_plane_state;
6021 /* Cursor planes are "fake". */
6022 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6025 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6027 if (!new_plane_state) {
6029 * The plane is enabled on the CRTC and hasn't changed
6030 * state. This means that it previously passed
6031 * validation and is therefore enabled.
6037 /* We need a framebuffer to be considered enabled. */
6038 num_active += (new_plane_state->fb != NULL);
6044 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6045 struct drm_crtc_state *new_crtc_state)
6047 struct dm_crtc_state *dm_new_crtc_state =
6048 to_dm_crtc_state(new_crtc_state);
6050 dm_new_crtc_state->active_planes = 0;
6052 if (!dm_new_crtc_state->stream)
6055 dm_new_crtc_state->active_planes =
6056 count_crtc_active_planes(new_crtc_state);
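/*
 * Atomic check for the CRTC: recount active planes, require the primary
 * plane whenever the CRTC is enabled, and let DC validate the attached
 * stream (if any).
 */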
6059 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6060 struct drm_atomic_state *state)
6062 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6064 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6065 struct dc *dc = adev->dm.dc;
6066 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6069 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6071 dm_update_crtc_active_planes(crtc, crtc_state);
6073 if (unlikely(!dm_crtc_state->stream &&
6074 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6080 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6081 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6082 * planes are disabled, which is not supported by the hardware. And there is legacy
6083 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6085 if (crtc_state->enable &&
6086 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6087 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6091 /* In some use cases, like reset, no stream is attached */
6092 if (!dm_crtc_state->stream)
6095 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6098 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6102 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6103 const struct drm_display_mode *mode,
6104 struct drm_display_mode *adjusted_mode)
6109 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6110 .disable = dm_crtc_helper_disable,
6111 .atomic_check = dm_crtc_helper_atomic_check,
6112 .mode_fixup = dm_crtc_helper_mode_fixup,
6113 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6116 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6121 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6123 switch (display_color_depth) {
6124 case COLOR_DEPTH_666:
6126 case COLOR_DEPTH_888:
6128 case COLOR_DEPTH_101010:
6130 case COLOR_DEPTH_121212:
6132 case COLOR_DEPTH_141414:
6134 case COLOR_DEPTH_161616:
6142 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6143 struct drm_crtc_state *crtc_state,
6144 struct drm_connector_state *conn_state)
6146 struct drm_atomic_state *state = crtc_state->state;
6147 struct drm_connector *connector = conn_state->connector;
6148 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6149 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6150 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6151 struct drm_dp_mst_topology_mgr *mst_mgr;
6152 struct drm_dp_mst_port *mst_port;
6153 enum dc_color_depth color_depth;
6155 bool is_y420 = false;
6157 if (!aconnector->port || !aconnector->dc_sink)
6160 mst_port = aconnector->port;
6161 mst_mgr = &aconnector->mst_port->mst_mgr;
6163 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6166 if (!state->duplicated) {
6167 int max_bpc = conn_state->max_requested_bpc;
6168 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6169 aconnector->force_yuv420_output;
6170 color_depth = convert_color_depth_from_display_info(connector,
6173 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6174 clock = adjusted_mode->clock;
6175 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6177 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6180 dm_new_connector_state->pbn,
6181 dm_mst_get_pbn_divider(aconnector->dc_link));
6182 if (dm_new_connector_state->vcpi_slots < 0) {
6183 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6184 return dm_new_connector_state->vcpi_slots;
6189 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6190 .disable = dm_encoder_helper_disable,
6191 .atomic_check = dm_encoder_helper_atomic_check
6194 #if defined(CONFIG_DRM_AMD_DC_DCN)
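/*
 * For MST streams that end up using DSC, the PBN computed during the
 * encoder atomic check (from the uncompressed bpp) may no longer match.
 * The helper below matches each MST connector to its DC stream, recomputes
 * the PBN from the DSC target bits_per_pixel, and redoes the atomic VCPI
 * allocation with DSC enabled on the MST port.
 */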
6195 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6196 struct dc_state *dc_state)
6198 struct dc_stream_state *stream = NULL;
6199 struct drm_connector *connector;
6200 struct drm_connector_state *new_con_state, *old_con_state;
6201 struct amdgpu_dm_connector *aconnector;
6202 struct dm_connector_state *dm_conn_state;
6203 int i, j, clock, bpp;
6204 int vcpi, pbn_div, pbn = 0;
6206 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6208 aconnector = to_amdgpu_dm_connector(connector);
6210 if (!aconnector->port)
6213 if (!new_con_state || !new_con_state->crtc)
6216 dm_conn_state = to_dm_connector_state(new_con_state);
6218 for (j = 0; j < dc_state->stream_count; j++) {
6219 stream = dc_state->streams[j];
6223 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6232 if (stream->timing.flags.DSC != 1) {
6233 drm_dp_mst_atomic_enable_dsc(state,
6241 pbn_div = dm_mst_get_pbn_divider(stream->link);
6242 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6243 clock = stream->timing.pix_clk_100hz / 10;
6244 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6245 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6252 dm_conn_state->pbn = pbn;
6253 dm_conn_state->vcpi_slots = vcpi;
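/*
 * DM plane states subclass drm_plane_state and carry a reference to the
 * backing dc_plane_state. The reset/duplicate/destroy callbacks below keep
 * that DC reference count balanced across atomic state lifecycles.
 */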
6259 static void dm_drm_plane_reset(struct drm_plane *plane)
6261 struct dm_plane_state *amdgpu_state = NULL;
6264 plane->funcs->atomic_destroy_state(plane, plane->state);
6266 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6267 WARN_ON(amdgpu_state == NULL);
6270 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6273 static struct drm_plane_state *
6274 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6276 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6278 old_dm_plane_state = to_dm_plane_state(plane->state);
6279 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6280 if (!dm_plane_state)
6283 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6285 if (old_dm_plane_state->dc_state) {
6286 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6287 dc_plane_state_retain(dm_plane_state->dc_state);
6290 return &dm_plane_state->base;
6293 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6294 struct drm_plane_state *state)
6296 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6298 if (dm_plane_state->dc_state)
6299 dc_plane_state_release(dm_plane_state->dc_state);
6301 drm_atomic_helper_plane_destroy_state(plane, state);
6304 static const struct drm_plane_funcs dm_plane_funcs = {
6305 .update_plane = drm_atomic_helper_update_plane,
6306 .disable_plane = drm_atomic_helper_disable_plane,
6307 .destroy = drm_primary_helper_destroy,
6308 .reset = dm_drm_plane_reset,
6309 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6310 .atomic_destroy_state = dm_drm_plane_destroy_state,
6311 .format_mod_supported = dm_plane_format_mod_supported,
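/*
 * prepare_fb reserves and pins the framebuffer BO in a scanout-capable
 * domain (VRAM for the cursor plane, otherwise whatever
 * amdgpu_display_supported_domains() allows), binds it to GART and records
 * the resulting GPU address in the amdgpu_framebuffer. For planes that just
 * received a new dc_plane_state it also fills in the DC buffer attributes,
 * since the address is not available during atomic check.
 */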
6314 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6315 struct drm_plane_state *new_state)
6317 struct amdgpu_framebuffer *afb;
6318 struct drm_gem_object *obj;
6319 struct amdgpu_device *adev;
6320 struct amdgpu_bo *rbo;
6321 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6322 struct list_head list;
6323 struct ttm_validate_buffer tv;
6324 struct ww_acquire_ctx ticket;
6328 if (!new_state->fb) {
6329 DRM_DEBUG_DRIVER("No FB bound\n");
6333 afb = to_amdgpu_framebuffer(new_state->fb);
6334 obj = new_state->fb->obj[0];
6335 rbo = gem_to_amdgpu_bo(obj);
6336 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6337 INIT_LIST_HEAD(&list);
6341 list_add(&tv.head, &list);
6343 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6345 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6349 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6350 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6352 domain = AMDGPU_GEM_DOMAIN_VRAM;
6354 r = amdgpu_bo_pin(rbo, domain);
6355 if (unlikely(r != 0)) {
6356 if (r != -ERESTARTSYS)
6357 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6358 ttm_eu_backoff_reservation(&ticket, &list);
6362 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6363 if (unlikely(r != 0)) {
6364 amdgpu_bo_unpin(rbo);
6365 ttm_eu_backoff_reservation(&ticket, &list);
6366 DRM_ERROR("%p bind failed\n", rbo);
6370 ttm_eu_backoff_reservation(&ticket, &list);
6372 afb->address = amdgpu_bo_gpu_offset(rbo);
6377 * We don't do surface updates on planes that have been newly created,
6378 * but we also don't have the afb->address during atomic check.
6380 * Fill in buffer attributes depending on the address here, but only on
6381 * newly created planes since they're not being used by DC yet and this
6382 * won't modify global state.
6384 dm_plane_state_old = to_dm_plane_state(plane->state);
6385 dm_plane_state_new = to_dm_plane_state(new_state);
6387 if (dm_plane_state_new->dc_state &&
6388 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6389 struct dc_plane_state *plane_state =
6390 dm_plane_state_new->dc_state;
6391 bool force_disable_dcc = !plane_state->dcc.enable;
6393 fill_plane_buffer_attributes(
6394 adev, afb, plane_state->format, plane_state->rotation,
6396 &plane_state->tiling_info, &plane_state->plane_size,
6397 &plane_state->dcc, &plane_state->address,
6398 afb->tmz_surface, force_disable_dcc);
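/* cleanup_fb undoes prepare_fb: reserve the old BO, unpin it and release it. */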
6404 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6405 struct drm_plane_state *old_state)
6407 struct amdgpu_bo *rbo;
6413 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6414 r = amdgpu_bo_reserve(rbo, false);
6416 DRM_ERROR("failed to reserve rbo before unpin\n");
6420 amdgpu_bo_unpin(rbo);
6421 amdgpu_bo_unreserve(rbo);
6422 amdgpu_bo_unref(&rbo);
6425 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6426 struct drm_crtc_state *new_crtc_state)
6428 int max_downscale = 0;
6429 int max_upscale = INT_MAX;
6431 /* TODO: These should be checked against DC plane caps */
6432 return drm_atomic_helper_check_plane_state(
6433 state, new_crtc_state, max_downscale, max_upscale, true, true);
6436 static int dm_plane_atomic_check(struct drm_plane *plane,
6437 struct drm_plane_state *state)
6439 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6440 struct dc *dc = adev->dm.dc;
6441 struct dm_plane_state *dm_plane_state;
6442 struct dc_scaling_info scaling_info;
6443 struct drm_crtc_state *new_crtc_state;
6446 trace_amdgpu_dm_plane_atomic_check(state);
6448 dm_plane_state = to_dm_plane_state(state);
6450 if (!dm_plane_state->dc_state)
6454 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6455 if (!new_crtc_state)
6458 ret = dm_plane_helper_check_state(state, new_crtc_state);
6462 ret = fill_dc_scaling_info(state, &scaling_info);
6466 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6472 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6473 struct drm_plane_state *new_plane_state)
6475 /* Only support async updates on cursor planes. */
6476 if (plane->type != DRM_PLANE_TYPE_CURSOR)
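/*
 * Async updates are restricted to the cursor plane (checked above). The
 * async update path copies the new fb and src/crtc rectangles straight into
 * the current plane state and reprograms the cursor immediately via
 * handle_cursor_update(), bypassing a full atomic commit.
 */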
6482 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6483 struct drm_plane_state *new_state)
6485 struct drm_plane_state *old_state =
6486 drm_atomic_get_old_plane_state(new_state->state, plane);
6488 trace_amdgpu_dm_atomic_update_cursor(new_state);
6490 swap(plane->state->fb, new_state->fb);
6492 plane->state->src_x = new_state->src_x;
6493 plane->state->src_y = new_state->src_y;
6494 plane->state->src_w = new_state->src_w;
6495 plane->state->src_h = new_state->src_h;
6496 plane->state->crtc_x = new_state->crtc_x;
6497 plane->state->crtc_y = new_state->crtc_y;
6498 plane->state->crtc_w = new_state->crtc_w;
6499 plane->state->crtc_h = new_state->crtc_h;
6501 handle_cursor_update(plane, old_state);
6504 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6505 .prepare_fb = dm_plane_helper_prepare_fb,
6506 .cleanup_fb = dm_plane_helper_cleanup_fb,
6507 .atomic_check = dm_plane_atomic_check,
6508 .atomic_async_check = dm_plane_atomic_async_check,
6509 .atomic_async_update = dm_plane_atomic_async_update
6513 * TODO: these are currently initialized to rgb formats only.
6514 * For future use cases we should either initialize them dynamically based on
6515 * plane capabilities, or initialize this array to all formats, so the internal
6516 * drm check will succeed, and let DC implement the proper check.
6518 static const uint32_t rgb_formats[] = {
6519 DRM_FORMAT_XRGB8888,
6520 DRM_FORMAT_ARGB8888,
6521 DRM_FORMAT_RGBA8888,
6522 DRM_FORMAT_XRGB2101010,
6523 DRM_FORMAT_XBGR2101010,
6524 DRM_FORMAT_ARGB2101010,
6525 DRM_FORMAT_ABGR2101010,
6526 DRM_FORMAT_XBGR8888,
6527 DRM_FORMAT_ABGR8888,
6531 static const uint32_t overlay_formats[] = {
6532 DRM_FORMAT_XRGB8888,
6533 DRM_FORMAT_ARGB8888,
6534 DRM_FORMAT_RGBA8888,
6535 DRM_FORMAT_XBGR8888,
6536 DRM_FORMAT_ABGR8888,
6540 static const u32 cursor_formats[] = {
6544 static int get_plane_formats(const struct drm_plane *plane,
6545 const struct dc_plane_cap *plane_cap,
6546 uint32_t *formats, int max_formats)
6548 int i, num_formats = 0;
6551 * TODO: Query support for each group of formats directly from
6552 * DC plane caps. This will require adding more formats to the
6556 switch (plane->type) {
6557 case DRM_PLANE_TYPE_PRIMARY:
6558 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6559 if (num_formats >= max_formats)
6562 formats[num_formats++] = rgb_formats[i];
6565 if (plane_cap && plane_cap->pixel_format_support.nv12)
6566 formats[num_formats++] = DRM_FORMAT_NV12;
6567 if (plane_cap && plane_cap->pixel_format_support.p010)
6568 formats[num_formats++] = DRM_FORMAT_P010;
6569 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6570 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6571 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6572 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6573 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6577 case DRM_PLANE_TYPE_OVERLAY:
6578 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6579 if (num_formats >= max_formats)
6582 formats[num_formats++] = overlay_formats[i];
6586 case DRM_PLANE_TYPE_CURSOR:
6587 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6588 if (num_formats >= max_formats)
6591 formats[num_formats++] = cursor_formats[i];
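/*
 * Plane initialization: collect the supported formats for the plane type
 * (plus the NV12/P010/FP16 variants when the DC plane caps allow them),
 * query the modifier list, register the universal plane, then attach the
 * blending and color-encoding/range properties the capabilities justify
 * and, on Bonaire and newer ASICs, a rotation property for non-cursor
 * planes.
 */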
6599 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6600 struct drm_plane *plane,
6601 unsigned long possible_crtcs,
6602 const struct dc_plane_cap *plane_cap)
6604 uint32_t formats[32];
6607 unsigned int supported_rotations;
6608 uint64_t *modifiers = NULL;
6610 num_formats = get_plane_formats(plane, plane_cap, formats,
6611 ARRAY_SIZE(formats));
6613 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6617 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6618 &dm_plane_funcs, formats, num_formats,
6619 modifiers, plane->type, NULL);
6624 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6625 plane_cap && plane_cap->per_pixel_alpha) {
6626 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6627 BIT(DRM_MODE_BLEND_PREMULTI);
6629 drm_plane_create_alpha_property(plane);
6630 drm_plane_create_blend_mode_property(plane, blend_caps);
6633 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6635 (plane_cap->pixel_format_support.nv12 ||
6636 plane_cap->pixel_format_support.p010)) {
6637 /* This only affects YUV formats. */
6638 drm_plane_create_color_properties(
6640 BIT(DRM_COLOR_YCBCR_BT601) |
6641 BIT(DRM_COLOR_YCBCR_BT709) |
6642 BIT(DRM_COLOR_YCBCR_BT2020),
6643 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6644 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6645 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6648 supported_rotations =
6649 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6650 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6652 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6653 plane->type != DRM_PLANE_TYPE_CURSOR)
6654 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6655 supported_rotations);
6657 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6659 /* Create (reset) the plane state */
6660 if (plane->funcs->reset)
6661 plane->funcs->reset(plane);
6666 #ifdef CONFIG_DEBUG_FS
6667 static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
6668 struct amdgpu_crtc *acrtc)
6670 drm_object_attach_property(&acrtc->base.base,
6671 dm->crc_win_x_start_property,
6673 drm_object_attach_property(&acrtc->base.base,
6674 dm->crc_win_y_start_property,
6676 drm_object_attach_property(&acrtc->base.base,
6677 dm->crc_win_x_end_property,
6679 drm_object_attach_property(&acrtc->base.base,
6680 dm->crc_win_y_end_property,
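/*
 * CRTC initialization: each amdgpu CRTC gets its own dedicated cursor
 * plane, is registered with the provided primary plane, inherits the
 * maximum cursor size from the DC caps and has DRM color management
 * enabled. When debugfs is built in, the CRC window properties are
 * attached as well.
 */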
6685 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6686 struct drm_plane *plane,
6687 uint32_t crtc_index)
6689 struct amdgpu_crtc *acrtc = NULL;
6690 struct drm_plane *cursor_plane;
6694 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6698 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6699 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6701 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6705 res = drm_crtc_init_with_planes(
6710 &amdgpu_dm_crtc_funcs, NULL);
6715 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6717 /* Create (reset) the plane state */
6718 if (acrtc->base.funcs->reset)
6719 acrtc->base.funcs->reset(&acrtc->base);
6721 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6722 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6724 acrtc->crtc_id = crtc_index;
6725 acrtc->base.enabled = false;
6726 acrtc->otg_inst = -1;
6728 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6729 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6730 true, MAX_COLOR_LUT_ENTRIES);
6731 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6732 #ifdef CONFIG_DEBUG_FS
6733 attach_crtc_crc_properties(dm, acrtc);
6739 kfree(cursor_plane);
6744 static int to_drm_connector_type(enum signal_type st)
6747 case SIGNAL_TYPE_HDMI_TYPE_A:
6748 return DRM_MODE_CONNECTOR_HDMIA;
6749 case SIGNAL_TYPE_EDP:
6750 return DRM_MODE_CONNECTOR_eDP;
6751 case SIGNAL_TYPE_LVDS:
6752 return DRM_MODE_CONNECTOR_LVDS;
6753 case SIGNAL_TYPE_RGB:
6754 return DRM_MODE_CONNECTOR_VGA;
6755 case SIGNAL_TYPE_DISPLAY_PORT:
6756 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6757 return DRM_MODE_CONNECTOR_DisplayPort;
6758 case SIGNAL_TYPE_DVI_DUAL_LINK:
6759 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6760 return DRM_MODE_CONNECTOR_DVID;
6761 case SIGNAL_TYPE_VIRTUAL:
6762 return DRM_MODE_CONNECTOR_VIRTUAL;
6765 return DRM_MODE_CONNECTOR_Unknown;
6769 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6771 struct drm_encoder *encoder;
6773 /* There is only one encoder per connector */
6774 drm_connector_for_each_possible_encoder(connector, encoder)
6780 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6782 struct drm_encoder *encoder;
6783 struct amdgpu_encoder *amdgpu_encoder;
6785 encoder = amdgpu_dm_connector_to_encoder(connector);
6787 if (encoder == NULL)
6790 amdgpu_encoder = to_amdgpu_encoder(encoder);
6792 amdgpu_encoder->native_mode.clock = 0;
6794 if (!list_empty(&connector->probed_modes)) {
6795 struct drm_display_mode *preferred_mode = NULL;
6797 list_for_each_entry(preferred_mode,
6798 &connector->probed_modes,
6800 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6801 amdgpu_encoder->native_mode = *preferred_mode;
6809 static struct drm_display_mode *
6810 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6812 int hdisplay, int vdisplay)
6814 struct drm_device *dev = encoder->dev;
6815 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6816 struct drm_display_mode *mode = NULL;
6817 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6819 mode = drm_mode_duplicate(dev, native_mode);
6824 mode->hdisplay = hdisplay;
6825 mode->vdisplay = vdisplay;
6826 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6827 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6833 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6834 struct drm_connector *connector)
6836 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6837 struct drm_display_mode *mode = NULL;
6838 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6839 struct amdgpu_dm_connector *amdgpu_dm_connector =
6840 to_amdgpu_dm_connector(connector);
6844 char name[DRM_DISPLAY_MODE_LEN];
6847 } common_modes[] = {
6848 { "640x480", 640, 480},
6849 { "800x600", 800, 600},
6850 { "1024x768", 1024, 768},
6851 { "1280x720", 1280, 720},
6852 { "1280x800", 1280, 800},
6853 {"1280x1024", 1280, 1024},
6854 { "1440x900", 1440, 900},
6855 {"1680x1050", 1680, 1050},
6856 {"1600x1200", 1600, 1200},
6857 {"1920x1080", 1920, 1080},
6858 {"1920x1200", 1920, 1200}
6861 n = ARRAY_SIZE(common_modes);
6863 for (i = 0; i < n; i++) {
6864 struct drm_display_mode *curmode = NULL;
6865 bool mode_existed = false;
6867 if (common_modes[i].w > native_mode->hdisplay ||
6868 common_modes[i].h > native_mode->vdisplay ||
6869 (common_modes[i].w == native_mode->hdisplay &&
6870 common_modes[i].h == native_mode->vdisplay))
6873 list_for_each_entry(curmode, &connector->probed_modes, head) {
6874 if (common_modes[i].w == curmode->hdisplay &&
6875 common_modes[i].h == curmode->vdisplay) {
6876 mode_existed = true;
6884 mode = amdgpu_dm_create_common_mode(encoder,
6885 common_modes[i].name, common_modes[i].w,
6887 drm_mode_probed_add(connector, mode);
6888 amdgpu_dm_connector->num_modes++;
6892 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6895 struct amdgpu_dm_connector *amdgpu_dm_connector =
6896 to_amdgpu_dm_connector(connector);
6899 /* empty probed_modes */
6900 INIT_LIST_HEAD(&connector->probed_modes);
6901 amdgpu_dm_connector->num_modes =
6902 drm_add_edid_modes(connector, edid);
6904 /* Sort the probed modes before calling
6905 * amdgpu_dm_get_native_mode(), since an EDID can contain
6906 * more than one preferred mode. Modes later in the probed
6907 * mode list may have a higher preferred resolution; for
6908 * example, 3840x2160 in the base EDID preferred timing and
6909 * 4096x2160 as the preferred resolution in a later DID
6910 * extension block.
6912 drm_mode_sort(&connector->probed_modes);
6913 amdgpu_dm_get_native_mode(connector);
6915 amdgpu_dm_connector->num_modes = 0;
6919 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6921 struct amdgpu_dm_connector *amdgpu_dm_connector =
6922 to_amdgpu_dm_connector(connector);
6923 struct drm_encoder *encoder;
6924 struct edid *edid = amdgpu_dm_connector->edid;
6926 encoder = amdgpu_dm_connector_to_encoder(connector);
6928 if (!drm_edid_is_valid(edid)) {
6929 amdgpu_dm_connector->num_modes =
6930 drm_add_modes_noedid(connector, 640, 480);
6932 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6933 amdgpu_dm_connector_add_common_modes(encoder, connector);
6935 amdgpu_dm_fbc_init(connector);
6937 return amdgpu_dm_connector->num_modes;
6940 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6941 struct amdgpu_dm_connector *aconnector,
6943 struct dc_link *link,
6946 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6949 * Some of the properties below require access to state, like bpc.
6950 * Allocate some default initial connector state with our reset helper.
6952 if (aconnector->base.funcs->reset)
6953 aconnector->base.funcs->reset(&aconnector->base);
6955 aconnector->connector_id = link_index;
6956 aconnector->dc_link = link;
6957 aconnector->base.interlace_allowed = false;
6958 aconnector->base.doublescan_allowed = false;
6959 aconnector->base.stereo_allowed = false;
6960 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6961 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6962 aconnector->audio_inst = -1;
6963 mutex_init(&aconnector->hpd_lock);
6966 * Configure HPD hot-plug support: connector->polled defaults to 0,
6967 * which means HPD hot plug is not supported.
6969 switch (connector_type) {
6970 case DRM_MODE_CONNECTOR_HDMIA:
6971 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6972 aconnector->base.ycbcr_420_allowed =
6973 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6975 case DRM_MODE_CONNECTOR_DisplayPort:
6976 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6977 aconnector->base.ycbcr_420_allowed =
6978 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6980 case DRM_MODE_CONNECTOR_DVID:
6981 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6987 drm_object_attach_property(&aconnector->base.base,
6988 dm->ddev->mode_config.scaling_mode_property,
6989 DRM_MODE_SCALE_NONE);
6991 drm_object_attach_property(&aconnector->base.base,
6992 adev->mode_info.underscan_property,
6994 drm_object_attach_property(&aconnector->base.base,
6995 adev->mode_info.underscan_hborder_property,
6997 drm_object_attach_property(&aconnector->base.base,
6998 adev->mode_info.underscan_vborder_property,
7001 if (!aconnector->mst_port)
7002 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7004 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7005 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7006 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7008 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7009 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7010 drm_object_attach_property(&aconnector->base.base,
7011 adev->mode_info.abm_level_property, 0);
7014 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7015 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7016 connector_type == DRM_MODE_CONNECTOR_eDP) {
7017 drm_object_attach_property(
7018 &aconnector->base.base,
7019 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7021 if (!aconnector->mst_port)
7022 drm_connector_attach_vrr_capable_property(&aconnector->base);
7024 #ifdef CONFIG_DRM_AMD_DC_HDCP
7025 if (adev->dm.hdcp_workqueue)
7026 drm_connector_attach_content_protection_property(&aconnector->base, true);
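/*
 * The DM i2c adapter below translates Linux i2c_msg transfers into DC
 * i2c_command payloads (preserving the read/write direction, address,
 * length and buffer of each message) and submits them to DC over the DDC
 * channel associated with the link.
 */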
7031 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7032 struct i2c_msg *msgs, int num)
7034 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7035 struct ddc_service *ddc_service = i2c->ddc_service;
7036 struct i2c_command cmd;
7040 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7045 cmd.number_of_payloads = num;
7046 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7049 for (i = 0; i < num; i++) {
7050 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7051 cmd.payloads[i].address = msgs[i].addr;
7052 cmd.payloads[i].length = msgs[i].len;
7053 cmd.payloads[i].data = msgs[i].buf;
7057 ddc_service->ctx->dc,
7058 ddc_service->ddc_pin->hw_info.ddc_channel,
7062 kfree(cmd.payloads);
7066 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7068 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7071 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7072 .master_xfer = amdgpu_dm_i2c_xfer,
7073 .functionality = amdgpu_dm_i2c_func,
7076 static struct amdgpu_i2c_adapter *
7077 create_i2c(struct ddc_service *ddc_service,
7081 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7082 struct amdgpu_i2c_adapter *i2c;
7084 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7087 i2c->base.owner = THIS_MODULE;
7088 i2c->base.class = I2C_CLASS_DDC;
7089 i2c->base.dev.parent = &adev->pdev->dev;
7090 i2c->base.algo = &amdgpu_dm_i2c_algo;
7091 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7092 i2c_set_adapdata(&i2c->base, i2c);
7093 i2c->ddc_service = ddc_service;
7094 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7101 * Note: this function assumes that dc_link_detect() was called for the
7102 * dc_link which will be represented by this aconnector.
7104 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7105 struct amdgpu_dm_connector *aconnector,
7106 uint32_t link_index,
7107 struct amdgpu_encoder *aencoder)
7111 struct dc *dc = dm->dc;
7112 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7113 struct amdgpu_i2c_adapter *i2c;
7115 link->priv = aconnector;
7117 DRM_DEBUG_DRIVER("%s()\n", __func__);
7119 i2c = create_i2c(link->ddc, link->link_index, &res);
7121 DRM_ERROR("Failed to create i2c adapter data\n");
7125 aconnector->i2c = i2c;
7126 res = i2c_add_adapter(&i2c->base);
7129 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7133 connector_type = to_drm_connector_type(link->connector_signal);
7135 res = drm_connector_init_with_ddc(
7138 &amdgpu_dm_connector_funcs,
7143 DRM_ERROR("connector_init failed\n");
7144 aconnector->connector_id = -1;
7148 drm_connector_helper_add(
7150 &amdgpu_dm_connector_helper_funcs);
7152 amdgpu_dm_connector_init_helper(
7159 drm_connector_attach_encoder(
7160 &aconnector->base, &aencoder->base);
7162 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7163 || connector_type == DRM_MODE_CONNECTOR_eDP)
7164 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7169 aconnector->i2c = NULL;
7174 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7176 switch (adev->mode_info.num_crtc) {
7193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7194 struct amdgpu_encoder *aencoder,
7195 uint32_t link_index)
7197 struct amdgpu_device *adev = drm_to_adev(dev);
7199 int res = drm_encoder_init(dev,
7201 &amdgpu_dm_encoder_funcs,
7202 DRM_MODE_ENCODER_TMDS,
7205 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7208 aencoder->encoder_id = link_index;
7210 aencoder->encoder_id = -1;
7212 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7217 static void manage_dm_interrupts(struct amdgpu_device *adev,
7218 struct amdgpu_crtc *acrtc,
7222 * We have no guarantee that the frontend index maps to the same
7223 * backend index - some even map to more than one.
7225 * TODO: Use a different interrupt or check DC itself for the mapping.
7228 amdgpu_display_crtc_idx_to_irq_type(
7233 drm_crtc_vblank_on(&acrtc->base);
7236 &adev->pageflip_irq,
7242 &adev->pageflip_irq,
7244 drm_crtc_vblank_off(&acrtc->base);
7248 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7249 struct amdgpu_crtc *acrtc)
7252 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7255 * This reads the current state for the IRQ and forcibly reapplies
7256 * the setting to hardware.
7258 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7262 is_scaling_state_different(const struct dm_connector_state *dm_state,
7263 const struct dm_connector_state *old_dm_state)
7265 if (dm_state->scaling != old_dm_state->scaling)
7267 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7268 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7270 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7271 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7273 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7274 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7279 #ifdef CONFIG_DRM_AMD_DC_HDCP
7280 static bool is_content_protection_different(struct drm_connector_state *state,
7281 const struct drm_connector_state *old_state,
7282 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7284 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7285 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7287 /* Handle: Type0/1 change */
7288 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7289 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7290 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7294 /* CP is being re-enabled, ignore this.
7296 * Handles: ENABLED -> DESIRED
7298 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7299 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7300 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7304 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7306 * Handles: UNDESIRED -> ENABLED
7308 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7309 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7310 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7312 /* Check that something is actually connected and enabled; otherwise we would
7313 * start HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7315 * Handles: DESIRED -> DESIRED (Special case)
7317 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7318 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7319 dm_con_state->update_hdcp = false;
7324 * Handles: UNDESIRED -> UNDESIRED
7325 * DESIRED -> DESIRED
7326 * ENABLED -> ENABLED
7328 if (old_state->content_protection == state->content_protection)
7332 * Handles: UNDESIRED -> DESIRED
7333 * DESIRED -> UNDESIRED
7334 * ENABLED -> UNDESIRED
7336 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7340 * Handles: DESIRED -> ENABLED
7346 static void remove_stream(struct amdgpu_device *adev,
7347 struct amdgpu_crtc *acrtc,
7348 struct dc_stream_state *stream)
7350 /* this is the update mode case */
7352 acrtc->otg_inst = -1;
7353 acrtc->enabled = false;
7356 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7357 struct dc_cursor_position *position)
7359 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7361 int xorigin = 0, yorigin = 0;
7363 position->enable = false;
7367 if (!crtc || !plane->state->fb)
7370 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7371 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7372 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7374 plane->state->crtc_w,
7375 plane->state->crtc_h);
7379 x = plane->state->crtc_x;
7380 y = plane->state->crtc_y;
7382 if (x <= -amdgpu_crtc->max_cursor_width ||
7383 y <= -amdgpu_crtc->max_cursor_height)
7387 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7391 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7394 position->enable = true;
7395 position->translate_by_source = true;
7398 position->x_hotspot = xorigin;
7399 position->y_hotspot = yorigin;
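/*
 * handle_cursor_update() programs the hardware cursor through DC: it
 * computes the on-screen position (clamping negative coordinates into the
 * hotspot), and either disables the cursor when it is fully off-screen or
 * sets the cursor attributes (address, size, pitch, pre-multiplied-alpha
 * color format) followed by the new position, all under the dc_lock.
 */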
7404 static void handle_cursor_update(struct drm_plane *plane,
7405 struct drm_plane_state *old_plane_state)
7407 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7408 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7409 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7410 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7411 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7412 uint64_t address = afb ? afb->address : 0;
7413 struct dc_cursor_position position;
7414 struct dc_cursor_attributes attributes;
7417 if (!plane->state->fb && !old_plane_state->fb)
7420 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7422 amdgpu_crtc->crtc_id,
7423 plane->state->crtc_w,
7424 plane->state->crtc_h);
7426 ret = get_cursor_position(plane, crtc, &position);
7430 if (!position.enable) {
7431 /* turn off cursor */
7432 if (crtc_state && crtc_state->stream) {
7433 mutex_lock(&adev->dm.dc_lock);
7434 dc_stream_set_cursor_position(crtc_state->stream,
7436 mutex_unlock(&adev->dm.dc_lock);
7441 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7442 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7444 memset(&attributes, 0, sizeof(attributes));
7445 attributes.address.high_part = upper_32_bits(address);
7446 attributes.address.low_part = lower_32_bits(address);
7447 attributes.width = plane->state->crtc_w;
7448 attributes.height = plane->state->crtc_h;
7449 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7450 attributes.rotation_angle = 0;
7451 attributes.attribute_flags.value = 0;
7453 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7455 if (crtc_state->stream) {
7456 mutex_lock(&adev->dm.dc_lock);
7457 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7459 DRM_ERROR("DC failed to set cursor attributes\n");
7461 if (!dc_stream_set_cursor_position(crtc_state->stream,
7463 DRM_ERROR("DC failed to set cursor position\n");
7464 mutex_unlock(&adev->dm.dc_lock);
7468 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7471 assert_spin_locked(&acrtc->base.dev->event_lock);
7472 WARN_ON(acrtc->event);
7474 acrtc->event = acrtc->base.state->event;
7476 /* Set the flip status */
7477 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7479 /* Mark this event as consumed */
7480 acrtc->base.state->event = NULL;
7482 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7486 static void update_freesync_state_on_stream(
7487 struct amdgpu_display_manager *dm,
7488 struct dm_crtc_state *new_crtc_state,
7489 struct dc_stream_state *new_stream,
7490 struct dc_plane_state *surface,
7491 u32 flip_timestamp_in_us)
7493 struct mod_vrr_params vrr_params;
7494 struct dc_info_packet vrr_infopacket = {0};
7495 struct amdgpu_device *adev = dm->adev;
7496 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7497 unsigned long flags;
7503 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7504 * For now it's sufficient to just guard against these conditions.
7507 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7510 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7511 vrr_params = acrtc->dm_irq_params.vrr_params;
7514 mod_freesync_handle_preflip(
7515 dm->freesync_module,
7518 flip_timestamp_in_us,
7521 if (adev->family < AMDGPU_FAMILY_AI &&
7522 amdgpu_dm_vrr_active(new_crtc_state)) {
7523 mod_freesync_handle_v_update(dm->freesync_module,
7524 new_stream, &vrr_params);
7526 /* Need to call this before the frame ends. */
7527 dc_stream_adjust_vmin_vmax(dm->dc,
7528 new_crtc_state->stream,
7529 &vrr_params.adjust);
7533 mod_freesync_build_vrr_infopacket(
7534 dm->freesync_module,
7538 TRANSFER_FUNC_UNKNOWN,
7541 new_crtc_state->freesync_timing_changed |=
7542 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7544 sizeof(vrr_params.adjust)) != 0);
7546 new_crtc_state->freesync_vrr_info_changed |=
7547 (memcmp(&new_crtc_state->vrr_infopacket,
7549 sizeof(vrr_infopacket)) != 0);
7551 acrtc->dm_irq_params.vrr_params = vrr_params;
7552 new_crtc_state->vrr_infopacket = vrr_infopacket;
7554 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7555 new_stream->vrr_infopacket = vrr_infopacket;
7557 if (new_crtc_state->freesync_vrr_info_changed)
7558 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7559 new_crtc_state->base.crtc->base.id,
7560 (int)new_crtc_state->base.vrr_enabled,
7561 (int)vrr_params.state);
7563 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
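/*
 * update_stream_irq_parameters() recomputes the VRR parameters from the
 * CRTC's freesync config and copies the config, the active plane count and
 * the VRR parameters into acrtc->dm_irq_params under the event lock, so
 * the vblank/vupdate interrupt handlers observe a consistent snapshot.
 */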
7566 static void update_stream_irq_parameters(
7567 struct amdgpu_display_manager *dm,
7568 struct dm_crtc_state *new_crtc_state)
7570 struct dc_stream_state *new_stream = new_crtc_state->stream;
7571 struct mod_vrr_params vrr_params;
7572 struct mod_freesync_config config = new_crtc_state->freesync_config;
7573 struct amdgpu_device *adev = dm->adev;
7574 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7575 unsigned long flags;
7581 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7582 * For now it's sufficient to just guard against these conditions.
7584 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7587 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7588 vrr_params = acrtc->dm_irq_params.vrr_params;
7590 if (new_crtc_state->vrr_supported &&
7591 config.min_refresh_in_uhz &&
7592 config.max_refresh_in_uhz) {
7593 config.state = new_crtc_state->base.vrr_enabled ?
7594 VRR_STATE_ACTIVE_VARIABLE :
7597 config.state = VRR_STATE_UNSUPPORTED;
7600 mod_freesync_build_vrr_params(dm->freesync_module,
7602 &config, &vrr_params);
7604 new_crtc_state->freesync_timing_changed |=
7605 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7606 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7608 new_crtc_state->freesync_config = config;
7609 /* Copy state for access from DM IRQ handler */
7610 acrtc->dm_irq_params.freesync_config = config;
7611 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7612 acrtc->dm_irq_params.vrr_params = vrr_params;
7613 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7616 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7617 struct dm_crtc_state *new_state)
7619 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7620 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7622 if (!old_vrr_active && new_vrr_active) {
7623 /* Transition VRR inactive -> active:
7624 * While VRR is active, we must not disable the vblank irq, as a
7625 * re-enable after a disable would compute bogus vblank/pflip
7626 * timestamps if it happened inside the display front-porch.
7628 * We also need vupdate irq for the actual core vblank handling
7631 dm_set_vupdate_irq(new_state->base.crtc, true);
7632 drm_crtc_vblank_get(new_state->base.crtc);
7633 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7634 __func__, new_state->base.crtc->base.id);
7635 } else if (old_vrr_active && !new_vrr_active) {
7636 /* Transition VRR active -> inactive:
7637 * Allow vblank irq disable again for fixed refresh rate.
7639 dm_set_vupdate_irq(new_state->base.crtc, false);
7640 drm_crtc_vblank_put(new_state->base.crtc);
7641 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7642 __func__, new_state->base.crtc->base.id);
7646 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7648 struct drm_plane *plane;
7649 struct drm_plane_state *old_plane_state, *new_plane_state;
7653 * TODO: Make this per-stream so we don't issue redundant updates for
7654 * commits with multiple streams.
7656 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7658 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7659 handle_cursor_update(plane, old_plane_state);
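/*
 * amdgpu_dm_commit_planes() is the per-CRTC plane programming step of the
 * commit tail: it builds a dc_surface_update bundle for every non-cursor
 * plane on the CRTC (scaling, plane info, flip address, optional color
 * management), waits for BO fences and for the target vblank when a flip
 * is throttled, arms the pageflip event, and finally hands the bundle plus
 * any stream updates to dc_commit_updates_for_stream() with PSR
 * enable/disable handling around it.
 */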
7662 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7663 struct dc_state *dc_state,
7664 struct drm_device *dev,
7665 struct amdgpu_display_manager *dm,
7666 struct drm_crtc *pcrtc,
7667 bool wait_for_vblank)
7670 uint64_t timestamp_ns;
7671 struct drm_plane *plane;
7672 struct drm_plane_state *old_plane_state, *new_plane_state;
7673 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7674 struct drm_crtc_state *new_pcrtc_state =
7675 drm_atomic_get_new_crtc_state(state, pcrtc);
7676 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7677 struct dm_crtc_state *dm_old_crtc_state =
7678 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7679 int planes_count = 0, vpos, hpos;
7681 unsigned long flags;
7682 struct amdgpu_bo *abo;
7683 uint32_t target_vblank, last_flip_vblank;
7684 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7685 bool pflip_present = false;
7687 struct dc_surface_update surface_updates[MAX_SURFACES];
7688 struct dc_plane_info plane_infos[MAX_SURFACES];
7689 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7690 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7691 struct dc_stream_update stream_update;
7694 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7697 dm_error("Failed to allocate update bundle\n");
7702 * Disable the cursor first if we're disabling all the planes.
7703 * It'll remain on the screen after the planes are re-enabled
7706 if (acrtc_state->active_planes == 0)
7707 amdgpu_dm_commit_cursors(state);
7709 /* update planes when needed */
7710 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7711 struct drm_crtc *crtc = new_plane_state->crtc;
7712 struct drm_crtc_state *new_crtc_state;
7713 struct drm_framebuffer *fb = new_plane_state->fb;
7714 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7715 bool plane_needs_flip;
7716 struct dc_plane_state *dc_plane;
7717 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7719 /* Cursor plane is handled after stream updates */
7720 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7723 if (!fb || !crtc || pcrtc != crtc)
7726 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7727 if (!new_crtc_state->active)
7730 dc_plane = dm_new_plane_state->dc_state;
7732 bundle->surface_updates[planes_count].surface = dc_plane;
7733 if (new_pcrtc_state->color_mgmt_changed) {
7734 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7735 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7736 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7739 fill_dc_scaling_info(new_plane_state,
7740 &bundle->scaling_infos[planes_count]);
7742 bundle->surface_updates[planes_count].scaling_info =
7743 &bundle->scaling_infos[planes_count];
7745 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7747 pflip_present = pflip_present || plane_needs_flip;
7749 if (!plane_needs_flip) {
7754 abo = gem_to_amdgpu_bo(fb->obj[0]);
7757 * Wait for all fences on this FB. Do a limited wait to avoid a
7758 * deadlock during GPU reset, when this fence will not signal
7759 * but we hold the reservation lock for the BO.
7761 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7763 msecs_to_jiffies(5000));
7764 if (unlikely(r <= 0))
7765 DRM_ERROR("Waiting for fences timed out!");
7767 fill_dc_plane_info_and_addr(
7768 dm->adev, new_plane_state,
7770 &bundle->plane_infos[planes_count],
7771 &bundle->flip_addrs[planes_count].address,
7772 afb->tmz_surface, false);
7774 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7775 new_plane_state->plane->index,
7776 bundle->plane_infos[planes_count].dcc.enable);
7778 bundle->surface_updates[planes_count].plane_info =
7779 &bundle->plane_infos[planes_count];
7782 * Only allow immediate flips for fast updates that don't
7783 * change FB pitch, DCC state, rotation or mirroring.
7785 bundle->flip_addrs[planes_count].flip_immediate =
7786 crtc->state->async_flip &&
7787 acrtc_state->update_type == UPDATE_TYPE_FAST;
7789 timestamp_ns = ktime_get_ns();
7790 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7791 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7792 bundle->surface_updates[planes_count].surface = dc_plane;
7794 if (!bundle->surface_updates[planes_count].surface) {
7795 DRM_ERROR("No surface for CRTC: id=%d\n",
7796 acrtc_attach->crtc_id);
7800 if (plane == pcrtc->primary)
7801 update_freesync_state_on_stream(
7804 acrtc_state->stream,
7806 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7808 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7810 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7811 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7817 if (pflip_present) {
7819 /* Use old throttling in non-vrr fixed refresh rate mode
7820 * to keep flip scheduling based on target vblank counts
7821 * working in a backwards compatible way, e.g., for
7822 * clients using the GLX_OML_sync_control extension or
7823 * DRI3/Present extension with defined target_msc.
7825 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7828 /* For variable refresh rate mode only:
7829 * Get vblank of last completed flip to avoid > 1 vrr
7830 * flips per video frame by use of throttling, but allow
7831 * flip programming anywhere in the possibly large
7832 * variable vrr vblank interval for fine-grained flip
7833 * timing control and more opportunity to avoid stutter
7834 * on late submission of flips.
7836 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7837 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7838 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7841 target_vblank = last_flip_vblank + wait_for_vblank;
7844 * Wait until we're out of the vertical blank period before the one
7845 * targeted by the flip
7847 while ((acrtc_attach->enabled &&
7848 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7849 0, &vpos, &hpos, NULL,
7850 NULL, &pcrtc->hwmode)
7851 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7852 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7853 (int)(target_vblank -
7854 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7855 usleep_range(1000, 1100);
7859 * Prepare the flip event for the pageflip interrupt to handle.
7861 * This only works in the case where we've already turned on the
7862 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7863 * from 0 -> n planes we have to skip a hardware generated event
7864 * and rely on sending it from software.
7866 if (acrtc_attach->base.state->event &&
7867 acrtc_state->active_planes > 0) {
7868 drm_crtc_vblank_get(pcrtc);
7870 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7872 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7873 prepare_flip_isr(acrtc_attach);
7875 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7878 if (acrtc_state->stream) {
7879 if (acrtc_state->freesync_vrr_info_changed)
7880 bundle->stream_update.vrr_infopacket =
7881 &acrtc_state->stream->vrr_infopacket;
7885 /* Update the planes if changed or disable if we don't have any. */
7886 if ((planes_count || acrtc_state->active_planes == 0) &&
7887 acrtc_state->stream) {
7888 bundle->stream_update.stream = acrtc_state->stream;
7889 if (new_pcrtc_state->mode_changed) {
7890 bundle->stream_update.src = acrtc_state->stream->src;
7891 bundle->stream_update.dst = acrtc_state->stream->dst;
7894 if (new_pcrtc_state->color_mgmt_changed) {
7896 * TODO: This isn't fully correct since we've actually
7897 * already modified the stream in place.
7899 bundle->stream_update.gamut_remap =
7900 &acrtc_state->stream->gamut_remap_matrix;
7901 bundle->stream_update.output_csc_transform =
7902 &acrtc_state->stream->csc_color_matrix;
7903 bundle->stream_update.out_transfer_func =
7904 acrtc_state->stream->out_transfer_func;
7907 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7908 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7909 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7912 * If FreeSync state on the stream has changed then we need to
7913 * re-adjust the min/max bounds now that DC doesn't handle this
7914 * as part of commit.
7916 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7917 amdgpu_dm_vrr_active(acrtc_state)) {
7918 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7919 dc_stream_adjust_vmin_vmax(
7920 dm->dc, acrtc_state->stream,
7921 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7922 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7924 mutex_lock(&dm->dc_lock);
7925 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7926 acrtc_state->stream->link->psr_settings.psr_allow_active)
7927 amdgpu_dm_psr_disable(acrtc_state->stream);
7929 dc_commit_updates_for_stream(dm->dc,
7930 bundle->surface_updates,
7932 acrtc_state->stream,
7933 &bundle->stream_update,
7937 * Enable or disable the interrupts on the backend.
7939 * Most pipes are put into power gating when unused.
7941 * When power gating is enabled on a pipe, its interrupt
7942 * enablement state is lost by the time the pipe is ungated.
7944 * So we need to update the IRQ control state in hardware
7945 * whenever the pipe turns on (since it could be previously
7946 * power gated) or off (since some pipes can't be power gated
7949 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7950 dm_update_pflip_irq_state(drm_to_adev(dev),
7953 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7954 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7955 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7956 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7957 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7958 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7959 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7960 amdgpu_dm_psr_enable(acrtc_state->stream);
7963 mutex_unlock(&dm->dc_lock);
7967 * Update cursor state *after* programming all the planes.
7968 * This avoids redundant programming in the case where we're going
7969 * to be disabling a single plane - those pipes are being disabled.
7971 if (acrtc_state->active_planes)
7972 amdgpu_dm_commit_cursors(state);
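/*
 * amdgpu_dm_commit_audio() keeps the audio side in sync with modesets:
 * connectors that changed CRTC, or whose CRTC went through a modeset, get
 * an ELD-removal notification first; then connectors whose new CRTC has a
 * modeset and a DC stream pick up the audio instance from the DC stream
 * status and are re-announced.
 */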
7978 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7979 struct drm_atomic_state *state)
7981 struct amdgpu_device *adev = drm_to_adev(dev);
7982 struct amdgpu_dm_connector *aconnector;
7983 struct drm_connector *connector;
7984 struct drm_connector_state *old_con_state, *new_con_state;
7985 struct drm_crtc_state *new_crtc_state;
7986 struct dm_crtc_state *new_dm_crtc_state;
7987 const struct dc_stream_status *status;
7990 /* Notify device removals. */
7991 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7992 if (old_con_state->crtc != new_con_state->crtc) {
7993 /* CRTC changes require notification. */
7997 if (!new_con_state->crtc)
8000 new_crtc_state = drm_atomic_get_new_crtc_state(
8001 state, new_con_state->crtc);
8003 if (!new_crtc_state)
8006 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8010 aconnector = to_amdgpu_dm_connector(connector);
8012 mutex_lock(&adev->dm.audio_lock);
8013 inst = aconnector->audio_inst;
8014 aconnector->audio_inst = -1;
8015 mutex_unlock(&adev->dm.audio_lock);
8017 amdgpu_dm_audio_eld_notify(adev, inst);
8020 /* Notify audio device additions. */
8021 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8022 if (!new_con_state->crtc)
8025 new_crtc_state = drm_atomic_get_new_crtc_state(
8026 state, new_con_state->crtc);
8028 if (!new_crtc_state)
8031 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8034 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8035 if (!new_dm_crtc_state->stream)
8038 status = dc_stream_get_status(new_dm_crtc_state->stream);
8042 aconnector = to_amdgpu_dm_connector(connector);
8044 mutex_lock(&adev->dm.audio_lock);
8045 inst = status->audio_inst;
8046 aconnector->audio_inst = inst;
8047 mutex_unlock(&adev->dm.audio_lock);
8049 amdgpu_dm_audio_eld_notify(adev, inst);
8054 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8055 * @crtc_state: the DRM CRTC state
8056 * @stream_state: the DC stream state.
8058 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8059 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8061 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8062 struct dc_stream_state *stream_state)
8064 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8068 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
8069 * @state: The atomic state to commit
8071 * This will tell DC to commit the constructed DC state from atomic_check,
8072 * programming the hardware. Any failure here implies a hardware failure, since
8073 * atomic check should have filtered out anything non-kosher.
8075 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8077 struct drm_device *dev = state->dev;
8078 struct amdgpu_device *adev = drm_to_adev(dev);
8079 struct amdgpu_display_manager *dm = &adev->dm;
8080 struct dm_atomic_state *dm_state;
8081 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8083 struct drm_crtc *crtc;
8084 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8085 unsigned long flags;
8086 bool wait_for_vblank = true;
8087 struct drm_connector *connector;
8088 struct drm_connector_state *old_con_state, *new_con_state;
8089 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8090 int crtc_disable_count = 0;
8091 bool mode_set_reset_required = false;
8093 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8095 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8097 dm_state = dm_atomic_get_new_state(state);
8098 if (dm_state && dm_state->context) {
8099 dc_state = dm_state->context;
8101 /* No state changes, retain current state. */
8102 dc_state_temp = dc_create_state(dm->dc);
8103 ASSERT(dc_state_temp);
8104 dc_state = dc_state_temp;
8105 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8108 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8109 new_crtc_state, i) {
8110 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8112 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8114 if (old_crtc_state->active &&
8115 (!new_crtc_state->active ||
8116 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8117 manage_dm_interrupts(adev, acrtc, false);
8118 dc_stream_release(dm_old_crtc_state->stream);
8122 drm_atomic_helper_calc_timestamping_constants(state);
8124 /* update changed items */
8125 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8126 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8128 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8129 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8132 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8133 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8134 "connectors_changed:%d\n",
8136 new_crtc_state->enable,
8137 new_crtc_state->active,
8138 new_crtc_state->planes_changed,
8139 new_crtc_state->mode_changed,
8140 new_crtc_state->active_changed,
8141 new_crtc_state->connectors_changed);
8143 /* Disable cursor if disabling crtc */
8144 if (old_crtc_state->active && !new_crtc_state->active) {
8145 struct dc_cursor_position position;
8147 memset(&position, 0, sizeof(position));
8148 mutex_lock(&dm->dc_lock);
8149 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8150 mutex_unlock(&dm->dc_lock);
8153 /* Copy all transient state flags into dc state */
8154 if (dm_new_crtc_state->stream) {
8155 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8156 dm_new_crtc_state->stream);
8159 /* handles headless hotplug case, updating new_state and
8160 * aconnector as needed
8163 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8165 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8167 if (!dm_new_crtc_state->stream) {
8169 * This can happen because of issues with delivery
8170 * of userspace notifications: userspace tries to
8171 * set a mode on a display which is in fact
8172 * disconnected, so dc_sink is NULL on the
8173 * aconnector and we expect a reset-mode request
8174 * to come soon.
8176 * This can also happen when an unplug is done
8177 * while a resume sequence is ending.
8179 * In either case, we want to pretend we still
8180 * have a sink to keep the pipe running so that
8181 * hw state stays consistent with the sw state.
8183 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8184 __func__, acrtc->base.base.id);
8188 if (dm_old_crtc_state->stream)
8189 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8191 pm_runtime_get_noresume(dev->dev);
8193 acrtc->enabled = true;
8194 acrtc->hw_mode = new_crtc_state->mode;
8195 crtc->hwmode = new_crtc_state->mode;
8196 mode_set_reset_required = true;
8197 } else if (modereset_required(new_crtc_state)) {
8198 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8199 /* i.e. reset mode */
8200 if (dm_old_crtc_state->stream)
8201 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8202 mode_set_reset_required = true;
8204 } /* for_each_crtc_in_state() */
8207 /* if there was a mode set or reset, disable eDP PSR */
8208 if (mode_set_reset_required)
8209 amdgpu_dm_psr_disable_all(dm);
8211 dm_enable_per_frame_crtc_master_sync(dc_state);
8212 mutex_lock(&dm->dc_lock);
8213 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8214 mutex_unlock(&dm->dc_lock);
8217 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8218 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8220 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8222 if (dm_new_crtc_state->stream != NULL) {
8223 const struct dc_stream_status *status =
8224 dc_stream_get_status(dm_new_crtc_state->stream);
8227 status = dc_stream_get_status_from_state(dc_state,
8228 dm_new_crtc_state->stream);
8230 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8232 acrtc->otg_inst = status->primary_otg_inst;
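/*
 * Cache which hardware output timing generator (OTG) is driving this CRTC
 * so later interrupt and CRC handling can be matched to it without
 * re-querying DC.
 */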
8235 #ifdef CONFIG_DRM_AMD_DC_HDCP
8236 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8237 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8238 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8239 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8241 new_crtc_state = NULL;
8244 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8246 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8248 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8249 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8250 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8251 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8252 dm_new_con_state->update_hdcp = true;
8256 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8257 hdcp_update_display(
8258 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8259 new_con_state->hdcp_content_type,
8260 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true : false);
8265 /* Handle connector state changes */
8266 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8267 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8268 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8269 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8270 struct dc_surface_update dummy_updates[MAX_SURFACES];
8271 struct dc_stream_update stream_update;
8272 struct dc_info_packet hdr_packet;
8273 struct dc_stream_status *status = NULL;
8274 bool abm_changed, hdr_changed, scaling_changed;
8276 memset(&dummy_updates, 0, sizeof(dummy_updates));
8277 memset(&stream_update, 0, sizeof(stream_update));
8280 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8281 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8284 /* Skip any modesets/resets */
8285 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8288 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8289 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8291 scaling_changed = is_scaling_state_different(dm_new_con_state, dm_old_con_state);
8294 abm_changed = dm_new_crtc_state->abm_level !=
8295 dm_old_crtc_state->abm_level;
8298 hdr_changed = is_hdr_metadata_different(old_con_state, new_con_state);
8300 if (!scaling_changed && !abm_changed && !hdr_changed)
8303 stream_update.stream = dm_new_crtc_state->stream;
8304 if (scaling_changed) {
8305 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8306 dm_new_con_state, dm_new_crtc_state->stream);
8308 stream_update.src = dm_new_crtc_state->stream->src;
8309 stream_update.dst = dm_new_crtc_state->stream->dst;
8313 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8315 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8319 fill_hdr_info_packet(new_con_state, &hdr_packet);
8320 stream_update.hdr_static_metadata = &hdr_packet;
8323 status = dc_stream_get_status(dm_new_crtc_state->stream);
8325 WARN_ON(!status->plane_count);
8328 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8329 * Here we create an empty update on each plane.
8330 * To fix this, DC should permit updating only stream properties.
8332 for (j = 0; j < status->plane_count; j++)
8333 dummy_updates[j].surface = status->plane_states[0];
8336 mutex_lock(&dm->dc_lock);
8337 dc_commit_updates_for_stream(dm->dc,
8339 status->plane_count,
8340 dm_new_crtc_state->stream,
8343 mutex_unlock(&dm->dc_lock);
8346 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8347 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8348 new_crtc_state, i) {
8349 if (old_crtc_state->active && !new_crtc_state->active)
8350 crtc_disable_count++;
8352 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8353 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8355 /* Update the freesync config on the crtc state and the irq parameters */
8356 update_stream_irq_parameters(dm, dm_new_crtc_state);
8358 /* Handle vrr on->off / off->on transitions */
8359 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8364 * Enable interrupts for CRTCs that are newly enabled or went through
8365 * a modeset. It was intentionally deferred until after the front end
8366 * state was modified to wait until the OTG was on and so the IRQ
8367 * handlers didn't access stale or invalid state.
8369 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8370 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8371 bool configure_crc = false;
8373 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8375 if (new_crtc_state->active &&
8376 (!old_crtc_state->active ||
8377 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8378 dc_stream_retain(dm_new_crtc_state->stream);
8379 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8380 manage_dm_interrupts(adev, acrtc, true);
8382 #ifdef CONFIG_DEBUG_FS
8383 if (new_crtc_state->active &&
8384 amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8386 * Frontend may have changed so reapply the CRC capture
8387 * settings for the stream.
8389 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8390 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8392 if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8393 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8394 configure_crc = true;
8396 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8397 configure_crc = true;
8401 amdgpu_dm_crtc_configure_crc_source(
8402 crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8407 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8408 if (new_crtc_state->async_flip)
8409 wait_for_vblank = false;
8411 /* Update planes when needed, per CRTC */
8412 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8413 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8415 if (dm_new_crtc_state->stream)
8416 amdgpu_dm_commit_planes(state, dc_state, dev,
8417 dm, crtc, wait_for_vblank);
8420 /* Update audio instances for each connector. */
8421 amdgpu_dm_commit_audio(dev, state);
8424 * Send a vblank event for all events not handled in flip and
8425 * mark the event consumed for drm_atomic_helper_commit_hw_done()
8427 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8428 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8430 if (new_crtc_state->event)
8431 drm_send_event_locked(dev, &new_crtc_state->event->base);
8433 new_crtc_state->event = NULL;
8435 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8437 /* Signal HW programming completion */
8438 drm_atomic_helper_commit_hw_done(state);
8440 if (wait_for_vblank)
8441 drm_atomic_helper_wait_for_flip_done(dev, state);
8443 drm_atomic_helper_cleanup_planes(dev, state);
8445 /* return the stolen vga memory back to VRAM */
8446 if (!adev->mman.keep_stolen_vga_memory)
8447 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8448 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
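/*
 * These buffers only needed to stay reserved until the first driver
 * modeset replaced the pre-OS (BIOS/UEFI) scanout, so they can now be
 * handed back to the VRAM manager.
 */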
8451 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8452 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
8455 for (i = 0; i < crtc_disable_count; i++)
8456 pm_runtime_put_autosuspend(dev->dev);
8457 pm_runtime_mark_last_busy(dev->dev);
8460 dc_release_state(dc_state_temp);
8464 static int dm_force_atomic_commit(struct drm_connector *connector)
8467 struct drm_device *ddev = connector->dev;
8468 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8469 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8470 struct drm_plane *plane = disconnected_acrtc->base.primary;
8471 struct drm_connector_state *conn_state;
8472 struct drm_crtc_state *crtc_state;
8473 struct drm_plane_state *plane_state;
8478 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8480 /* Construct an atomic state to restore previous display setting */
8483 * Attach connectors to drm_atomic_state
8485 conn_state = drm_atomic_get_connector_state(state, connector);
8487 ret = PTR_ERR_OR_ZERO(conn_state);
8491 /* Attach crtc to drm_atomic_state*/
8492 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8494 ret = PTR_ERR_OR_ZERO(crtc_state);
8498 /* force a restore */
8499 crtc_state->mode_changed = true;
8501 /* Attach plane to drm_atomic_state */
8502 plane_state = drm_atomic_get_plane_state(state, plane);
8504 ret = PTR_ERR_OR_ZERO(plane_state);
8509 /* Call commit internally with the state we just constructed */
8510 ret = drm_atomic_commit(state);
8515 DRM_ERROR("Restoring old state failed with %i\n", ret);
8516 drm_atomic_state_put(state);
8522 * This function handles all cases when a set mode does not arrive upon hotplug.
8523 * This includes when a display is unplugged and then plugged back into the
8524 * same port, and when running without usermode desktop manager support.
8526 void dm_restore_drm_connector_state(struct drm_device *dev,
8527 struct drm_connector *connector)
8529 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8530 struct amdgpu_crtc *disconnected_acrtc;
8531 struct dm_crtc_state *acrtc_state;
8533 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8536 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8537 if (!disconnected_acrtc)
8540 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8541 if (!acrtc_state->stream)
8545 * If the previous sink has not been released and differs from the current one,
8546 * we deduce we are in a state where we cannot rely on a usermode call
8547 * to turn on the display, so we do it here.
8549 if (acrtc_state->stream->sink != aconnector->dc_sink)
8550 dm_force_atomic_commit(&aconnector->base);
8554 * Grabs all modesetting locks to serialize against any blocking commits and
8555 * waits for completion of all non-blocking commits.
8557 static int do_aquire_global_lock(struct drm_device *dev,
8558 struct drm_atomic_state *state)
8560 struct drm_crtc *crtc;
8561 struct drm_crtc_commit *commit;
8565 * Adding all modeset locks to the acquire_ctx will
8566 * ensure that when the framework releases it, the
8567 * extra locks we are taking here will get released too.
8569 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8573 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8574 spin_lock(&crtc->commit_lock);
8575 commit = list_first_entry_or_null(&crtc->commit_list,
8576 struct drm_crtc_commit, commit_entry);
8578 drm_crtc_commit_get(commit);
8579 spin_unlock(&crtc->commit_lock);
8585 * Make sure all pending HW programming has completed and all page flips are done.
8588 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8591 ret = wait_for_completion_interruptible_timeout(
8592 &commit->flip_done, 10*HZ);
8595 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8596 "timed out\n", crtc->base.id, crtc->name);
8598 drm_crtc_commit_put(commit);
8601 return ret < 0 ? ret : 0;
8604 static void get_freesync_config_for_crtc(
8605 struct dm_crtc_state *new_crtc_state,
8606 struct dm_connector_state *new_con_state)
8608 struct mod_freesync_config config = {0};
8609 struct amdgpu_dm_connector *aconnector =
8610 to_amdgpu_dm_connector(new_con_state->base.connector);
8611 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8612 int vrefresh = drm_mode_vrefresh(mode);
8614 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8615 vrefresh >= aconnector->min_vfreq &&
8616 vrefresh <= aconnector->max_vfreq;
8618 if (new_crtc_state->vrr_supported) {
8619 new_crtc_state->stream->ignore_msa_timing_param = true;
8620 config.state = new_crtc_state->base.vrr_enabled ?
8621 VRR_STATE_ACTIVE_VARIABLE :
8623 config.min_refresh_in_uhz =
8624 aconnector->min_vfreq * 1000000;
8625 config.max_refresh_in_uhz =
8626 aconnector->max_vfreq * 1000000;
8627 config.vsif_supported = true;
8631 new_crtc_state->freesync_config = config;
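/*
 * Illustration: the refresh limits above are expressed in micro-Hz, so a
 * panel reporting a 48-144 Hz FreeSync range would be programmed with
 * min_refresh_in_uhz = 48,000,000 and max_refresh_in_uhz = 144,000,000.
 */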
8634 static void reset_freesync_config_for_crtc(
8635 struct dm_crtc_state *new_crtc_state)
8637 new_crtc_state->vrr_supported = false;
8639 memset(&new_crtc_state->vrr_infopacket, 0,
8640 sizeof(new_crtc_state->vrr_infopacket));
8643 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8644 struct drm_atomic_state *state,
8645 struct drm_crtc *crtc,
8646 struct drm_crtc_state *old_crtc_state,
8647 struct drm_crtc_state *new_crtc_state,
8649 bool *lock_and_validation_needed)
8651 struct dm_atomic_state *dm_state = NULL;
8652 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8653 struct dc_stream_state *new_stream;
8657 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8658 * update changed items
8660 struct amdgpu_crtc *acrtc = NULL;
8661 struct amdgpu_dm_connector *aconnector = NULL;
8662 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8663 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8667 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8668 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8669 acrtc = to_amdgpu_crtc(crtc);
8670 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8672 /* TODO This hack should go away */
8673 if (aconnector && enable) {
8674 /* Make sure fake sink is created in plug-in scenario */
8675 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8677 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8680 if (IS_ERR(drm_new_conn_state)) {
8681 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8685 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8686 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8688 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8691 new_stream = create_validate_stream_for_sink(aconnector,
8692 &new_crtc_state->mode,
8694 dm_old_crtc_state->stream);
8697 * We can have no stream on ACTION_SET if a display
8698 * was disconnected during S3; in this case it is not an
8699 * error, the OS will be updated after detection and
8700 * will do the right thing on the next atomic commit.
8704 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8705 __func__, acrtc->base.base.id);
8711 * TODO: Check VSDB bits to decide whether this should
8712 * be enabled or not.
8714 new_stream->triggered_crtc_reset.enabled =
8715 dm->force_timing_sync;
8717 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8719 ret = fill_hdr_info_packet(drm_new_conn_state,
8720 &new_stream->hdr_static_metadata);
8725 * If we already removed the old stream from the context
8726 * (and set the new stream to NULL) then we can't reuse
8727 * the old stream even if the stream and scaling are unchanged.
8728 * We'll hit the BUG_ON and black screen.
8730 * TODO: Refactor this function to allow this check to work
8731 * in all conditions.
8733 if (dm_new_crtc_state->stream &&
8734 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8735 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8736 new_crtc_state->mode_changed = false;
8737 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8738 new_crtc_state->mode_changed);
8742 /* mode_changed flag may get updated above, need to check again */
8743 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8747 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8748 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8749 "connectors_changed:%d\n",
8751 new_crtc_state->enable,
8752 new_crtc_state->active,
8753 new_crtc_state->planes_changed,
8754 new_crtc_state->mode_changed,
8755 new_crtc_state->active_changed,
8756 new_crtc_state->connectors_changed);
8758 /* Remove stream for any changed/disabled CRTC */
8761 if (!dm_old_crtc_state->stream)
8764 ret = dm_atomic_get_state(state, &dm_state);
8768 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8771 /* i.e. reset mode */
8772 if (dc_remove_stream_from_ctx(
8775 dm_old_crtc_state->stream) != DC_OK) {
8780 dc_stream_release(dm_old_crtc_state->stream);
8781 dm_new_crtc_state->stream = NULL;
8783 reset_freesync_config_for_crtc(dm_new_crtc_state);
8785 *lock_and_validation_needed = true;
8787 } else {/* Add stream for any updated/enabled CRTC */
8789 * Quick fix to prevent a NULL pointer dereference on new_stream when
8790 * newly added MST connectors are not found in the existing crtc_state in chained (daisy-chain) mode.
8791 * TODO: need to dig out the root cause of this.
8793 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8796 if (modereset_required(new_crtc_state))
8799 if (modeset_required(new_crtc_state, new_stream,
8800 dm_old_crtc_state->stream)) {
8802 WARN_ON(dm_new_crtc_state->stream);
8804 ret = dm_atomic_get_state(state, &dm_state);
8808 dm_new_crtc_state->stream = new_stream;
8810 dc_stream_retain(new_stream);
8812 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8815 if (dc_add_stream_to_ctx(
8818 dm_new_crtc_state->stream) != DC_OK) {
8823 *lock_and_validation_needed = true;
8828 /* Release extra reference */
8830 dc_stream_release(new_stream);
8833 * We want to do dc stream updates that do not require a
8834 * full modeset below.
8836 if (!(enable && aconnector && new_crtc_state->active))
8839 * Given above conditions, the dc state cannot be NULL because:
8840 * 1. We're in the process of enabling CRTCs (just been added
8841 * to the dc context, or already is on the context)
8842 * 2. Has a valid connector attached, and
8843 * 3. Is currently active and enabled.
8844 * => The dc stream state currently exists.
8846 BUG_ON(dm_new_crtc_state->stream == NULL);
8848 /* Scaling or underscan settings */
8849 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8850 update_stream_scaling_settings(
8851 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8854 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8857 * Color management settings. We also update color properties
8858 * when a modeset is needed, to ensure it gets reprogrammed.
8860 if (dm_new_crtc_state->base.color_mgmt_changed ||
8861 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8862 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8867 /* Update Freesync settings. */
8868 get_freesync_config_for_crtc(dm_new_crtc_state,
8875 dc_stream_release(new_stream);
8879 static bool should_reset_plane(struct drm_atomic_state *state,
8880 struct drm_plane *plane,
8881 struct drm_plane_state *old_plane_state,
8882 struct drm_plane_state *new_plane_state)
8884 struct drm_plane *other;
8885 struct drm_plane_state *old_other_state, *new_other_state;
8886 struct drm_crtc_state *new_crtc_state;
8890 * TODO: Remove this hack once the checks below are sufficient
8891 * to determine when we need to reset all the planes on a CRTC.
8894 if (state->allow_modeset)
8897 /* Exit early if we know that we're adding or removing the plane. */
8898 if (old_plane_state->crtc != new_plane_state->crtc)
8901 /* old crtc == new_crtc == NULL, plane not in context. */
8902 if (!new_plane_state->crtc)
8906 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8908 if (!new_crtc_state)
8911 /* CRTC Degamma changes currently require us to recreate planes. */
8912 if (new_crtc_state->color_mgmt_changed)
8915 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8919 * If there are any new primary or overlay planes being added or
8920 * removed then the z-order can potentially change. To ensure
8921 * correct z-order and pipe acquisition the current DC architecture
8922 * requires us to remove and recreate all existing planes.
8924 * TODO: Come up with a more elegant solution for this.
8926 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8927 struct amdgpu_framebuffer *old_afb, *new_afb;
8928 if (other->type == DRM_PLANE_TYPE_CURSOR)
8931 if (old_other_state->crtc != new_plane_state->crtc &&
8932 new_other_state->crtc != new_plane_state->crtc)
8935 if (old_other_state->crtc != new_other_state->crtc)
8938 /* Src/dst size and scaling updates. */
8939 if (old_other_state->src_w != new_other_state->src_w ||
8940 old_other_state->src_h != new_other_state->src_h ||
8941 old_other_state->crtc_w != new_other_state->crtc_w ||
8942 old_other_state->crtc_h != new_other_state->crtc_h)
8945 /* Rotation / mirroring updates. */
8946 if (old_other_state->rotation != new_other_state->rotation)
8949 /* Blending updates. */
8950 if (old_other_state->pixel_blend_mode !=
8951 new_other_state->pixel_blend_mode)
8954 /* Alpha updates. */
8955 if (old_other_state->alpha != new_other_state->alpha)
8958 /* Colorspace changes. */
8959 if (old_other_state->color_range != new_other_state->color_range ||
8960 old_other_state->color_encoding != new_other_state->color_encoding)
8963 /* Framebuffer checks fall at the end. */
8964 if (!old_other_state->fb || !new_other_state->fb)
8967 /* Pixel format changes can require bandwidth updates. */
8968 if (old_other_state->fb->format != new_other_state->fb->format)
8971 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8972 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8974 /* Tiling and DCC changes also require bandwidth updates. */
8975 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8976 old_afb->base.modifier != new_afb->base.modifier)
8983 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8984 struct drm_plane_state *new_plane_state,
8985 struct drm_framebuffer *fb)
8987 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8988 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8992 if (fb->width > new_acrtc->max_cursor_width ||
8993 fb->height > new_acrtc->max_cursor_height) {
8994 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8995 new_plane_state->fb->width,
8996 new_plane_state->fb->height);
8999 if (new_plane_state->src_w != fb->width << 16 ||
9000 new_plane_state->src_h != fb->height << 16) {
9001 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9005 /* Pitch in pixels */
9006 pitch = fb->pitches[0] / fb->format->cpp[0];
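/*
 * Example: a 64-pixel-wide ARGB8888 cursor FB has pitches[0] = 256 bytes
 * and cpp[0] = 4, giving a pitch of 64 pixels here.
 */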
9008 if (fb->width != pitch) {
9009 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9018 /* FB pitch is supported by cursor plane */
9021 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9025 /* Core DRM takes care of checking FB modifiers, so we only need to
9026 * check tiling flags when the FB doesn't have a modifier. */
9027 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9028 if (adev->family < AMDGPU_FAMILY_AI) {
9029 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9030 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9031 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9033 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
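/*
 * On AI (GFX9/Vega) and newer a SWIZZLE_MODE of 0 means a linear layout;
 * on older ASICs the FB is treated as linear when it is neither 1D- nor
 * 2D-tiled-thin and has no micro-tile mode set.
 */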
9036 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9044 static int dm_update_plane_state(struct dc *dc,
9045 struct drm_atomic_state *state,
9046 struct drm_plane *plane,
9047 struct drm_plane_state *old_plane_state,
9048 struct drm_plane_state *new_plane_state,
9050 bool *lock_and_validation_needed)
9053 struct dm_atomic_state *dm_state = NULL;
9054 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9055 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9056 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9057 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9058 struct amdgpu_crtc *new_acrtc;
9063 new_plane_crtc = new_plane_state->crtc;
9064 old_plane_crtc = old_plane_state->crtc;
9065 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9066 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9068 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9069 if (!enable || !new_plane_crtc ||
9070 drm_atomic_plane_disabling(plane->state, new_plane_state))
9073 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9075 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9076 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9080 if (new_plane_state->fb) {
9081 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9082 new_plane_state->fb);
9090 needs_reset = should_reset_plane(state, plane, old_plane_state,
9093 /* Remove any changed/removed planes */
9098 if (!old_plane_crtc)
9101 old_crtc_state = drm_atomic_get_old_crtc_state(
9102 state, old_plane_crtc);
9103 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9105 if (!dm_old_crtc_state->stream)
9108 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9109 plane->base.id, old_plane_crtc->base.id);
9111 ret = dm_atomic_get_state(state, &dm_state);
9115 if (!dc_remove_plane_from_context(
9117 dm_old_crtc_state->stream,
9118 dm_old_plane_state->dc_state,
9119 dm_state->context)) {
9125 dc_plane_state_release(dm_old_plane_state->dc_state);
9126 dm_new_plane_state->dc_state = NULL;
9128 *lock_and_validation_needed = true;
9130 } else { /* Add new planes */
9131 struct dc_plane_state *dc_new_plane_state;
9133 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9136 if (!new_plane_crtc)
9139 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9140 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9142 if (!dm_new_crtc_state->stream)
9148 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9152 WARN_ON(dm_new_plane_state->dc_state);
9154 dc_new_plane_state = dc_create_plane_state(dc);
9155 if (!dc_new_plane_state)
9158 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9159 plane->base.id, new_plane_crtc->base.id);
9161 ret = fill_dc_plane_attributes(
9162 drm_to_adev(new_plane_crtc->dev),
9167 dc_plane_state_release(dc_new_plane_state);
9171 ret = dm_atomic_get_state(state, &dm_state);
9173 dc_plane_state_release(dc_new_plane_state);
9178 * Any atomic check errors that occur after this will
9179 * not need a release. The plane state will be attached
9180 * to the stream, and therefore part of the atomic
9181 * state. It'll be released when the atomic state is
9184 if (!dc_add_plane_to_context(
9186 dm_new_crtc_state->stream,
9188 dm_state->context)) {
9190 dc_plane_state_release(dc_new_plane_state);
9194 dm_new_plane_state->dc_state = dc_new_plane_state;
9196 /* Tell DC to do a full surface update every time there
9197 * is a plane change. Inefficient, but works for now.
9199 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9201 *lock_and_validation_needed = true;
9208 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9209 struct drm_crtc *crtc,
9210 struct drm_crtc_state *new_crtc_state)
9212 struct drm_plane_state *new_cursor_state, *new_primary_state;
9213 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9215 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9216 * cursor per pipe but it's going to inherit the scaling and
9217 * positioning from the underlying pipe. Check that the cursor plane's
9218 * scaling matches the primary plane's. */
9220 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9221 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9222 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9226 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9227 (new_cursor_state->src_w >> 16);
9228 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9229 (new_cursor_state->src_h >> 16);
9231 primary_scale_w = new_primary_state->crtc_w * 1000 /
9232 (new_primary_state->src_w >> 16);
9233 primary_scale_h = new_primary_state->crtc_h * 1000 /
9234 (new_primary_state->src_h >> 16);
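/*
 * src_* values are 16.16 fixed point, so these scales are in per-mille:
 * e.g. a 64x64 cursor FB shown in a 64x64 CRTC rect yields
 * 64 * 1000 / 64 = 1000 (1.0x) for both width and height.
 */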
9236 if (cursor_scale_w != primary_scale_w ||
9237 cursor_scale_h != primary_scale_h) {
9238 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9245 #if defined(CONFIG_DRM_AMD_DC_DCN)
9246 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9248 struct drm_connector *connector;
9249 struct drm_connector_state *conn_state;
9250 struct amdgpu_dm_connector *aconnector = NULL;
9252 for_each_new_connector_in_state(state, connector, conn_state, i) {
9253 if (conn_state->crtc != crtc)
9256 aconnector = to_amdgpu_dm_connector(connector);
9257 if (!aconnector->port || !aconnector->mst_port)
9266 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9271 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9272 * @dev: The DRM device
9273 * @state: The atomic state to commit
9275 * Validate that the given atomic state is programmable by DC into hardware.
9276 * This involves constructing a &struct dc_state reflecting the new hardware
9277 * state we wish to commit, then querying DC to see if it is programmable. It's
9278 * important not to modify the existing DC state. Otherwise, atomic_check
9279 * may unexpectedly commit hardware changes.
9281 * When validating the DC state, it's important that the right locks are
9282 * acquired. For full updates case which removes/adds/updates streams on one
9283 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9284 * that any such full update commit will wait for completion of any outstanding
9285 * flip using DRMs synchronization events.
9287 * Note that DM adds the affected connectors for all CRTCs in state, even when that
9288 * might not seem necessary. This is because DC stream creation requires the
9289 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9290 * be possible but non-trivial - a possible TODO item.
9292 * Return: 0 on success, or a negative error code if validation failed.
9294 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9295 struct drm_atomic_state *state)
9297 struct amdgpu_device *adev = drm_to_adev(dev);
9298 struct dm_atomic_state *dm_state = NULL;
9299 struct dc *dc = adev->dm.dc;
9300 struct drm_connector *connector;
9301 struct drm_connector_state *old_con_state, *new_con_state;
9302 struct drm_crtc *crtc;
9303 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9304 struct drm_plane *plane;
9305 struct drm_plane_state *old_plane_state, *new_plane_state;
9306 enum dc_status status;
9308 bool lock_and_validation_needed = false;
9309 struct dm_crtc_state *dm_old_crtc_state;
9311 trace_amdgpu_dm_atomic_check_begin(state);
9313 ret = drm_atomic_helper_check_modeset(dev, state);
9317 /* Check connector changes */
9318 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9319 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9320 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9322 /* Skip connectors that are disabled or part of modeset already. */
9323 if (!old_con_state->crtc && !new_con_state->crtc)
9326 if (!new_con_state->crtc)
9329 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9330 if (IS_ERR(new_crtc_state)) {
9331 ret = PTR_ERR(new_crtc_state);
9335 if (dm_old_con_state->abm_level !=
9336 dm_new_con_state->abm_level)
9337 new_crtc_state->connectors_changed = true;
9340 #if defined(CONFIG_DRM_AMD_DC_DCN)
9341 if (adev->asic_type >= CHIP_NAVI10) {
9342 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9343 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9344 ret = add_affected_mst_dsc_crtcs(state, crtc);
9351 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9352 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9354 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9355 !new_crtc_state->color_mgmt_changed &&
9356 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9357 dm_old_crtc_state->dsc_force_changed == false)
9360 if (!new_crtc_state->enable)
9363 ret = drm_atomic_add_affected_connectors(state, crtc);
9367 ret = drm_atomic_add_affected_planes(state, crtc);
9371 if (dm_old_crtc_state->dsc_force_changed)
9372 new_crtc_state->mode_changed = true;
9376 * Add all primary and overlay planes on the CRTC to the state
9377 * whenever a plane is enabled to maintain correct z-ordering
9378 * and to enable fast surface updates.
9380 drm_for_each_crtc(crtc, dev) {
9381 bool modified = false;
9383 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9384 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9387 if (new_plane_state->crtc == crtc ||
9388 old_plane_state->crtc == crtc) {
9397 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9398 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9402 drm_atomic_get_plane_state(state, plane);
9404 if (IS_ERR(new_plane_state)) {
9405 ret = PTR_ERR(new_plane_state);
9411 /* Remove existing planes if they are modified */
9412 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9413 ret = dm_update_plane_state(dc, state, plane,
9417 &lock_and_validation_needed);
9422 /* Disable all crtcs which require disable */
9423 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9424 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9428 &lock_and_validation_needed);
9433 /* Enable all crtcs which require enable */
9434 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9435 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9439 &lock_and_validation_needed);
9444 /* Add new/modified planes */
9445 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9446 ret = dm_update_plane_state(dc, state, plane,
9450 &lock_and_validation_needed);
9455 /* Run this here since we want to validate the streams we created */
9456 ret = drm_atomic_helper_check_planes(dev, state);
9460 /* Check cursor planes scaling */
9461 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9462 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9467 if (state->legacy_cursor_update) {
9469 * This is a fast cursor update coming from the plane update
9470 * helper; check if it can be done asynchronously for better performance.
9473 state->async_update =
9474 !drm_atomic_helper_async_check(dev, state);
9477 * Skip the remaining global validation if this is an async
9478 * update. Cursor updates can be done without affecting
9479 * state or bandwidth calcs and this avoids the performance
9480 * penalty of locking the private state object and
9481 * allocating a new dc_state.
9483 if (state->async_update)
9487 /* Check scaling and underscan changes */
9488 /* TODO Removed scaling changes validation due to inability to commit
9489 * a new stream into the context w/o causing a full reset. Need to
9490 * decide how to handle this.
9492 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9493 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9494 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9495 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9497 /* Skip any modesets/resets */
9498 if (!acrtc || drm_atomic_crtc_needs_modeset(
9499 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9502 /* Skip anything that is not a scaling or underscan change */
9503 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9506 lock_and_validation_needed = true;
9510 * Streams and planes are reset when there are changes that affect
9511 * bandwidth. Anything that affects bandwidth needs to go through
9512 * DC global validation to ensure that the configuration can be applied
9515 * We currently have to stall out here in atomic_check for outstanding
9516 * commits to finish in this case because our IRQ handlers reference
9517 * DRM state directly - we can end up disabling interrupts too early
9520 * TODO: Remove this stall and drop DM state private objects.
9522 if (lock_and_validation_needed) {
9523 ret = dm_atomic_get_state(state, &dm_state);
9527 ret = do_aquire_global_lock(dev, state);
9531 #if defined(CONFIG_DRM_AMD_DC_DCN)
9532 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9535 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9541 * Perform validation of MST topology in the state:
9542 * We need to perform MST atomic check before calling
9543 * dc_validate_global_state(), or there is a chance
9544 * to get stuck in an infinite loop and hang eventually.
9546 ret = drm_dp_mst_atomic_check(state);
9549 status = dc_validate_global_state(dc, dm_state->context, false);
9550 if (status != DC_OK) {
9551 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9552 dc_status_to_str(status), status);
9558 * The commit is a fast update. Fast updates shouldn't change
9559 * the DC context or affect global validation, and they can have their
9560 * commit work done in parallel with other commits not touching
9561 * the same resource. If we have a new DC context as part of
9562 * the DM atomic state from validation we need to free it and
9563 * retain the existing one instead.
9565 * Furthermore, since the DM atomic state only contains the DC
9566 * context and can safely be annulled, we can free the state
9567 * and clear the associated private object now to free
9568 * some memory and avoid a possible use-after-free later.
9571 for (i = 0; i < state->num_private_objs; i++) {
9572 struct drm_private_obj *obj = state->private_objs[i].ptr;
9574 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9575 int j = state->num_private_objs-1;
9577 dm_atomic_destroy_state(obj,
9578 state->private_objs[i].state);
9580 /* If i is not at the end of the array then the
9581 * last element needs to be moved to where i was
9582 * before the array can safely be truncated.
9585 state->private_objs[i] =
9586 state->private_objs[j];
9588 state->private_objs[j].ptr = NULL;
9589 state->private_objs[j].state = NULL;
9590 state->private_objs[j].old_state = NULL;
9591 state->private_objs[j].new_state = NULL;
9593 state->num_private_objs = j;
9599 /* Store the overall update type for use later in atomic check. */
9600 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9601 struct dm_crtc_state *dm_new_crtc_state =
9602 to_dm_crtc_state(new_crtc_state);
9604 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9609 /* Must be success */
9612 trace_amdgpu_dm_atomic_check_finish(state, ret);
9617 if (ret == -EDEADLK)
9618 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9619 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9620 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9622 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9624 trace_amdgpu_dm_atomic_check_finish(state, ret);
9629 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9630 struct amdgpu_dm_connector *amdgpu_dm_connector)
9633 bool capable = false;
9635 if (amdgpu_dm_connector->dc_link &&
9636 dm_helpers_dp_read_dpcd(
9638 amdgpu_dm_connector->dc_link,
9639 DP_DOWN_STREAM_PORT_COUNT,
9641 sizeof(dpcd_data))) {
9642 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
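/*
 * DP_MSA_TIMING_PAR_IGNORED in the DOWN_STREAM_PORT_COUNT register means
 * the sink can ignore the MSA timing parameters, a prerequisite for
 * variable refresh; only then does DM go on to parse the EDID range
 * descriptor for FreeSync limits below.
 */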
9647 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9651 bool edid_check_required;
9652 struct detailed_timing *timing;
9653 struct detailed_non_pixel *data;
9654 struct detailed_data_monitor_range *range;
9655 struct amdgpu_dm_connector *amdgpu_dm_connector =
9656 to_amdgpu_dm_connector(connector);
9657 struct dm_connector_state *dm_con_state = NULL;
9659 struct drm_device *dev = connector->dev;
9660 struct amdgpu_device *adev = drm_to_adev(dev);
9661 bool freesync_capable = false;
9663 if (!connector->state) {
9664 DRM_ERROR("%s - Connector has no state", __func__);
9669 dm_con_state = to_dm_connector_state(connector->state);
9671 amdgpu_dm_connector->min_vfreq = 0;
9672 amdgpu_dm_connector->max_vfreq = 0;
9673 amdgpu_dm_connector->pixel_clock_mhz = 0;
9678 dm_con_state = to_dm_connector_state(connector->state);
9680 edid_check_required = false;
9681 if (!amdgpu_dm_connector->dc_sink) {
9682 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9685 if (!adev->dm.freesync_module)
9688 * If the EDID is non-zero, restrict freesync to DP and eDP only.
9691 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9692 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9693 edid_check_required = is_dp_capable_without_timing_msa(
9695 amdgpu_dm_connector);
9698 if (edid_check_required == true && (edid->version > 1 ||
9699 (edid->version == 1 && edid->revision > 1))) {
9700 for (i = 0; i < 4; i++) {
9702 timing = &edid->detailed_timings[i];
9703 data = &timing->data.other_data;
9704 range = &data->data.range;
9706 * Check if monitor has continuous frequency mode
9708 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9711 * Check for the range-limits flag only. If flags == 1 then
9712 * no additional timing information is provided.
9713 * Default GTF, GTF Secondary curve and CVT are not supported.
9716 if (range->flags != 1)
9719 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9720 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9721 amdgpu_dm_connector->pixel_clock_mhz =
9722 range->pixel_clock_mhz * 10;
9726 if (amdgpu_dm_connector->max_vfreq -
9727 amdgpu_dm_connector->min_vfreq > 10) {
9729 freesync_capable = true;
9735 dm_con_state->freesync_capable = freesync_capable;
9737 if (connector->vrr_capable_property)
9738 drm_connector_set_vrr_capable_property(connector,
9742 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9744 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9746 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9748 if (link->type == dc_connection_none)
9750 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9751 dpcd_data, sizeof(dpcd_data))) {
9752 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
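/*
 * DP_PSR_SUPPORT (DPCD 0x070) reports the panel's PSR capability/version:
 * 0 means unsupported; any non-zero value is treated here as PSR1.
 */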
9754 if (dpcd_data[0] == 0) {
9755 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9756 link->psr_settings.psr_feature_enabled = false;
9758 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9759 link->psr_settings.psr_feature_enabled = true;
9762 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9767 * amdgpu_dm_link_setup_psr() - configure psr link
9768 * @stream: stream state
9770 * Return: true if success
9772 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9774 struct dc_link *link = NULL;
9775 struct psr_config psr_config = {0};
9776 struct psr_context psr_context = {0};
9782 link = stream->link;
9784 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9786 if (psr_config.psr_version > 0) {
9787 psr_config.psr_exit_link_training_required = 0x1;
9788 psr_config.psr_frame_capture_indication_req = 0;
9789 psr_config.psr_rfb_setup_time = 0x37;
9790 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9791 psr_config.allow_smu_optimizations = 0x0;
9793 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9796 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9802 * amdgpu_dm_psr_enable() - enable psr f/w
9803 * @stream: stream state
9805 * Return: true if success
9807 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9809 struct dc_link *link = stream->link;
9810 unsigned int vsync_rate_hz = 0;
9811 struct dc_static_screen_params params = {0};
9812 /* Calculate the number of static frames before generating an interrupt to the hardware. */
9815 // Init a fail-safe of 2 static frames
9816 unsigned int num_frames_static = 2;
9818 DRM_DEBUG_DRIVER("Enabling psr...\n");
9820 vsync_rate_hz = div64_u64(div64_u64((
9821 stream->timing.pix_clk_100hz * 100),
9822 stream->timing.v_total),
9823 stream->timing.h_total);
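/*
 * For illustration: a 1920x1080@60 mode with pix_clk_100hz = 1485000
 * (148.5 MHz), h_total = 2200 and v_total = 1125 gives
 * 148500000 / (1125 * 2200) = 60 Hz.
 */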
9826 * Calculate the number of frames such that at least 30 ms of time has passed.
9829 if (vsync_rate_hz != 0) {
9830 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9831 num_frames_static = (30000 / frame_time_microsec) + 1;
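/*
 * E.g. at 60 Hz the frame time is ~16666 us, so this evaluates to
 * (30000 / 16666) + 1 = 2 static frames.
 */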
9834 params.triggers.cursor_update = true;
9835 params.triggers.overlay_update = true;
9836 params.triggers.surface_update = true;
9837 params.num_frames = num_frames_static;
9839 dc_stream_set_static_screen_params(link->ctx->dc,
9843 return dc_link_set_psr_allow_active(link, true, false, false);
9847 * amdgpu_dm_psr_disable() - disable psr f/w
9848 * @stream: stream state
9850 * Return: true if success
9852 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9855 DRM_DEBUG_DRIVER("Disabling psr...\n");
9857 return dc_link_set_psr_allow_active(stream->link, false, true, false);
9861 * amdgpu_dm_psr_disable_all() - disable psr f/w
9862 * if psr is enabled on any stream
9864 * Return: true if success
9866 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9868 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9869 return dc_set_psr_allow_active(dm->dc, false);
9872 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9874 struct amdgpu_device *adev = drm_to_adev(dev);
9875 struct dc *dc = adev->dm.dc;
9878 mutex_lock(&adev->dm.dc_lock);
9879 if (dc->current_state) {
9880 for (i = 0; i < dc->current_state->stream_count; ++i)
9881 dc->current_state->streams[i]
9882 ->triggered_crtc_reset.enabled =
9883 adev->dm.force_timing_sync;
9885 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9886 dc_trigger_sync(dc, dc->current_state);
9888 mutex_unlock(&adev->dm.dc_lock);
9891 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9892 uint32_t value, const char *func_name)
9894 #ifdef DM_CHECK_ADDR_0
9896 DC_ERR("invalid register write. address = 0");
9900 cgs_write_register(ctx->cgs_device, address, value);
9901 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9904 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9905 const char *func_name)
9908 #ifdef DM_CHECK_ADDR_0
9910 DC_ERR("invalid register read; address = 0\n");
9915 if (ctx->dmub_srv &&
9916 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9917 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9922 value = cgs_read_register(ctx->cgs_device, address);
9924 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);