2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
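/*
 * CREATE_TRACE_POINTS must be defined in exactly one translation unit
 * before the tracepoint header (amdgpu_dm_trace.h, included below) is
 * pulled in, so that the tracepoint bodies are instantiated here rather
 * than only declared.
 */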
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
49 #include "amdgpu_pm.h"
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
59 #include "ivsrcid/ivsrcid_vislands30.h"
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/version.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 #include <drm/drm_hdcp.h>
81 #if defined(CONFIG_DRM_AMD_DC_DCN)
82 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
84 #include "dcn/dcn_1_0_offset.h"
85 #include "dcn/dcn_1_0_sh_mask.h"
86 #include "soc15_hw_ip.h"
87 #include "vega10_ip_offset.h"
89 #include "soc15_common.h"
92 #include "modules/inc/mod_freesync.h"
93 #include "modules/power/power_helpers.h"
94 #include "modules/inc/mod_info_packet.h"
96 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
97 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
104 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
106 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
109 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
112 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
115 /* Number of bytes in PSP header for firmware. */
116 #define PSP_HEADER_BYTES 0x100
118 /* Number of bytes in PSP footer for firmware. */
119 #define PSP_FOOTER_BYTES 0x100
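/*
 * The DMUB inst_const blob in the firmware image is wrapped by a PSP
 * header and footer; dm_dmub_sw_init() and dm_dmub_hw_init() below
 * subtract both when computing the size of the raw instruction/constant
 * data that is copied into the DMUB_WINDOW_0_INST_CONST region.
 */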
124 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
125 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
126 * requests into DC requests, and DC responses into DRM responses.
128 * The root control structure is &struct amdgpu_display_manager.
131 /* basic init/fini API */
132 static int amdgpu_dm_init(struct amdgpu_device *adev);
133 static void amdgpu_dm_fini(struct amdgpu_device *adev);
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
137 switch (link->dpcd_caps.dongle_type) {
138 case DISPLAY_DONGLE_NONE:
139 return DRM_MODE_SUBCONNECTOR_Native;
140 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 return DRM_MODE_SUBCONNECTOR_VGA;
142 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_DVID;
145 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 return DRM_MODE_SUBCONNECTOR_HDMIA;
148 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
150 return DRM_MODE_SUBCONNECTOR_Unknown;
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
156 struct dc_link *link = aconnector->dc_link;
157 struct drm_connector *connector = &aconnector->base;
158 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
160 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
163 if (aconnector->dc_sink)
164 subconnector = get_subconnector_type(link);
166 drm_object_property_set_value(&connector->base,
167 connector->dev->mode_config.dp_subconnector_property,
172 * initializes drm_device display-related structures, based on the information
173 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
174 * drm_encoder, drm_mode_config
176 * Returns 0 on success
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 struct drm_plane *plane,
184 unsigned long possible_crtcs,
185 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 struct amdgpu_dm_connector *amdgpu_dm_connector,
192 struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 struct amdgpu_encoder *aencoder,
195 uint32_t link_index);
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
199 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
200 struct drm_atomic_state *state,
203 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
205 static int amdgpu_dm_atomic_check(struct drm_device *dev,
206 struct drm_atomic_state *state);
208 static void handle_cursor_update(struct drm_plane *plane,
209 struct drm_plane_state *old_plane_state);
211 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
212 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
214 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
215 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
221 * dm_vblank_get_counter
224 * Get counter for number of vertical blanks
227 * struct amdgpu_device *adev - [in] desired amdgpu device
228 * int crtc - [in] which CRTC to get the counter from
231 * Counter for vertical blanks
233 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235 if (crtc >= adev->mode_info.num_crtc)
238 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240 if (acrtc->dm_irq_params.stream == NULL) {
241 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
246 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
250 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
251 u32 *vbl, u32 *position)
253 uint32_t v_blank_start, v_blank_end, h_position, v_position;
255 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
258 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260 if (acrtc->dm_irq_params.stream == NULL) {
261 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
267 * TODO rework base driver to use values directly.
268 * for now parse it back into reg-format
270 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
276 *position = v_position | (h_position << 16);
277 *vbl = v_blank_start | (v_blank_end << 16);
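/*
 * e.g. with v_position = 0x01F4 and h_position = 0x0320 this yields
 * *position = 0x032001F4: vertical position in the low 16 bits,
 * horizontal position in the high 16 bits; *vbl packs the vblank
 * start/end lines the same way.
 */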
283 static bool dm_is_idle(void *handle)
289 static int dm_wait_for_idle(void *handle)
295 static bool dm_check_soft_reset(void *handle)
300 static int dm_soft_reset(void *handle)
306 static struct amdgpu_crtc *
307 get_crtc_by_otg_inst(struct amdgpu_device *adev,
310 struct drm_device *dev = adev_to_drm(adev);
311 struct drm_crtc *crtc;
312 struct amdgpu_crtc *amdgpu_crtc;
314 if (otg_inst == -1) {
316 return adev->mode_info.crtcs[0];
319 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
320 amdgpu_crtc = to_amdgpu_crtc(crtc);
322 if (amdgpu_crtc->otg_inst == otg_inst)
329 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
331 return acrtc->dm_irq_params.freesync_config.state ==
332 VRR_STATE_ACTIVE_VARIABLE ||
333 acrtc->dm_irq_params.freesync_config.state ==
334 VRR_STATE_ACTIVE_FIXED;
337 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
339 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
340 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
344 * dm_pflip_high_irq() - Handle pageflip interrupt
345 * @interrupt_params: ignored
347 * Handles the pageflip interrupt by notifying all interested parties
348 * that the pageflip has been completed.
350 static void dm_pflip_high_irq(void *interrupt_params)
352 struct amdgpu_crtc *amdgpu_crtc;
353 struct common_irq_params *irq_params = interrupt_params;
354 struct amdgpu_device *adev = irq_params->adev;
356 struct drm_pending_vblank_event *e;
357 uint32_t vpos, hpos, v_blank_start, v_blank_end;
360 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
362 /* IRQ could occur when in initial stage */
363 /* TODO work and BO cleanup */
364 if (amdgpu_crtc == NULL) {
365 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
369 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
371 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
372 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
373 amdgpu_crtc->pflip_status,
374 AMDGPU_FLIP_SUBMITTED,
375 amdgpu_crtc->crtc_id,
377 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
381 /* page flip completed. */
382 e = amdgpu_crtc->event;
383 amdgpu_crtc->event = NULL;
388 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
390 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
392 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
393 &v_blank_end, &hpos, &vpos) ||
394 (vpos < v_blank_start)) {
395 /* Update to the correct count and vblank timestamp if racing with
396 * the vblank irq. This also updates to the correct vblank timestamp
397 * even in VRR mode, as scanout is past the front-porch at this point.
399 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
401 /* Wake up userspace by sending the pageflip event with proper
402 * count and timestamp of vblank of flip completion.
405 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
407 /* Event sent, so done with vblank for this flip */
408 drm_crtc_vblank_put(&amdgpu_crtc->base);
411 /* VRR active and inside front-porch: vblank count and
412 * timestamp for pageflip event will only be up to date after
413 * drm_crtc_handle_vblank() has been executed from late vblank
414 * irq handler after start of back-porch (vline 0). We queue the
415 * pageflip event for send-out by drm_crtc_handle_vblank() with
416 * updated timestamp and count, once it runs after us.
418 * We need to open-code this instead of using the helper
419 * drm_crtc_arm_vblank_event(), as that helper would
420 * call drm_crtc_accurate_vblank_count(), which we must
421 * not call in VRR mode while we are in front-porch!
424 /* sequence will be replaced by real count during send-out. */
425 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
426 e->pipe = amdgpu_crtc->crtc_id;
428 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
432 /* Keep track of the vblank of this flip for flip throttling. We use the
433 * cooked hw counter, as that one is incremented at the start of the vblank
434 * in which the pageflip completed, so last_flip_vblank is the forbidden count
435 * for queueing new pageflips if vsync + VRR is enabled.
437 amdgpu_crtc->dm_irq_params.last_flip_vblank =
438 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
440 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
441 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
443 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
444 amdgpu_crtc->crtc_id, amdgpu_crtc,
445 vrr_active, (int) !e);
448 static void dm_vupdate_high_irq(void *interrupt_params)
450 struct common_irq_params *irq_params = interrupt_params;
451 struct amdgpu_device *adev = irq_params->adev;
452 struct amdgpu_crtc *acrtc;
456 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
459 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
461 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
465 /* Core vblank handling is done here after the end of the front-porch in
466 * vrr mode, as vblank timestamping will give valid results now that it
467 * is done after the front-porch. This will also deliver any page-flip
468 * completion events that have been queued to us if a pageflip happened
469 * inside the front-porch.
472 drm_crtc_handle_vblank(&acrtc->base);
474 /* BTR processing for pre-DCE12 ASICs */
475 if (acrtc->dm_irq_params.stream &&
476 adev->family < AMDGPU_FAMILY_AI) {
477 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
478 mod_freesync_handle_v_update(
479 adev->dm.freesync_module,
480 acrtc->dm_irq_params.stream,
481 &acrtc->dm_irq_params.vrr_params);
483 dc_stream_adjust_vmin_vmax(
485 acrtc->dm_irq_params.stream,
486 &acrtc->dm_irq_params.vrr_params.adjust);
487 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
494 * dm_crtc_high_irq() - Handles CRTC interrupt
495 * @interrupt_params: used for determining the CRTC instance
497 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
500 static void dm_crtc_high_irq(void *interrupt_params)
502 struct common_irq_params *irq_params = interrupt_params;
503 struct amdgpu_device *adev = irq_params->adev;
504 struct amdgpu_crtc *acrtc;
508 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
512 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
514 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
515 vrr_active, acrtc->dm_irq_params.active_planes);
518 * Core vblank handling at the start of the front-porch is only possible
519 * in non-vrr mode, as only then will vblank timestamping give valid
520 * results while it is done in the front-porch. Otherwise defer it
521 * to dm_vupdate_high_irq after the end of the front-porch.
524 drm_crtc_handle_vblank(&acrtc->base);
527 * The following must happen at the start of vblank, for crc
528 * computation and below-the-range (btr) support in vrr mode.
530 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532 /* BTR updates need to happen before VUPDATE on Vega and above. */
533 if (adev->family < AMDGPU_FAMILY_AI)
536 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
538 if (acrtc->dm_irq_params.stream &&
539 acrtc->dm_irq_params.vrr_params.supported &&
540 acrtc->dm_irq_params.freesync_config.state ==
541 VRR_STATE_ACTIVE_VARIABLE) {
542 mod_freesync_handle_v_update(adev->dm.freesync_module,
543 acrtc->dm_irq_params.stream,
544 &acrtc->dm_irq_params.vrr_params);
546 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
547 &acrtc->dm_irq_params.vrr_params.adjust);
551 * If there aren't any active_planes then the DCN HUBP may be clock-gated.
552 * In that case, pageflip completion interrupts won't fire and pageflip
553 * completion events won't get delivered. Prevent this by sending
554 * pending pageflip events from here if a flip is still pending.
556 * If any planes are enabled, use dm_pflip_high_irq() instead, to
557 * avoid race conditions between flip programming and completion,
558 * which could cause too early flip completion events.
560 if (adev->family >= AMDGPU_FAMILY_RV &&
561 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
562 acrtc->dm_irq_params.active_planes == 0) {
564 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
566 drm_crtc_vblank_put(&acrtc->base);
568 acrtc->pflip_status = AMDGPU_FLIP_NONE;
571 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
574 static int dm_set_clockgating_state(void *handle,
575 enum amd_clockgating_state state)
580 static int dm_set_powergating_state(void *handle,
581 enum amd_powergating_state state)
586 /* Prototypes of private functions */
587 static int dm_early_init(void* handle);
589 /* Allocate memory for FBC compressed data */
590 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
592 struct drm_device *dev = connector->dev;
593 struct amdgpu_device *adev = drm_to_adev(dev);
594 struct dm_compressor_info *compressor = &adev->dm.compressor;
595 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
596 struct drm_display_mode *mode;
597 unsigned long max_size = 0;
599 if (adev->dm.dc->fbc_compressor == NULL)
602 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
605 if (compressor->bo_ptr)
609 list_for_each_entry(mode, &connector->modes, head) {
610 if (max_size < mode->htotal * mode->vtotal)
611 max_size = mode->htotal * mode->vtotal;
615 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
616 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
617 &compressor->gpu_addr, &compressor->cpu_addr);
620 DRM_ERROR("DM: Failed to initialize FBC\n");
622 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
623 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
630 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
631 int pipe, bool *enabled,
632 unsigned char *buf, int max_bytes)
634 struct drm_device *dev = dev_get_drvdata(kdev);
635 struct amdgpu_device *adev = drm_to_adev(dev);
636 struct drm_connector *connector;
637 struct drm_connector_list_iter conn_iter;
638 struct amdgpu_dm_connector *aconnector;
643 mutex_lock(&adev->dm.audio_lock);
645 drm_connector_list_iter_begin(dev, &conn_iter);
646 drm_for_each_connector_iter(connector, &conn_iter) {
647 aconnector = to_amdgpu_dm_connector(connector);
648 if (aconnector->audio_inst != port)
652 ret = drm_eld_size(connector->eld);
653 memcpy(buf, connector->eld, min(max_bytes, ret));
657 drm_connector_list_iter_end(&conn_iter);
659 mutex_unlock(&adev->dm.audio_lock);
661 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
666 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
667 .get_eld = amdgpu_dm_audio_component_get_eld,
670 static int amdgpu_dm_audio_component_bind(struct device *kdev,
671 struct device *hda_kdev, void *data)
673 struct drm_device *dev = dev_get_drvdata(kdev);
674 struct amdgpu_device *adev = drm_to_adev(dev);
675 struct drm_audio_component *acomp = data;
677 acomp->ops = &amdgpu_dm_audio_component_ops;
679 adev->dm.audio_component = acomp;
684 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
685 struct device *hda_kdev, void *data)
687 struct drm_device *dev = dev_get_drvdata(kdev);
688 struct amdgpu_device *adev = drm_to_adev(dev);
689 struct drm_audio_component *acomp = data;
693 adev->dm.audio_component = NULL;
696 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
697 .bind = amdgpu_dm_audio_component_bind,
698 .unbind = amdgpu_dm_audio_component_unbind,
701 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
708 adev->mode_info.audio.enabled = true;
710 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
712 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
713 adev->mode_info.audio.pin[i].channels = -1;
714 adev->mode_info.audio.pin[i].rate = -1;
715 adev->mode_info.audio.pin[i].bits_per_sample = -1;
716 adev->mode_info.audio.pin[i].status_bits = 0;
717 adev->mode_info.audio.pin[i].category_code = 0;
718 adev->mode_info.audio.pin[i].connected = false;
719 adev->mode_info.audio.pin[i].id =
720 adev->dm.dc->res_pool->audios[i]->inst;
721 adev->mode_info.audio.pin[i].offset = 0;
724 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
728 adev->dm.audio_registered = true;
733 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
738 if (!adev->mode_info.audio.enabled)
741 if (adev->dm.audio_registered) {
742 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
743 adev->dm.audio_registered = false;
746 /* TODO: Disable audio? */
748 adev->mode_info.audio.enabled = false;
751 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
753 struct drm_audio_component *acomp = adev->dm.audio_component;
755 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
756 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
758 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
763 static int dm_dmub_hw_init(struct amdgpu_device *adev)
765 const struct dmcub_firmware_header_v1_0 *hdr;
766 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
767 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
768 const struct firmware *dmub_fw = adev->dm.dmub_fw;
769 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
770 struct abm *abm = adev->dm.dc->res_pool->abm;
771 struct dmub_srv_hw_params hw_params;
772 enum dmub_status status;
773 const unsigned char *fw_inst_const, *fw_bss_data;
774 uint32_t i, fw_inst_const_size, fw_bss_data_size;
778 /* DMUB isn't supported on the ASIC. */
782 DRM_ERROR("No framebuffer info for DMUB service.\n");
787 /* Firmware required for DMUB support. */
788 DRM_ERROR("No firmware provided for DMUB.\n");
792 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
793 if (status != DMUB_STATUS_OK) {
794 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
798 if (!has_hw_support) {
799 DRM_INFO("DMUB unsupported on ASIC\n");
803 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
805 fw_inst_const = dmub_fw->data +
806 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
809 fw_bss_data = dmub_fw->data +
810 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
811 le32_to_cpu(hdr->inst_const_bytes);
813 /* Copy firmware and bios info into FB memory. */
814 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
815 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
817 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
819 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
820 * amdgpu_ucode_init_single_fw will load dmub firmware
821 * fw_inst_const part to cw0; otherwise, the firmware back door load
822 * will be done by dm_dmub_hw_init
824 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
825 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
829 if (fw_bss_data_size)
830 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
831 fw_bss_data, fw_bss_data_size);
833 /* Copy firmware bios info into FB memory. */
834 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
837 /* Reset regions that need to be reset. */
838 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
839 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
841 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
842 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
844 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
845 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
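/*
 * At this point the DMUB framebuffer windows hold: window 0 the raw
 * instruction/constant data, window 2 the BSS/data section, window 3 a
 * copy of the VBIOS, while windows 4-6 (mailbox, trace buffer and
 * firmware state) have been zero-initialized.
 */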
847 /* Initialize hardware. */
848 memset(&hw_params, 0, sizeof(hw_params));
849 hw_params.fb_base = adev->gmc.fb_start;
850 hw_params.fb_offset = adev->gmc.aper_base;
852 /* backdoor load firmware and trigger dmub running */
853 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
854 hw_params.load_inst_const = true;
857 hw_params.psp_version = dmcu->psp_version;
859 for (i = 0; i < fb_info->num_fb; ++i)
860 hw_params.fb[i] = &fb_info->fb[i];
862 status = dmub_srv_hw_init(dmub_srv, &hw_params);
863 if (status != DMUB_STATUS_OK) {
864 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
868 /* Wait for firmware load to finish. */
869 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
870 if (status != DMUB_STATUS_OK)
871 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
873 /* Init DMCU and ABM if available. */
875 dmcu->funcs->dmcu_init(dmcu);
876 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
879 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
880 if (!adev->dm.dc->ctx->dmub_srv) {
881 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
885 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
886 adev->dm.dmcub_fw_version);
891 #if defined(CONFIG_DRM_AMD_DC_DCN)
892 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
895 uint32_t logical_addr_low;
896 uint32_t logical_addr_high;
897 uint32_t agp_base, agp_bot, agp_top;
898 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
900 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
901 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
903 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
905 * Raven2 has a HW issue: it cannot use vram that lies beyond
906 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
907 * system aperture high address (add 1) to get rid of the VM fault
908 * and hardware hang.
910 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
912 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
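/*
 * The aperture bounds are kept in 256 KiB units (hence the >> 18 here
 * and the matching << 18 when system_aperture.start_addr/end_addr are
 * filled in below); e.g. an fb_end of 0x2_0000_0000 (8 GiB) becomes
 * logical_addr_high = 0x8000.
 */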
915 agp_bot = adev->gmc.agp_start >> 24;
916 agp_top = adev->gmc.agp_end >> 24;
919 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
920 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
921 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
922 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
923 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
924 page_table_base.low_part = lower_32_bits(pt_base);
926 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
927 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
929 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
930 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
931 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
933 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
934 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
935 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
937 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
938 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
939 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
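/*
 * The GART page table start/end addresses are handled as 4 KiB page
 * frame numbers: shifted right by 12 above when split into
 * high_part/low_part, and shifted back left by 12 here to recover the
 * full byte addresses.
 */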
941 pa_config->is_hvm_enabled = 0;
946 #ifdef CONFIG_DEBUG_FS
947 static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
949 dm->crc_win_x_start_property =
950 drm_property_create_range(adev_to_drm(dm->adev),
951 DRM_MODE_PROP_ATOMIC,
952 "AMD_CRC_WIN_X_START", 0, U16_MAX);
953 if (!dm->crc_win_x_start_property)
956 dm->crc_win_y_start_property =
957 drm_property_create_range(adev_to_drm(dm->adev),
958 DRM_MODE_PROP_ATOMIC,
959 "AMD_CRC_WIN_Y_START", 0, U16_MAX);
960 if (!dm->crc_win_y_start_property)
963 dm->crc_win_x_end_property =
964 drm_property_create_range(adev_to_drm(dm->adev),
965 DRM_MODE_PROP_ATOMIC,
966 "AMD_CRC_WIN_X_END", 0, U16_MAX);
967 if (!dm->crc_win_x_end_property)
970 dm->crc_win_y_end_property =
971 drm_property_create_range(adev_to_drm(dm->adev),
972 DRM_MODE_PROP_ATOMIC,
973 "AMD_CRC_WIN_Y_END", 0, U16_MAX);
974 if (!dm->crc_win_y_end_property)
981 static int amdgpu_dm_init(struct amdgpu_device *adev)
983 struct dc_init_data init_data;
984 #ifdef CONFIG_DRM_AMD_DC_HDCP
985 struct dc_callback_init init_params;
989 adev->dm.ddev = adev_to_drm(adev);
990 adev->dm.adev = adev;
992 /* Zero all the fields */
993 memset(&init_data, 0, sizeof(init_data));
994 #ifdef CONFIG_DRM_AMD_DC_HDCP
995 memset(&init_params, 0, sizeof(init_params));
998 mutex_init(&adev->dm.dc_lock);
999 mutex_init(&adev->dm.audio_lock);
1001 if (amdgpu_dm_irq_init(adev)) {
1002 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1006 init_data.asic_id.chip_family = adev->family;
1008 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1009 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1011 init_data.asic_id.vram_width = adev->gmc.vram_width;
1012 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1013 init_data.asic_id.atombios_base_address =
1014 adev->mode_info.atom_context->bios;
1016 init_data.driver = adev;
1018 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1020 if (!adev->dm.cgs_device) {
1021 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1025 init_data.cgs_device = adev->dm.cgs_device;
1027 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1029 switch (adev->asic_type) {
1034 init_data.flags.gpu_vm_support = true;
1035 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1036 init_data.flags.disable_dmcu = true;
1038 #if defined(CONFIG_DRM_AMD_DC_DCN)
1040 init_data.flags.gpu_vm_support = true;
1047 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1048 init_data.flags.fbc_support = true;
1050 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1051 init_data.flags.multi_mon_pp_mclk_switch = true;
1053 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1054 init_data.flags.disable_fractional_pwm = true;
1056 init_data.flags.power_down_display_on_boot = true;
1058 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1060 /* Display Core create. */
1061 adev->dm.dc = dc_create(&init_data);
1064 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1066 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1070 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1071 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1072 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1075 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1076 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1078 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1079 adev->dm.dc->debug.disable_stutter = true;
1081 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1082 adev->dm.dc->debug.disable_dsc = true;
1084 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1085 adev->dm.dc->debug.disable_clock_gate = true;
1087 r = dm_dmub_hw_init(adev);
1089 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1093 dc_hardware_init(adev->dm.dc);
1095 #if defined(CONFIG_DRM_AMD_DC_DCN)
1096 if (adev->apu_flags) {
1097 struct dc_phy_addr_space_config pa_config;
1099 mmhub_read_system_context(adev, &pa_config);
1101 /* Call the DC init_memory func */
1102 dc_setup_system_context(adev->dm.dc, &pa_config);
1106 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1107 if (!adev->dm.freesync_module) {
1109 "amdgpu: failed to initialize freesync_module.\n");
1111 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1112 adev->dm.freesync_module);
1114 amdgpu_dm_init_color_mod();
1116 #ifdef CONFIG_DRM_AMD_DC_HDCP
1117 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1118 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1120 if (!adev->dm.hdcp_workqueue)
1121 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1123 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1125 dc_init_callbacks(adev->dm.dc, &init_params);
1128 #ifdef CONFIG_DEBUG_FS
1129 if (create_crtc_crc_properties(&adev->dm))
1130 DRM_ERROR("amdgpu: failed to create crc property.\n");
1132 if (amdgpu_dm_initialize_drm_device(adev)) {
1134 "amdgpu: failed to initialize sw for display support.\n");
1138 /* create fake encoders for MST */
1139 dm_dp_create_fake_mst_encoders(adev);
1141 /* TODO: Add_display_info? */
1143 /* TODO use dynamic cursor width */
1144 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1145 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1147 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1149 "amdgpu: failed to initialize sw for display support.\n");
1154 DRM_DEBUG_DRIVER("KMS initialized.\n");
1158 amdgpu_dm_fini(adev);
1163 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1167 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1168 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1171 amdgpu_dm_audio_fini(adev);
1173 amdgpu_dm_destroy_drm_device(&adev->dm);
1175 #ifdef CONFIG_DRM_AMD_DC_HDCP
1176 if (adev->dm.hdcp_workqueue) {
1177 hdcp_destroy(adev->dm.hdcp_workqueue);
1178 adev->dm.hdcp_workqueue = NULL;
1182 dc_deinit_callbacks(adev->dm.dc);
1184 if (adev->dm.dc->ctx->dmub_srv) {
1185 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1186 adev->dm.dc->ctx->dmub_srv = NULL;
1189 if (adev->dm.dmub_bo)
1190 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1191 &adev->dm.dmub_bo_gpu_addr,
1192 &adev->dm.dmub_bo_cpu_addr);
1194 /* DC Destroy TODO: Replace destroy DAL */
1196 dc_destroy(&adev->dm.dc);
1198 * TODO: pageflip, vblank interrupt
1200 * amdgpu_dm_irq_fini(adev);
1203 if (adev->dm.cgs_device) {
1204 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1205 adev->dm.cgs_device = NULL;
1207 if (adev->dm.freesync_module) {
1208 mod_freesync_destroy(adev->dm.freesync_module);
1209 adev->dm.freesync_module = NULL;
1212 mutex_destroy(&adev->dm.audio_lock);
1213 mutex_destroy(&adev->dm.dc_lock);
1218 static int load_dmcu_fw(struct amdgpu_device *adev)
1220 const char *fw_name_dmcu = NULL;
1222 const struct dmcu_firmware_header_v1_0 *hdr;
1224 switch (adev->asic_type) {
1225 #if defined(CONFIG_DRM_AMD_DC_SI)
1240 case CHIP_POLARIS11:
1241 case CHIP_POLARIS10:
1242 case CHIP_POLARIS12:
1250 case CHIP_SIENNA_CICHLID:
1251 case CHIP_NAVY_FLOUNDER:
1252 case CHIP_DIMGREY_CAVEFISH:
1256 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1259 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1260 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1261 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1262 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1267 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1271 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1272 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1276 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1278 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1279 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1280 adev->dm.fw_dmcu = NULL;
1284 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1289 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1291 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1293 release_firmware(adev->dm.fw_dmcu);
1294 adev->dm.fw_dmcu = NULL;
1298 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1299 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1300 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1301 adev->firmware.fw_size +=
1302 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1304 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1305 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1306 adev->firmware.fw_size +=
1307 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1309 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1311 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1316 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1318 struct amdgpu_device *adev = ctx;
1320 return dm_read_reg(adev->dm.dc->ctx, address);
1323 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1326 struct amdgpu_device *adev = ctx;
1328 return dm_write_reg(adev->dm.dc->ctx, address, value);
1331 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1333 struct dmub_srv_create_params create_params;
1334 struct dmub_srv_region_params region_params;
1335 struct dmub_srv_region_info region_info;
1336 struct dmub_srv_fb_params fb_params;
1337 struct dmub_srv_fb_info *fb_info;
1338 struct dmub_srv *dmub_srv;
1339 const struct dmcub_firmware_header_v1_0 *hdr;
1340 const char *fw_name_dmub;
1341 enum dmub_asic dmub_asic;
1342 enum dmub_status status;
1345 switch (adev->asic_type) {
1347 dmub_asic = DMUB_ASIC_DCN21;
1348 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1349 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1350 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1352 case CHIP_SIENNA_CICHLID:
1353 dmub_asic = DMUB_ASIC_DCN30;
1354 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1356 case CHIP_NAVY_FLOUNDER:
1357 dmub_asic = DMUB_ASIC_DCN30;
1358 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1361 dmub_asic = DMUB_ASIC_DCN301;
1362 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1364 case CHIP_DIMGREY_CAVEFISH:
1365 dmub_asic = DMUB_ASIC_DCN302;
1366 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1370 /* ASIC doesn't support DMUB. */
1374 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1376 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1380 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1382 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1386 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1388 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1389 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1390 AMDGPU_UCODE_ID_DMCUB;
1391 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1393 adev->firmware.fw_size +=
1394 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1396 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1397 adev->dm.dmcub_fw_version);
1400 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1402 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1403 dmub_srv = adev->dm.dmub_srv;
1406 DRM_ERROR("Failed to allocate DMUB service!\n");
1410 memset(&create_params, 0, sizeof(create_params));
1411 create_params.user_ctx = adev;
1412 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1413 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1414 create_params.asic = dmub_asic;
1416 /* Create the DMUB service. */
1417 status = dmub_srv_create(dmub_srv, &create_params);
1418 if (status != DMUB_STATUS_OK) {
1419 DRM_ERROR("Error creating DMUB service: %d\n", status);
1423 /* Calculate the size of all the regions for the DMUB service. */
1424 memset(&region_params, 0, sizeof(region_params));
1426 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1427 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1428 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1429 region_params.vbios_size = adev->bios_size;
1430 region_params.fw_bss_data = region_params.bss_data_size ?
1431 adev->dm.dmub_fw->data +
1432 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1433 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1434 region_params.fw_inst_const =
1435 adev->dm.dmub_fw->data +
1436 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1439 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1442 if (status != DMUB_STATUS_OK) {
1443 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1448 * Allocate a framebuffer based on the total size of all the regions.
1449 * TODO: Move this into GART.
1451 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1452 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1453 &adev->dm.dmub_bo_gpu_addr,
1454 &adev->dm.dmub_bo_cpu_addr);
1458 /* Rebase the regions on the framebuffer address. */
1459 memset(&fb_params, 0, sizeof(fb_params));
1460 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1461 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1462 fb_params.region_info = &region_info;
1464 adev->dm.dmub_fb_info =
1465 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1466 fb_info = adev->dm.dmub_fb_info;
1470 "Failed to allocate framebuffer info for DMUB service!\n");
1474 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1475 if (status != DMUB_STATUS_OK) {
1476 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1483 static int dm_sw_init(void *handle)
1485 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1488 r = dm_dmub_sw_init(adev);
1492 return load_dmcu_fw(adev);
1495 static int dm_sw_fini(void *handle)
1497 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1499 kfree(adev->dm.dmub_fb_info);
1500 adev->dm.dmub_fb_info = NULL;
1502 if (adev->dm.dmub_srv) {
1503 dmub_srv_destroy(adev->dm.dmub_srv);
1504 adev->dm.dmub_srv = NULL;
1507 release_firmware(adev->dm.dmub_fw);
1508 adev->dm.dmub_fw = NULL;
1510 release_firmware(adev->dm.fw_dmcu);
1511 adev->dm.fw_dmcu = NULL;
1516 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1518 struct amdgpu_dm_connector *aconnector;
1519 struct drm_connector *connector;
1520 struct drm_connector_list_iter iter;
1523 drm_connector_list_iter_begin(dev, &iter);
1524 drm_for_each_connector_iter(connector, &iter) {
1525 aconnector = to_amdgpu_dm_connector(connector);
1526 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1527 aconnector->mst_mgr.aux) {
1528 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1530 aconnector->base.base.id);
1532 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1534 DRM_ERROR("DM_MST: Failed to start MST\n");
1535 aconnector->dc_link->type =
1536 dc_connection_single;
1541 drm_connector_list_iter_end(&iter);
1546 static int dm_late_init(void *handle)
1548 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1550 struct dmcu_iram_parameters params;
1551 unsigned int linear_lut[16];
1553 struct dmcu *dmcu = NULL;
1556 dmcu = adev->dm.dc->res_pool->dmcu;
1558 for (i = 0; i < 16; i++)
1559 linear_lut[i] = 0xFFFF * i / 15;
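/*
 * i.e. 0x0000, 0x1111, 0x2222, ... 0xFFFF: a linear 16-entry ramp over
 * the full 16-bit backlight range (0xFFFF / 15 = 0x1111).
 */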
1562 params.backlight_ramping_start = 0xCCCC;
1563 params.backlight_ramping_reduction = 0xCCCCCCCC;
1564 params.backlight_lut_array_size = 16;
1565 params.backlight_lut_array = linear_lut;
1567 /* Min backlight level after ABM reduction; don't allow below 1%:
1568 * 0xFFFF * 0.01 = 0x28F (655)
1570 params.min_abm_backlight = 0x28F;
1572 /* In the case where abm is implemented on dmcub,
1573 * the dmcu object will be NULL.
1574 * ABM 2.4 and up are implemented on dmcub.
1577 ret = dmcu_load_iram(dmcu, params);
1578 else if (adev->dm.dc->ctx->dmub_srv)
1579 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1584 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1587 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1589 struct amdgpu_dm_connector *aconnector;
1590 struct drm_connector *connector;
1591 struct drm_connector_list_iter iter;
1592 struct drm_dp_mst_topology_mgr *mgr;
1594 bool need_hotplug = false;
1596 drm_connector_list_iter_begin(dev, &iter);
1597 drm_for_each_connector_iter(connector, &iter) {
1598 aconnector = to_amdgpu_dm_connector(connector);
1599 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1600 aconnector->mst_port)
1603 mgr = &aconnector->mst_mgr;
1606 drm_dp_mst_topology_mgr_suspend(mgr);
1608 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1610 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1611 need_hotplug = true;
1615 drm_connector_list_iter_end(&iter);
1618 drm_kms_helper_hotplug_event(dev);
1621 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1623 struct smu_context *smu = &adev->smu;
1626 if (!is_support_sw_smu(adev))
1629 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1630 * on the Windows driver dc implementation.
1631 * For Navi1x, the clock settings of the dcn watermarks are fixed. The settings
1632 * should be passed to smu during boot up and on resume from s3.
1633 * boot up: dc calculates the dcn watermark clock settings within dc_create,
1634 * dcn20_resource_construct
1635 * then call pplib functions below to pass the settings to smu:
1636 * smu_set_watermarks_for_clock_ranges
1637 * smu_set_watermarks_table
1638 * navi10_set_watermarks_table
1639 * smu_write_watermarks_table
1641 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
1642 * dc has implemented a different flow for the Windows driver:
1643 * dc_hardware_init / dc_set_power_state
1648 * smu_set_watermarks_for_clock_ranges
1649 * renoir_set_watermarks_table
1650 * smu_write_watermarks_table
1653 * dc_hardware_init -> amdgpu_dm_init
1654 * dc_set_power_state --> dm_resume
1656 * therefore, this function applies to navi10/12/14 but not Renoir
1659 switch (adev->asic_type) {
1668 ret = smu_write_watermarks_table(smu);
1670 DRM_ERROR("Failed to update WMTABLE!\n");
1678 * dm_hw_init() - Initialize DC device
1679 * @handle: The base driver device containing the amdgpu_dm device.
1681 * Initialize the &struct amdgpu_display_manager device. This involves calling
1682 * the initializers of each DM component, then populating the struct with them.
1684 * Although the function implies hardware initialization, both hardware and
1685 * software are initialized here. Splitting them out to their relevant init
1686 * hooks is a future TODO item.
1688 * Some notable things that are initialized here:
1690 * - Display Core, both software and hardware
1691 * - DC modules that we need (freesync and color management)
1692 * - DRM software states
1693 * - Interrupt sources and handlers
1695 * - Debug FS entries, if enabled
1697 static int dm_hw_init(void *handle)
1699 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1700 /* Create DAL display manager */
1701 amdgpu_dm_init(adev);
1702 amdgpu_dm_hpd_init(adev);
1708 * dm_hw_fini() - Teardown DC device
1709 * @handle: The base driver device containing the amdgpu_dm device.
1711 * Teardown components within &struct amdgpu_display_manager that require
1712 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1713 * were loaded. Also flush IRQ workqueues and disable them.
1715 static int dm_hw_fini(void *handle)
1717 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1719 amdgpu_dm_hpd_fini(adev);
1721 amdgpu_dm_irq_fini(adev);
1722 amdgpu_dm_fini(adev);
1727 static int dm_enable_vblank(struct drm_crtc *crtc);
1728 static void dm_disable_vblank(struct drm_crtc *crtc);
1730 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1731 struct dc_state *state, bool enable)
1733 enum dc_irq_source irq_source;
1734 struct amdgpu_crtc *acrtc;
1738 for (i = 0; i < state->stream_count; i++) {
1739 acrtc = get_crtc_by_otg_inst(
1740 adev, state->stream_status[i].primary_otg_inst);
1742 if (acrtc && state->stream_status[i].plane_count != 0) {
1743 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1744 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1745 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1746 acrtc->crtc_id, enable ? "en" : "dis", rc);
1748 DRM_WARN("Failed to %s pflip interrupts\n",
1749 enable ? "enable" : "disable");
1752 rc = dm_enable_vblank(&acrtc->base);
1754 DRM_WARN("Failed to enable vblank interrupts\n");
1756 dm_disable_vblank(&acrtc->base);
1764 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1766 struct dc_state *context = NULL;
1767 enum dc_status res = DC_ERROR_UNEXPECTED;
1769 struct dc_stream_state *del_streams[MAX_PIPES];
1770 int del_streams_count = 0;
1772 memset(del_streams, 0, sizeof(del_streams));
1774 context = dc_create_state(dc);
1775 if (context == NULL)
1776 goto context_alloc_fail;
1778 dc_resource_state_copy_construct_current(dc, context);
1780 /* First remove from context all streams */
1781 for (i = 0; i < context->stream_count; i++) {
1782 struct dc_stream_state *stream = context->streams[i];
1784 del_streams[del_streams_count++] = stream;
1787 /* Remove all planes for removed streams and then remove the streams */
1788 for (i = 0; i < del_streams_count; i++) {
1789 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1790 res = DC_FAIL_DETACH_SURFACES;
1794 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1800 res = dc_validate_global_state(dc, context, false);
1803 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1807 res = dc_commit_state(dc, context);
1810 dc_release_state(context);
1816 static int dm_suspend(void *handle)
1818 struct amdgpu_device *adev = handle;
1819 struct amdgpu_display_manager *dm = &adev->dm;
1822 if (amdgpu_in_reset(adev)) {
1823 mutex_lock(&dm->dc_lock);
1824 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1826 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1828 amdgpu_dm_commit_zero_streams(dm->dc);
1830 amdgpu_dm_irq_suspend(adev);
1835 WARN_ON(adev->dm.cached_state);
1836 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1838 s3_handle_mst(adev_to_drm(adev), true);
1840 amdgpu_dm_irq_suspend(adev);
1843 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1848 static struct amdgpu_dm_connector *
1849 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1850 struct drm_crtc *crtc)
1853 struct drm_connector_state *new_con_state;
1854 struct drm_connector *connector;
1855 struct drm_crtc *crtc_from_state;
1857 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1858 crtc_from_state = new_con_state->crtc;
1860 if (crtc_from_state == crtc)
1861 return to_amdgpu_dm_connector(connector);
1867 static void emulated_link_detect(struct dc_link *link)
1869 struct dc_sink_init_data sink_init_data = { 0 };
1870 struct display_sink_capability sink_caps = { 0 };
1871 enum dc_edid_status edid_status;
1872 struct dc_context *dc_ctx = link->ctx;
1873 struct dc_sink *sink = NULL;
1874 struct dc_sink *prev_sink = NULL;
1876 link->type = dc_connection_none;
1877 prev_sink = link->local_sink;
1879 if (prev_sink != NULL)
1880 dc_sink_retain(prev_sink);
1882 switch (link->connector_signal) {
1883 case SIGNAL_TYPE_HDMI_TYPE_A: {
1884 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1885 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1889 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1890 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1891 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1895 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1896 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1897 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1901 case SIGNAL_TYPE_LVDS: {
1902 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1903 sink_caps.signal = SIGNAL_TYPE_LVDS;
1907 case SIGNAL_TYPE_EDP: {
1908 sink_caps.transaction_type =
1909 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1910 sink_caps.signal = SIGNAL_TYPE_EDP;
1914 case SIGNAL_TYPE_DISPLAY_PORT: {
1915 sink_caps.transaction_type =
1916 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1917 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1922 DC_ERROR("Invalid connector type! signal:%d\n",
1923 link->connector_signal);
1927 sink_init_data.link = link;
1928 sink_init_data.sink_signal = sink_caps.signal;
1930 sink = dc_sink_create(&sink_init_data);
1932 DC_ERROR("Failed to create sink!\n");
1936 /* dc_sink_create returns a new reference */
1937 link->local_sink = sink;
1939 edid_status = dm_helpers_read_local_edid(
1944 if (edid_status != EDID_OK)
1945 DC_ERROR("Failed to read EDID");
1949 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1950 struct amdgpu_display_manager *dm)
1953 struct dc_surface_update surface_updates[MAX_SURFACES];
1954 struct dc_plane_info plane_infos[MAX_SURFACES];
1955 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1956 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1957 struct dc_stream_update stream_update;
1961 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1964 dm_error("Failed to allocate update bundle\n");
1968 for (k = 0; k < dc_state->stream_count; k++) {
1969 bundle->stream_update.stream = dc_state->streams[k];
1971 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1972 bundle->surface_updates[m].surface =
1973 dc_state->stream_status->plane_states[m];
1974 bundle->surface_updates[m].surface->force_full_update =
1977 dc_commit_updates_for_stream(
1978 dm->dc, bundle->surface_updates,
1979 dc_state->stream_status->plane_count,
1980 dc_state->streams[k], &bundle->stream_update, dc_state);
1989 static void dm_set_dpms_off(struct dc_link *link)
1991 struct dc_stream_state *stream_state;
1992 struct amdgpu_dm_connector *aconnector = link->priv;
1993 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1994 struct dc_stream_update stream_update;
1995 bool dpms_off = true;
1997 memset(&stream_update, 0, sizeof(stream_update));
1998 stream_update.dpms_off = &dpms_off;
2000 mutex_lock(&adev->dm.dc_lock);
2001 stream_state = dc_stream_find_from_link(link);
2003 if (stream_state == NULL) {
2004 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2005 mutex_unlock(&adev->dm.dc_lock);
2009 stream_update.stream = stream_state;
2010 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2011 stream_state, &stream_update,
2012 stream_state->ctx->dc->current_state);
2013 mutex_unlock(&adev->dm.dc_lock);
2016 static int dm_resume(void *handle)
2018 struct amdgpu_device *adev = handle;
2019 struct drm_device *ddev = adev_to_drm(adev);
2020 struct amdgpu_display_manager *dm = &adev->dm;
2021 struct amdgpu_dm_connector *aconnector;
2022 struct drm_connector *connector;
2023 struct drm_connector_list_iter iter;
2024 struct drm_crtc *crtc;
2025 struct drm_crtc_state *new_crtc_state;
2026 struct dm_crtc_state *dm_new_crtc_state;
2027 struct drm_plane *plane;
2028 struct drm_plane_state *new_plane_state;
2029 struct dm_plane_state *dm_new_plane_state;
2030 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2031 enum dc_connection_type new_connection_type = dc_connection_none;
2032 struct dc_state *dc_state;
2035 if (amdgpu_in_reset(adev)) {
2036 dc_state = dm->cached_dc_state;
2038 r = dm_dmub_hw_init(adev);
2040 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2042 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2045 amdgpu_dm_irq_resume_early(adev);
2047 for (i = 0; i < dc_state->stream_count; i++) {
2048 dc_state->streams[i]->mode_changed = true;
2049 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2050 dc_state->stream_status->plane_states[j]->update_flags.raw
2055 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2057 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2059 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2061 dc_release_state(dm->cached_dc_state);
2062 dm->cached_dc_state = NULL;
2064 amdgpu_dm_irq_resume_late(adev);
2066 mutex_unlock(&dm->dc_lock);
2070 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2071 dc_release_state(dm_state->context);
2072 dm_state->context = dc_create_state(dm->dc);
2073 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2074 dc_resource_state_construct(dm->dc, dm_state->context);
2076 /* Before powering on DC we need to re-initialize DMUB. */
2077 r = dm_dmub_hw_init(adev);
2079 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2081 /* power on hardware */
2082 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2084 /* program HPD filter */
2088 * early enable HPD Rx IRQ, should be done before set mode as short
2089 * pulse interrupts are used for MST
2091 amdgpu_dm_irq_resume_early(adev);
2093 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2094 s3_handle_mst(ddev, false);
2097 drm_connector_list_iter_begin(ddev, &iter);
2098 drm_for_each_connector_iter(connector, &iter) {
2099 aconnector = to_amdgpu_dm_connector(connector);
2102 * this is the case when traversing through already created
2103 * MST connectors; they should be skipped
2105 if (aconnector->mst_port)
2108 mutex_lock(&aconnector->hpd_lock);
2109 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2110 DRM_ERROR("KMS: Failed to detect connector\n");
2112 if (aconnector->base.force && new_connection_type == dc_connection_none)
2113 emulated_link_detect(aconnector->dc_link);
2115 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2117 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2118 aconnector->fake_enable = false;
2120 if (aconnector->dc_sink)
2121 dc_sink_release(aconnector->dc_sink);
2122 aconnector->dc_sink = NULL;
2123 amdgpu_dm_update_connector_after_detect(aconnector);
2124 mutex_unlock(&aconnector->hpd_lock);
2126 drm_connector_list_iter_end(&iter);
2128 /* Force mode set in atomic commit */
2129 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2130 new_crtc_state->active_changed = true;
2133 * atomic_check is expected to create the dc states. We need to release
2134 * them here, since they were duplicated as part of the suspend procedure.
2137 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2138 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2139 if (dm_new_crtc_state->stream) {
2140 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2141 dc_stream_release(dm_new_crtc_state->stream);
2142 dm_new_crtc_state->stream = NULL;
2146 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2147 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2148 if (dm_new_plane_state->dc_state) {
2149 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2150 dc_plane_state_release(dm_new_plane_state->dc_state);
2151 dm_new_plane_state->dc_state = NULL;
2155 drm_atomic_helper_resume(ddev, dm->cached_state);
2157 dm->cached_state = NULL;
2159 amdgpu_dm_irq_resume_late(adev);
2161 amdgpu_dm_smu_write_watermarks_table(adev);
2169 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2170 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2171 * the base driver's device list to be initialized and torn down accordingly.
2173 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2176 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2178 .early_init = dm_early_init,
2179 .late_init = dm_late_init,
2180 .sw_init = dm_sw_init,
2181 .sw_fini = dm_sw_fini,
2182 .hw_init = dm_hw_init,
2183 .hw_fini = dm_hw_fini,
2184 .suspend = dm_suspend,
2185 .resume = dm_resume,
2186 .is_idle = dm_is_idle,
2187 .wait_for_idle = dm_wait_for_idle,
2188 .check_soft_reset = dm_check_soft_reset,
2189 .soft_reset = dm_soft_reset,
2190 .set_clockgating_state = dm_set_clockgating_state,
2191 .set_powergating_state = dm_set_powergating_state,
2194 const struct amdgpu_ip_block_version dm_ip_block =
2196 .type = AMD_IP_BLOCK_TYPE_DCE,
2200 .funcs = &amdgpu_dm_funcs,
2210 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2211 .fb_create = amdgpu_display_user_framebuffer_create,
2212 .get_format_info = amd_get_format_info,
2213 .output_poll_changed = drm_fb_helper_output_poll_changed,
2214 .atomic_check = amdgpu_dm_atomic_check,
2215 .atomic_commit = amdgpu_dm_atomic_commit,
2218 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2219 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2222 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2224 u32 max_cll, min_cll, max, min, q, r;
2225 struct amdgpu_dm_backlight_caps *caps;
2226 struct amdgpu_display_manager *dm;
2227 struct drm_connector *conn_base;
2228 struct amdgpu_device *adev;
2229 struct dc_link *link = NULL;
2230 static const u8 pre_computed_values[] = {
2231 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2232 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2234 if (!aconnector || !aconnector->dc_link)
2237 link = aconnector->dc_link;
2238 if (link->connector_signal != SIGNAL_TYPE_EDP)
2241 conn_base = &aconnector->base;
2242 adev = drm_to_adev(conn_base->dev);
2244 caps = &dm->backlight_caps;
2245 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2246 caps->aux_support = false;
2247 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2248 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2250 if (caps->ext_caps->bits.oled == 1 ||
2251 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2252 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2253 caps->aux_support = true;
2255 /* From the specification (CTA-861-G), for calculating the maximum
2256 * luminance we need to use:
2257 * Luminance = 50*2**(CV/32)
2258 * Where CV is a one-byte value.
2259 * For calculating this expression we may need floating-point precision;
2260 * to avoid this complexity, we take advantage of the fact that CV is divided
2261 * by a constant. From Euclid's division algorithm, we know that CV
2262 * can be written as: CV = 32*q + r. Next, we replace CV in the
2263 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2264 * need to pre-compute the value of r/32. For pre-computing the values,
2265 * we just used the following Ruby line:
2266 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2267 * The results of the above expressions can be verified at
2268 * pre_computed_values.
2272 max = (1 << q) * pre_computed_values[r];
2274 // min luminance: maxLum * (CV/255)^2 / 100
2275 q = DIV_ROUND_CLOSEST(min_cll, 255);
2276 min = max * DIV_ROUND_CLOSEST((q * q), 100);
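/*
 * Worked example (illustrative numbers): with max_cll = 140, the quotient
 * and remainder from dividing by 32 are q = 4 and r = 12, so
 * max = (1 << 4) * pre_computed_values[12] = 16 * 65 = 1040 nits, close to
 * the exact 50 * 2**(140/32) ~= 1037 nits. For the minimum, a min_cll of
 * 255 gives q = DIV_ROUND_CLOSEST(255, 255) = 1 and
 * min = max * DIV_ROUND_CLOSEST(1 * 1, 100) = 0.
 */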
2278 caps->aux_max_input_signal = max;
2279 caps->aux_min_input_signal = min;
2282 void amdgpu_dm_update_connector_after_detect(
2283 struct amdgpu_dm_connector *aconnector)
2285 struct drm_connector *connector = &aconnector->base;
2286 struct drm_device *dev = connector->dev;
2287 struct dc_sink *sink;
2289 /* MST handled by drm_mst framework */
2290 if (aconnector->mst_mgr.mst_state == true)
2293 sink = aconnector->dc_link->local_sink;
2295 dc_sink_retain(sink);
2298 * The EDID-managed connector gets its first update only in the mode_valid hook;
2299 * the connector sink is then set to either a fake or a physical sink depending on link status.
2300 * Skip if already done during boot.
2302 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2303 && aconnector->dc_em_sink) {
2306 * For headless S3 resume, use the emulated sink (em_sink) to fake a stream,
2307 * because on resume connector->sink is set to NULL
2309 mutex_lock(&dev->mode_config.mutex);
2312 if (aconnector->dc_sink) {
2313 amdgpu_dm_update_freesync_caps(connector, NULL);
2315 * The retain and release below are used to
2316 * bump up the refcount for the sink because the link doesn't point
2317 * to it anymore after disconnect; otherwise, on the next crtc-to-connector
2318 * reshuffle by UMD we would get an unwanted dc_sink release.
2320 dc_sink_release(aconnector->dc_sink);
2322 aconnector->dc_sink = sink;
2323 dc_sink_retain(aconnector->dc_sink);
2324 amdgpu_dm_update_freesync_caps(connector,
2327 amdgpu_dm_update_freesync_caps(connector, NULL);
2328 if (!aconnector->dc_sink) {
2329 aconnector->dc_sink = aconnector->dc_em_sink;
2330 dc_sink_retain(aconnector->dc_sink);
2334 mutex_unlock(&dev->mode_config.mutex);
2337 dc_sink_release(sink);
2342 * TODO: temporary guard while looking for a proper fix.
2343 * If this sink is an MST sink, we should not do anything.
2345 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2346 dc_sink_release(sink);
2350 if (aconnector->dc_sink == sink) {
2352 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2355 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2356 aconnector->connector_id);
2358 dc_sink_release(sink);
2362 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2363 aconnector->connector_id, aconnector->dc_sink, sink);
2365 mutex_lock(&dev->mode_config.mutex);
2368 * 1. Update status of the drm connector
2369 * 2. Send an event and let userspace tell us what to do
2373 * TODO: check if we still need the S3 mode update workaround.
2374 * If yes, put it here.
2376 if (aconnector->dc_sink)
2377 amdgpu_dm_update_freesync_caps(connector, NULL);
2379 aconnector->dc_sink = sink;
2380 dc_sink_retain(aconnector->dc_sink);
2381 if (sink->dc_edid.length == 0) {
2382 aconnector->edid = NULL;
2383 if (aconnector->dc_link->aux_mode) {
2384 drm_dp_cec_unset_edid(
2385 &aconnector->dm_dp_aux.aux);
2389 (struct edid *)sink->dc_edid.raw_edid;
2391 drm_connector_update_edid_property(connector,
2393 drm_add_edid_modes(connector, aconnector->edid);
2395 if (aconnector->dc_link->aux_mode)
2396 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2400 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2401 update_connector_ext_caps(aconnector);
2403 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2404 amdgpu_dm_update_freesync_caps(connector, NULL);
2405 drm_connector_update_edid_property(connector, NULL);
2406 aconnector->num_modes = 0;
2407 dc_sink_release(aconnector->dc_sink);
2408 aconnector->dc_sink = NULL;
2409 aconnector->edid = NULL;
2410 #ifdef CONFIG_DRM_AMD_DC_HDCP
2411 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2412 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2413 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2417 mutex_unlock(&dev->mode_config.mutex);
2419 update_subconnector_property(aconnector);
2422 dc_sink_release(sink);
2425 static void handle_hpd_irq(void *param)
2427 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2428 struct drm_connector *connector = &aconnector->base;
2429 struct drm_device *dev = connector->dev;
2430 enum dc_connection_type new_connection_type = dc_connection_none;
2431 #ifdef CONFIG_DRM_AMD_DC_HDCP
2432 struct amdgpu_device *adev = drm_to_adev(dev);
2433 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2437 * In case of failure, or for MST, there is no need to update the connector status
2438 * or notify the OS, since (in the MST case) MST does this in its own context.
2440 mutex_lock(&aconnector->hpd_lock);
2442 #ifdef CONFIG_DRM_AMD_DC_HDCP
2443 if (adev->dm.hdcp_workqueue) {
2444 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2445 dm_con_state->update_hdcp = true;
2448 if (aconnector->fake_enable)
2449 aconnector->fake_enable = false;
2451 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2452 DRM_ERROR("KMS: Failed to detect connector\n");
2454 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2455 emulated_link_detect(aconnector->dc_link);
2458 drm_modeset_lock_all(dev);
2459 dm_restore_drm_connector_state(dev, connector);
2460 drm_modeset_unlock_all(dev);
2462 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2463 drm_kms_helper_hotplug_event(dev);
2465 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2466 if (new_connection_type == dc_connection_none &&
2467 aconnector->dc_link->type == dc_connection_none)
2468 dm_set_dpms_off(aconnector->dc_link);
2470 amdgpu_dm_update_connector_after_detect(aconnector);
2472 drm_modeset_lock_all(dev);
2473 dm_restore_drm_connector_state(dev, connector);
2474 drm_modeset_unlock_all(dev);
2476 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2477 drm_kms_helper_hotplug_event(dev);
2479 mutex_unlock(&aconnector->hpd_lock);
2483 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2485 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2487 bool new_irq_handled = false;
2489 int dpcd_bytes_to_read;
2491 const int max_process_count = 30;
2492 int process_count = 0;
2494 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2496 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2497 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2498 /* DPCD 0x200 - 0x201 for downstream IRQ */
2499 dpcd_addr = DP_SINK_COUNT;
2501 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2502 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2503 dpcd_addr = DP_SINK_COUNT_ESI;
2506 dret = drm_dp_dpcd_read(
2507 &aconnector->dm_dp_aux.aux,
2510 dpcd_bytes_to_read);
2512 while (dret == dpcd_bytes_to_read &&
2513 process_count < max_process_count) {
2519 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2520 /* handle HPD short pulse irq */
2521 if (aconnector->mst_mgr.mst_state)
2523 &aconnector->mst_mgr,
2527 if (new_irq_handled) {
2528 /* ACK at DPCD to notify downstream */
2529 const int ack_dpcd_bytes_to_write =
2530 dpcd_bytes_to_read - 1;
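/*
 * The ack is one byte shorter than the read because the leading
 * SINK_COUNT(_ESI) byte is read-only status; only the IRQ vector
 * bytes that follow it are written back to acknowledge the sink.
 */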
2532 for (retry = 0; retry < 3; retry++) {
2535 wret = drm_dp_dpcd_write(
2536 &aconnector->dm_dp_aux.aux,
2539 ack_dpcd_bytes_to_write);
2540 if (wret == ack_dpcd_bytes_to_write)
2544 /* check if there is new irq to be handled */
2545 dret = drm_dp_dpcd_read(
2546 &aconnector->dm_dp_aux.aux,
2549 dpcd_bytes_to_read);
2551 new_irq_handled = false;
2557 if (process_count == max_process_count)
2558 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2561 static void handle_hpd_rx_irq(void *param)
2563 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2564 struct drm_connector *connector = &aconnector->base;
2565 struct drm_device *dev = connector->dev;
2566 struct dc_link *dc_link = aconnector->dc_link;
2567 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2568 bool result = false;
2569 enum dc_connection_type new_connection_type = dc_connection_none;
2570 struct amdgpu_device *adev = drm_to_adev(dev);
2571 union hpd_irq_data hpd_irq_data;
2573 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2576 * TODO: Temporary mutex to protect the hpd interrupt from a gpio
2577 * conflict; after the i2c helper is implemented, this mutex should be retired.
2580 if (dc_link->type != dc_connection_mst_branch)
2581 mutex_lock(&aconnector->hpd_lock);
2583 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2585 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2586 (dc_link->type == dc_connection_mst_branch)) {
2587 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2589 dm_handle_hpd_rx_irq(aconnector);
2591 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2593 dm_handle_hpd_rx_irq(aconnector);
2598 mutex_lock(&adev->dm.dc_lock);
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2602 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2604 mutex_unlock(&adev->dm.dc_lock);
2607 if (result && !is_mst_root_connector) {
2608 /* Downstream Port status changed. */
2609 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2610 DRM_ERROR("KMS: Failed to detect connector\n");
2612 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2613 emulated_link_detect(dc_link);
2615 if (aconnector->fake_enable)
2616 aconnector->fake_enable = false;
2618 amdgpu_dm_update_connector_after_detect(aconnector);
2621 drm_modeset_lock_all(dev);
2622 dm_restore_drm_connector_state(dev, connector);
2623 drm_modeset_unlock_all(dev);
2625 drm_kms_helper_hotplug_event(dev);
2626 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2628 if (aconnector->fake_enable)
2629 aconnector->fake_enable = false;
2631 amdgpu_dm_update_connector_after_detect(aconnector);
2634 drm_modeset_lock_all(dev);
2635 dm_restore_drm_connector_state(dev, connector);
2636 drm_modeset_unlock_all(dev);
2638 drm_kms_helper_hotplug_event(dev);
2641 #ifdef CONFIG_DRM_AMD_DC_HDCP
2642 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2643 if (adev->dm.hdcp_workqueue)
2644 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2648 if (dc_link->type != dc_connection_mst_branch) {
2649 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2650 mutex_unlock(&aconnector->hpd_lock);
2654 static void register_hpd_handlers(struct amdgpu_device *adev)
2656 struct drm_device *dev = adev_to_drm(adev);
2657 struct drm_connector *connector;
2658 struct amdgpu_dm_connector *aconnector;
2659 const struct dc_link *dc_link;
2660 struct dc_interrupt_params int_params = {0};
2662 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2663 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2665 list_for_each_entry(connector,
2666 &dev->mode_config.connector_list, head) {
2668 aconnector = to_amdgpu_dm_connector(connector);
2669 dc_link = aconnector->dc_link;
2671 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2672 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2673 int_params.irq_source = dc_link->irq_source_hpd;
2675 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2677 (void *) aconnector);
2680 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2682 /* Also register for DP short pulse (hpd_rx). */
2683 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2684 int_params.irq_source = dc_link->irq_source_hpd_rx;
2686 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2688 (void *) aconnector);
2693 #if defined(CONFIG_DRM_AMD_DC_SI)
2694 /* Register IRQ sources and initialize IRQ callbacks */
2695 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2697 struct dc *dc = adev->dm.dc;
2698 struct common_irq_params *c_irq_params;
2699 struct dc_interrupt_params int_params = {0};
2702 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2704 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2705 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2708 * Actions of amdgpu_irq_add_id():
2709 * 1. Register a set() function with base driver.
2710 * Base driver will call set() function to enable/disable an
2711 * interrupt in DC hardware.
2712 * 2. Register amdgpu_dm_irq_handler().
2713 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2714 * coming from DC hardware.
2715 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2716 * for acknowledging and handling. */
2718 /* Use VBLANK interrupt */
2719 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2720 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2722 DRM_ERROR("Failed to add crtc irq id!\n");
2726 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2727 int_params.irq_source =
2728 dc_interrupt_to_irq_source(dc, i + 1, 0);
2730 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2732 c_irq_params->adev = adev;
2733 c_irq_params->irq_src = int_params.irq_source;
2735 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2736 dm_crtc_high_irq, c_irq_params);
2739 /* Use GRPH_PFLIP interrupt */
2740 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2741 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2742 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2744 DRM_ERROR("Failed to add page flip irq id!\n");
2748 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2749 int_params.irq_source =
2750 dc_interrupt_to_irq_source(dc, i, 0);
2752 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2754 c_irq_params->adev = adev;
2755 c_irq_params->irq_src = int_params.irq_source;
2757 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2758 dm_pflip_high_irq, c_irq_params);
2763 r = amdgpu_irq_add_id(adev, client_id,
2764 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2766 DRM_ERROR("Failed to add hpd irq id!\n");
2770 register_hpd_handlers(adev);
2776 /* Register IRQ sources and initialize IRQ callbacks */
2777 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2779 struct dc *dc = adev->dm.dc;
2780 struct common_irq_params *c_irq_params;
2781 struct dc_interrupt_params int_params = {0};
2784 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2786 if (adev->asic_type >= CHIP_VEGA10)
2787 client_id = SOC15_IH_CLIENTID_DCE;
2789 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2790 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2793 * Actions of amdgpu_irq_add_id():
2794 * 1. Register a set() function with base driver.
2795 * Base driver will call set() function to enable/disable an
2796 * interrupt in DC hardware.
2797 * 2. Register amdgpu_dm_irq_handler().
2798 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2799 * coming from DC hardware.
2800 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2801 * for acknowledging and handling. */
2803 /* Use VBLANK interrupt */
2804 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2805 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2807 DRM_ERROR("Failed to add crtc irq id!\n");
2811 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2812 int_params.irq_source =
2813 dc_interrupt_to_irq_source(dc, i, 0);
2815 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2817 c_irq_params->adev = adev;
2818 c_irq_params->irq_src = int_params.irq_source;
2820 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2821 dm_crtc_high_irq, c_irq_params);
2824 /* Use VUPDATE interrupt */
2825 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2826 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2828 DRM_ERROR("Failed to add vupdate irq id!\n");
2832 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2833 int_params.irq_source =
2834 dc_interrupt_to_irq_source(dc, i, 0);
2836 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2838 c_irq_params->adev = adev;
2839 c_irq_params->irq_src = int_params.irq_source;
2841 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2842 dm_vupdate_high_irq, c_irq_params);
2845 /* Use GRPH_PFLIP interrupt */
2846 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2847 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2848 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2850 DRM_ERROR("Failed to add page flip irq id!\n");
2854 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2855 int_params.irq_source =
2856 dc_interrupt_to_irq_source(dc, i, 0);
2858 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2860 c_irq_params->adev = adev;
2861 c_irq_params->irq_src = int_params.irq_source;
2863 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2864 dm_pflip_high_irq, c_irq_params);
2869 r = amdgpu_irq_add_id(adev, client_id,
2870 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2872 DRM_ERROR("Failed to add hpd irq id!\n");
2876 register_hpd_handlers(adev);
2881 #if defined(CONFIG_DRM_AMD_DC_DCN)
2882 /* Register IRQ sources and initialize IRQ callbacks */
2883 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2885 struct dc *dc = adev->dm.dc;
2886 struct common_irq_params *c_irq_params;
2887 struct dc_interrupt_params int_params = {0};
2891 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2892 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2895 * Actions of amdgpu_irq_add_id():
2896 * 1. Register a set() function with base driver.
2897 * Base driver will call set() function to enable/disable an
2898 * interrupt in DC hardware.
2899 * 2. Register amdgpu_dm_irq_handler().
2900 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2901 * coming from DC hardware.
2902 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2903 * for acknowledging and handling.
2906 /* Use VSTARTUP interrupt */
2907 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2908 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2910 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2913 DRM_ERROR("Failed to add crtc irq id!\n");
2917 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2918 int_params.irq_source =
2919 dc_interrupt_to_irq_source(dc, i, 0);
2921 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2923 c_irq_params->adev = adev;
2924 c_irq_params->irq_src = int_params.irq_source;
2926 amdgpu_dm_irq_register_interrupt(
2927 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2930 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2931 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2932 * to trigger at end of each vblank, regardless of state of the lock,
2933 * matching DCE behaviour.
2935 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2936 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2938 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2941 DRM_ERROR("Failed to add vupdate irq id!\n");
2945 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2946 int_params.irq_source =
2947 dc_interrupt_to_irq_source(dc, i, 0);
2949 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2951 c_irq_params->adev = adev;
2952 c_irq_params->irq_src = int_params.irq_source;
2954 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2955 dm_vupdate_high_irq, c_irq_params);
2958 /* Use GRPH_PFLIP interrupt */
2959 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2960 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2962 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2964 DRM_ERROR("Failed to add page flip irq id!\n");
2968 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2969 int_params.irq_source =
2970 dc_interrupt_to_irq_source(dc, i, 0);
2972 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2974 c_irq_params->adev = adev;
2975 c_irq_params->irq_src = int_params.irq_source;
2977 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2978 dm_pflip_high_irq, c_irq_params);
2983 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2986 DRM_ERROR("Failed to add hpd irq id!\n");
2990 register_hpd_handlers(adev);
2997 * Acquires the lock for the atomic state object and returns
2998 * the new atomic state.
3000 * This should only be called during atomic check.
3002 static int dm_atomic_get_state(struct drm_atomic_state *state,
3003 struct dm_atomic_state **dm_state)
3005 struct drm_device *dev = state->dev;
3006 struct amdgpu_device *adev = drm_to_adev(dev);
3007 struct amdgpu_display_manager *dm = &adev->dm;
3008 struct drm_private_state *priv_state;
3013 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3014 if (IS_ERR(priv_state))
3015 return PTR_ERR(priv_state);
3017 *dm_state = to_dm_atomic_state(priv_state);
3022 static struct dm_atomic_state *
3023 dm_atomic_get_new_state(struct drm_atomic_state *state)
3025 struct drm_device *dev = state->dev;
3026 struct amdgpu_device *adev = drm_to_adev(dev);
3027 struct amdgpu_display_manager *dm = &adev->dm;
3028 struct drm_private_obj *obj;
3029 struct drm_private_state *new_obj_state;
3032 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3033 if (obj->funcs == dm->atomic_obj.funcs)
3034 return to_dm_atomic_state(new_obj_state);
3040 static struct drm_private_state *
3041 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3043 struct dm_atomic_state *old_state, *new_state;
3045 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3049 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3051 old_state = to_dm_atomic_state(obj->state);
3053 if (old_state && old_state->context)
3054 new_state->context = dc_copy_state(old_state->context);
3056 if (!new_state->context) {
3061 return &new_state->base;
3064 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3065 struct drm_private_state *state)
3067 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3069 if (dm_state && dm_state->context)
3070 dc_release_state(dm_state->context);
3075 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3076 .atomic_duplicate_state = dm_atomic_duplicate_state,
3077 .atomic_destroy_state = dm_atomic_destroy_state,
3080 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3082 struct dm_atomic_state *state;
3085 adev->mode_info.mode_config_initialized = true;
3087 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3088 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3090 adev_to_drm(adev)->mode_config.max_width = 16384;
3091 adev_to_drm(adev)->mode_config.max_height = 16384;
3093 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3094 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3095 /* indicates support for immediate flip */
3096 adev_to_drm(adev)->mode_config.async_page_flip = true;
3098 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3100 state = kzalloc(sizeof(*state), GFP_KERNEL);
3104 state->context = dc_create_state(adev->dm.dc);
3105 if (!state->context) {
3110 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3112 drm_atomic_private_obj_init(adev_to_drm(adev),
3113 &adev->dm.atomic_obj,
3115 &dm_atomic_state_funcs);
3117 r = amdgpu_display_modeset_create_props(adev);
3119 dc_release_state(state->context);
3124 r = amdgpu_dm_audio_init(adev);
3126 dc_release_state(state->context);
3134 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3135 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3136 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3138 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3139 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3141 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3143 #if defined(CONFIG_ACPI)
3144 struct amdgpu_dm_backlight_caps caps;
3146 memset(&caps, 0, sizeof(caps));
3148 if (dm->backlight_caps.caps_valid)
3151 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3152 if (caps.caps_valid) {
3153 dm->backlight_caps.caps_valid = true;
3154 if (caps.aux_support)
3156 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3157 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3159 dm->backlight_caps.min_input_signal =
3160 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3161 dm->backlight_caps.max_input_signal =
3162 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3165 if (dm->backlight_caps.aux_support)
3168 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3169 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3173 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3180 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3181 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3186 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3187 unsigned *min, unsigned *max)
3192 if (caps->aux_support) {
3193 // Firmware limits are in nits, DC API wants millinits.
3194 *max = 1000 * caps->aux_max_input_signal;
3195 *min = 1000 * caps->aux_min_input_signal;
3197 // Firmware limits are 8-bit, PWM control is 16-bit.
3198 *max = 0x101 * caps->max_input_signal;
3199 *min = 0x101 * caps->min_input_signal;
3204 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3205 uint32_t brightness)
3209 if (!get_brightness_range(caps, &min, &max))
3212 // Rescale 0..255 to min..max
3213 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3214 AMDGPU_MAX_BL_LEVEL);
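/*
 * Illustrative example (PWM path, assuming AMDGPU_MAX_BL_LEVEL == 255):
 * firmware limits min_input_signal = 12 and max_input_signal = 255 give
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */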
3217 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3218 uint32_t brightness)
3222 if (!get_brightness_range(caps, &min, &max))
3225 if (brightness < min)
3227 // Rescale min..max to 0..255
3228 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3232 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3234 struct amdgpu_display_manager *dm = bl_get_data(bd);
3235 struct amdgpu_dm_backlight_caps caps;
3236 struct dc_link *link = NULL;
3240 amdgpu_dm_update_backlight_caps(dm);
3241 caps = dm->backlight_caps;
3243 link = (struct dc_link *)dm->backlight_link;
3245 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3246 // Change brightness based on AUX property
3247 if (caps.aux_support)
3248 return set_backlight_via_aux(link, brightness);
3250 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3255 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3257 struct amdgpu_display_manager *dm = bl_get_data(bd);
3258 int ret = dc_link_get_backlight_level(dm->backlight_link);
3260 if (ret == DC_ERROR_UNEXPECTED)
3261 return bd->props.brightness;
3262 return convert_brightness_to_user(&dm->backlight_caps, ret);
3265 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3266 .options = BL_CORE_SUSPENDRESUME,
3267 .get_brightness = amdgpu_dm_backlight_get_brightness,
3268 .update_status = amdgpu_dm_backlight_update_status,
3272 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3275 struct backlight_properties props = { 0 };
3277 amdgpu_dm_update_backlight_caps(dm);
3279 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3280 props.brightness = AMDGPU_MAX_BL_LEVEL;
3281 props.type = BACKLIGHT_RAW;
3283 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3284 adev_to_drm(dm->adev)->primary->index);
3286 dm->backlight_dev = backlight_device_register(bl_name,
3287 adev_to_drm(dm->adev)->dev,
3289 &amdgpu_dm_backlight_ops,
3292 if (IS_ERR(dm->backlight_dev))
3293 DRM_ERROR("DM: Backlight registration failed!\n");
3295 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3300 static int initialize_plane(struct amdgpu_display_manager *dm,
3301 struct amdgpu_mode_info *mode_info, int plane_id,
3302 enum drm_plane_type plane_type,
3303 const struct dc_plane_cap *plane_cap)
3305 struct drm_plane *plane;
3306 unsigned long possible_crtcs;
3309 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3311 DRM_ERROR("KMS: Failed to allocate plane\n");
3314 plane->type = plane_type;
3317 * HACK: IGT tests expect that the primary plane for a CRTC
3318 * can only have one possible CRTC. Only expose support for
3319 * any CRTC if the plane is not going to be used as a primary plane
3320 * for a CRTC - like overlay or underlay planes.
3322 possible_crtcs = 1 << plane_id;
3323 if (plane_id >= dm->dc->caps.max_streams)
3324 possible_crtcs = 0xff;
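/*
 * Example: with dc->caps.max_streams == 4, primary plane 2 gets
 * possible_crtcs = 1 << 2 = 0x4 (bound to CRTC 2 only), while an
 * overlay plane with plane_id >= 4 is exposed on all CRTCs via 0xff.
 */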
3326 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3329 DRM_ERROR("KMS: Failed to initialize plane\n");
3335 mode_info->planes[plane_id] = plane;
3341 static void register_backlight_device(struct amdgpu_display_manager *dm,
3342 struct dc_link *link)
3344 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3345 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3347 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3348 link->type != dc_connection_none) {
3350 * Even if registration fails, we should continue with
3351 * DM initialization, because not having backlight control
3352 * is better than a black screen.
3354 amdgpu_dm_register_backlight_device(dm);
3356 if (dm->backlight_dev)
3357 dm->backlight_link = link;
3364 * In this architecture, the association
3365 * connector -> encoder -> crtc
3366 * is not really required. The crtc and connector will hold the
3367 * display_index as an abstraction to use with the DAL component
3369 * Returns 0 on success
3371 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3373 struct amdgpu_display_manager *dm = &adev->dm;
3375 struct amdgpu_dm_connector *aconnector = NULL;
3376 struct amdgpu_encoder *aencoder = NULL;
3377 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3379 int32_t primary_planes;
3380 enum dc_connection_type new_connection_type = dc_connection_none;
3381 const struct dc_plane_cap *plane;
3383 dm->display_indexes_num = dm->dc->caps.max_streams;
3384 /* Update the actual used number of crtc */
3385 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3387 link_cnt = dm->dc->caps.max_links;
3388 if (amdgpu_dm_mode_config_init(dm->adev)) {
3389 DRM_ERROR("DM: Failed to initialize mode config\n");
3393 /* There is one primary plane per CRTC */
3394 primary_planes = dm->dc->caps.max_streams;
3395 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3398 * Initialize primary planes, implicit planes for legacy IOCTLS.
3399 * Order is reversed to match iteration order in atomic check.
3401 for (i = (primary_planes - 1); i >= 0; i--) {
3402 plane = &dm->dc->caps.planes[i];
3404 if (initialize_plane(dm, mode_info, i,
3405 DRM_PLANE_TYPE_PRIMARY, plane)) {
3406 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3412 * Initialize overlay planes, index starting after primary planes.
3413 * These planes have a higher DRM index than the primary planes since
3414 * they should be considered as having a higher z-order.
3415 * Order is reversed to match iteration order in atomic check.
3417 * Only support DCN for now, and only expose one so we don't encourage
3418 * userspace to use up all the pipes.
3420 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3421 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3423 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3426 if (!plane->blends_with_above || !plane->blends_with_below)
3429 if (!plane->pixel_format_support.argb8888)
3432 if (initialize_plane(dm, NULL, primary_planes + i,
3433 DRM_PLANE_TYPE_OVERLAY, plane)) {
3434 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3438 /* Only create one overlay plane. */
3442 for (i = 0; i < dm->dc->caps.max_streams; i++)
3443 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3444 DRM_ERROR("KMS: Failed to initialize crtc\n");
3448 /* loops over all connectors on the board */
3449 for (i = 0; i < link_cnt; i++) {
3450 struct dc_link *link = NULL;
3452 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3454 "KMS: Cannot support more than %d display indexes\n",
3455 AMDGPU_DM_MAX_DISPLAY_INDEX);
3459 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3463 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3467 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3468 DRM_ERROR("KMS: Failed to initialize encoder\n");
3472 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3473 DRM_ERROR("KMS: Failed to initialize connector\n");
3477 link = dc_get_link_at_index(dm->dc, i);
3479 if (!dc_link_detect_sink(link, &new_connection_type))
3480 DRM_ERROR("KMS: Failed to detect connector\n");
3482 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3483 emulated_link_detect(link);
3484 amdgpu_dm_update_connector_after_detect(aconnector);
3486 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3487 amdgpu_dm_update_connector_after_detect(aconnector);
3488 register_backlight_device(dm, link);
3489 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3490 amdgpu_dm_set_psr_caps(link);
3496 /* Software is initialized. Now we can register interrupt handlers. */
3497 switch (adev->asic_type) {
3498 #if defined(CONFIG_DRM_AMD_DC_SI)
3503 if (dce60_register_irq_handlers(dm->adev)) {
3504 DRM_ERROR("DM: Failed to initialize IRQ\n");
3518 case CHIP_POLARIS11:
3519 case CHIP_POLARIS10:
3520 case CHIP_POLARIS12:
3525 if (dce110_register_irq_handlers(dm->adev)) {
3526 DRM_ERROR("DM: Failed to initialize IRQ\n");
3530 #if defined(CONFIG_DRM_AMD_DC_DCN)
3536 case CHIP_SIENNA_CICHLID:
3537 case CHIP_NAVY_FLOUNDER:
3538 case CHIP_DIMGREY_CAVEFISH:
3540 if (dcn10_register_irq_handlers(dm->adev)) {
3541 DRM_ERROR("DM: Failed to initialize IRQ\n");
3547 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3559 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3561 drm_mode_config_cleanup(dm->ddev);
3562 drm_atomic_private_obj_fini(&dm->atomic_obj);
3566 /******************************************************************************
3567 * amdgpu_display_funcs functions
3568 *****************************************************************************/
3571 * dm_bandwidth_update - program display watermarks
3573 * @adev: amdgpu_device pointer
3575 * Calculate and program the display watermarks and line buffer allocation.
3577 static void dm_bandwidth_update(struct amdgpu_device *adev)
3579 /* TODO: implement later */
3582 static const struct amdgpu_display_funcs dm_display_funcs = {
3583 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3584 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3585 .backlight_set_level = NULL, /* never called for DC */
3586 .backlight_get_level = NULL, /* never called for DC */
3587 .hpd_sense = NULL,/* called unconditionally */
3588 .hpd_set_polarity = NULL, /* called unconditionally */
3589 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3590 .page_flip_get_scanoutpos =
3591 dm_crtc_get_scanoutpos,/* called unconditionally */
3592 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3593 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3596 #if defined(CONFIG_DEBUG_KERNEL_DC)
3598 static ssize_t s3_debug_store(struct device *device,
3599 struct device_attribute *attr,
3605 struct drm_device *drm_dev = dev_get_drvdata(device);
3606 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3608 ret = kstrtoint(buf, 0, &s3_state);
3613 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3618 return ret == 0 ? count : 0;
3621 DEVICE_ATTR_WO(s3_debug);
3625 static int dm_early_init(void *handle)
3627 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3629 switch (adev->asic_type) {
3630 #if defined(CONFIG_DRM_AMD_DC_SI)
3634 adev->mode_info.num_crtc = 6;
3635 adev->mode_info.num_hpd = 6;
3636 adev->mode_info.num_dig = 6;
3639 adev->mode_info.num_crtc = 2;
3640 adev->mode_info.num_hpd = 2;
3641 adev->mode_info.num_dig = 2;
3646 adev->mode_info.num_crtc = 6;
3647 adev->mode_info.num_hpd = 6;
3648 adev->mode_info.num_dig = 6;
3651 adev->mode_info.num_crtc = 4;
3652 adev->mode_info.num_hpd = 6;
3653 adev->mode_info.num_dig = 7;
3657 adev->mode_info.num_crtc = 2;
3658 adev->mode_info.num_hpd = 6;
3659 adev->mode_info.num_dig = 6;
3663 adev->mode_info.num_crtc = 6;
3664 adev->mode_info.num_hpd = 6;
3665 adev->mode_info.num_dig = 7;
3668 adev->mode_info.num_crtc = 3;
3669 adev->mode_info.num_hpd = 6;
3670 adev->mode_info.num_dig = 9;
3673 adev->mode_info.num_crtc = 2;
3674 adev->mode_info.num_hpd = 6;
3675 adev->mode_info.num_dig = 9;
3677 case CHIP_POLARIS11:
3678 case CHIP_POLARIS12:
3679 adev->mode_info.num_crtc = 5;
3680 adev->mode_info.num_hpd = 5;
3681 adev->mode_info.num_dig = 5;
3683 case CHIP_POLARIS10:
3685 adev->mode_info.num_crtc = 6;
3686 adev->mode_info.num_hpd = 6;
3687 adev->mode_info.num_dig = 6;
3692 adev->mode_info.num_crtc = 6;
3693 adev->mode_info.num_hpd = 6;
3694 adev->mode_info.num_dig = 6;
3696 #if defined(CONFIG_DRM_AMD_DC_DCN)
3700 adev->mode_info.num_crtc = 4;
3701 adev->mode_info.num_hpd = 4;
3702 adev->mode_info.num_dig = 4;
3706 case CHIP_SIENNA_CICHLID:
3707 case CHIP_NAVY_FLOUNDER:
3708 adev->mode_info.num_crtc = 6;
3709 adev->mode_info.num_hpd = 6;
3710 adev->mode_info.num_dig = 6;
3713 case CHIP_DIMGREY_CAVEFISH:
3714 adev->mode_info.num_crtc = 5;
3715 adev->mode_info.num_hpd = 5;
3716 adev->mode_info.num_dig = 5;
3720 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3724 amdgpu_dm_set_irq_funcs(adev);
3726 if (adev->mode_info.funcs == NULL)
3727 adev->mode_info.funcs = &dm_display_funcs;
3730 * Note: Do NOT change adev->audio_endpt_rreg and
3731 * adev->audio_endpt_wreg because they are initialised in
3732 * amdgpu_device_init()
3734 #if defined(CONFIG_DEBUG_KERNEL_DC)
3736 adev_to_drm(adev)->dev,
3737 &dev_attr_s3_debug);
3743 static bool modeset_required(struct drm_crtc_state *crtc_state,
3744 struct dc_stream_state *new_stream,
3745 struct dc_stream_state *old_stream)
3747 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3750 static bool modereset_required(struct drm_crtc_state *crtc_state)
3752 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3755 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3757 drm_encoder_cleanup(encoder);
3761 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3762 .destroy = amdgpu_dm_encoder_destroy,
3766 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3767 struct dc_scaling_info *scaling_info)
3769 int scale_w, scale_h;
3771 memset(scaling_info, 0, sizeof(*scaling_info));
3773 /* Source is fixed 16.16 but we ignore mantissa for now... */
3774 scaling_info->src_rect.x = state->src_x >> 16;
3775 scaling_info->src_rect.y = state->src_y >> 16;
3777 scaling_info->src_rect.width = state->src_w >> 16;
3778 if (scaling_info->src_rect.width == 0)
3781 scaling_info->src_rect.height = state->src_h >> 16;
3782 if (scaling_info->src_rect.height == 0)
3785 scaling_info->dst_rect.x = state->crtc_x;
3786 scaling_info->dst_rect.y = state->crtc_y;
3788 if (state->crtc_w == 0)
3791 scaling_info->dst_rect.width = state->crtc_w;
3793 if (state->crtc_h == 0)
3796 scaling_info->dst_rect.height = state->crtc_h;
3798 /* DRM doesn't specify clipping on destination output. */
3799 scaling_info->clip_rect = scaling_info->dst_rect;
3801 /* TODO: Validate scaling per-format with DC plane caps */
3802 scale_w = scaling_info->dst_rect.width * 1000 /
3803 scaling_info->src_rect.width;
3805 if (scale_w < 250 || scale_w > 16000)
3808 scale_h = scaling_info->dst_rect.height * 1000 /
3809 scaling_info->src_rect.height;
3811 if (scale_h < 250 || scale_h > 16000)
3815 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3816 * assume reasonable defaults based on the format.
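/*
 * Note on the bounds above: scale_w and scale_h are scale factors in
 * units of 1/1000, so the 250..16000 window allows the destination to be
 * between 0.25x and 16x of the source in each axis. For example, a
 * 1920-wide source scaled to a 960-wide destination gives
 * scale_w = 960 * 1000 / 1920 = 500, which is accepted.
 */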
3823 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3824 uint64_t tiling_flags)
3826 /* Fill GFX8 params */
3827 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3828 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3830 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3831 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3832 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3833 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3834 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3836 /* XXX fix me for VI */
3837 tiling_info->gfx8.num_banks = num_banks;
3838 tiling_info->gfx8.array_mode =
3839 DC_ARRAY_2D_TILED_THIN1;
3840 tiling_info->gfx8.tile_split = tile_split;
3841 tiling_info->gfx8.bank_width = bankw;
3842 tiling_info->gfx8.bank_height = bankh;
3843 tiling_info->gfx8.tile_aspect = mtaspect;
3844 tiling_info->gfx8.tile_mode =
3845 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3846 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3847 == DC_ARRAY_1D_TILED_THIN1) {
3848 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3851 tiling_info->gfx8.pipe_config =
3852 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3856 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3857 union dc_tiling_info *tiling_info)
3859 tiling_info->gfx9.num_pipes =
3860 adev->gfx.config.gb_addr_config_fields.num_pipes;
3861 tiling_info->gfx9.num_banks =
3862 adev->gfx.config.gb_addr_config_fields.num_banks;
3863 tiling_info->gfx9.pipe_interleave =
3864 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3865 tiling_info->gfx9.num_shader_engines =
3866 adev->gfx.config.gb_addr_config_fields.num_se;
3867 tiling_info->gfx9.max_compressed_frags =
3868 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3869 tiling_info->gfx9.num_rb_per_se =
3870 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3871 tiling_info->gfx9.shaderEnable = 1;
3872 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3873 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3874 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3875 adev->asic_type == CHIP_VANGOGH)
3876 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3880 validate_dcc(struct amdgpu_device *adev,
3881 const enum surface_pixel_format format,
3882 const enum dc_rotation_angle rotation,
3883 const union dc_tiling_info *tiling_info,
3884 const struct dc_plane_dcc_param *dcc,
3885 const struct dc_plane_address *address,
3886 const struct plane_size *plane_size)
3888 struct dc *dc = adev->dm.dc;
3889 struct dc_dcc_surface_param input;
3890 struct dc_surface_dcc_cap output;
3892 memset(&input, 0, sizeof(input));
3893 memset(&output, 0, sizeof(output));
3898 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3899 !dc->cap_funcs.get_dcc_compression_cap)
3902 input.format = format;
3903 input.surface_size.width = plane_size->surface_size.width;
3904 input.surface_size.height = plane_size->surface_size.height;
3905 input.swizzle_mode = tiling_info->gfx9.swizzle;
3907 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3908 input.scan = SCAN_DIRECTION_HORIZONTAL;
3909 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3910 input.scan = SCAN_DIRECTION_VERTICAL;
3912 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3915 if (!output.capable)
3918 if (dcc->independent_64b_blks == 0 &&
3919 output.grph.rgb.independent_64b_blks != 0)
3926 modifier_has_dcc(uint64_t modifier)
3928 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3932 modifier_gfx9_swizzle_mode(uint64_t modifier)
3934 if (modifier == DRM_FORMAT_MOD_LINEAR)
3937 return AMD_FMT_MOD_GET(TILE, modifier);
3940 static const struct drm_format_info *
3941 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3943 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3947 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3948 union dc_tiling_info *tiling_info,
3951 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3952 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3953 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3954 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3956 fill_gfx9_tiling_info_from_device(adev, tiling_info);
3958 if (!IS_AMD_FMT_MOD(modifier))
3961 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3962 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3964 if (adev->family >= AMDGPU_FAMILY_NV) {
3965 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3967 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3969 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3973 enum dm_micro_swizzle {
3974 MICRO_SWIZZLE_Z = 0,
3975 MICRO_SWIZZLE_S = 1,
3976 MICRO_SWIZZLE_D = 2,
3980 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3984 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3985 const struct drm_format_info *info = drm_format_info(format);
3987 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
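/*
 * The low two bits of the GFX9+ swizzle mode encode the micro tile order,
 * so e.g. the 64K_S and 64K_S_X swizzles both resolve to MICRO_SWIZZLE_S
 * here, while the 64K_D variants resolve to MICRO_SWIZZLE_D.
 */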
3993 * We always have to allow this modifier, because core DRM still
3994 * checks LINEAR support if userspace does not provide modifiers.
3996 if (modifier == DRM_FORMAT_MOD_LINEAR)
4000 * The arbitrary tiling support for multiplane formats has not been hooked up yet.
4003 if (info->num_planes > 1)
4007 * For D swizzle the canonical modifier depends on the bpp, so check it here.
4010 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4011 adev->family >= AMDGPU_FAMILY_NV) {
4012 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4016 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4020 if (modifier_has_dcc(modifier)) {
4021 /* Per radeonsi comments 16/64 bpp are more complicated. */
4022 if (info->cpp[0] != 4)
4030 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4035 if (*cap - *size < 1) {
4036 uint64_t new_cap = *cap * 2;
4037 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4045 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4051 (*mods)[*size] = mod;
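/*
 * Note: the modifier array grows by doubling when full (the initial
 * capacity is 128, see get_plane_modifiers() below), so repeated
 * add_modifier() calls stay amortized O(1) per entry.
 */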
4056 add_gfx9_modifiers(const struct amdgpu_device *adev,
4057 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4059 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4060 int pipe_xor_bits = min(8, pipes +
4061 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4062 int bank_xor_bits = min(8 - pipe_xor_bits,
4063 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4064 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4065 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
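/*
 * Illustrative example: a GFX9 part with 4 pipes, 1 shader engine and
 * 8 banks gives pipes = 2, pipe_xor_bits = min(8, 2 + 0) = 2 and
 * bank_xor_bits = min(8 - 2, 3) = 3.
 */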
4068 if (adev->family == AMDGPU_FAMILY_RV) {
4069 /* Raven2 and later */
4070 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4073 * No _D DCC swizzles yet because we only allow 32bpp, which
4074 * doesn't support _D on DCN
4077 if (has_constant_encode) {
4078 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4079 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4080 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4081 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4082 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4083 AMD_FMT_MOD_SET(DCC, 1) |
4084 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4085 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4086 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4089 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4090 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4091 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4092 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4093 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4094 AMD_FMT_MOD_SET(DCC, 1) |
4095 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4096 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4097 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4099 if (has_constant_encode) {
4100 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4101 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4102 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4103 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4104 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4105 AMD_FMT_MOD_SET(DCC, 1) |
4106 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4107 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4108 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4110 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4111 AMD_FMT_MOD_SET(RB, rb) |
4112 AMD_FMT_MOD_SET(PIPE, pipes));
4115 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4116 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4117 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4118 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4119 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4120 AMD_FMT_MOD_SET(DCC, 1) |
4121 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4122 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4123 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4124 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4125 AMD_FMT_MOD_SET(RB, rb) |
4126 AMD_FMT_MOD_SET(PIPE, pipes));
4130 * Only supported for 64bpp on Raven, will be filtered on format in
4131 * dm_plane_format_mod_supported.
4133 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4134 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4135 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4136 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4137 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4139 if (adev->family == AMDGPU_FAMILY_RV) {
4140 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4141 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4142 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4143 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4144 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4148 * Only supported for 64bpp on Raven, will be filtered on format in
4149 * dm_plane_format_mod_supported.
4151 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4152 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4153 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4155 if (adev->family == AMDGPU_FAMILY_RV) {
4156 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4157 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4158 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4163 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4164 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4166 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4168 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4169 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4170 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4171 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4172 AMD_FMT_MOD_SET(DCC, 1) |
4173 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4174 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4175 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4177 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4178 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4179 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4180 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4181 AMD_FMT_MOD_SET(DCC, 1) |
4182 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4183 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4184 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4185 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4187 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4188 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4189 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4190 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4192 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4193 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4194 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4195 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4198 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4199 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4200 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4201 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4203 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4204 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4205 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4209 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4210 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4212 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4213 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4215 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4216 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4217 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4218 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4219 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4220 AMD_FMT_MOD_SET(DCC, 1) |
4221 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4222 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4223 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4224 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4226 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4228 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4229 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4230 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4231 AMD_FMT_MOD_SET(DCC, 1) |
4232 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4233 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4234 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4235 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4236 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4238 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4239 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4240 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4241 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4242 AMD_FMT_MOD_SET(PACKERS, pkrs));
4244 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4245 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4246 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4247 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4248 AMD_FMT_MOD_SET(PACKERS, pkrs));
4250 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4251 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4252 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4253 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4255 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4256 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4257 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
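/*
 * Build the list of format modifiers advertised for a plane, roughly ordered
 * from most to least preferred and terminated by DRM_FORMAT_MOD_INVALID.
 * Cursor planes only get LINEAR.  A hedged sketch of the expected usage when
 * the plane is created (names of the format arrays are illustrative only):
 *
 *	get_plane_modifiers(adev, plane->type, &modifiers);
 *	drm_universal_plane_init(dev, plane, possible_crtcs, &dm_plane_funcs,
 *				 formats, num_formats, modifiers,
 *				 plane->type, NULL);
 *	kfree(modifiers);
 */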
4261 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4263 uint64_t size = 0, capacity = 128;
4266 /* We have not hooked up any pre-GFX9 modifiers. */
4267 if (adev->family < AMDGPU_FAMILY_AI)
4270 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4272 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4273 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4274 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4275 return *mods ? 0 : -ENOMEM;
4278 switch (adev->family) {
4279 case AMDGPU_FAMILY_AI:
4280 case AMDGPU_FAMILY_RV:
4281 add_gfx9_modifiers(adev, mods, &size, &capacity);
4283 case AMDGPU_FAMILY_NV:
4284 case AMDGPU_FAMILY_VGH:
4285 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4286 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4288 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4292 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4294 /* INVALID marks the end of the list. */
4295 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
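/*
 * Derive the DC tiling and DCC parameters for a GFX9+ surface from the
 * framebuffer's format modifier rather than the legacy tiling flags.  When
 * the modifier carries DCC, the metadata surface is expected in plane 1 of
 * the framebuffer (offsets[1]/pitches[1]) and is programmed below as the
 * meta address and pitch.
 */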
4304 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4305 const struct amdgpu_framebuffer *afb,
4306 const enum surface_pixel_format format,
4307 const enum dc_rotation_angle rotation,
4308 const struct plane_size *plane_size,
4309 union dc_tiling_info *tiling_info,
4310 struct dc_plane_dcc_param *dcc,
4311 struct dc_plane_address *address,
4312 const bool force_disable_dcc)
4314 const uint64_t modifier = afb->base.modifier;
4317 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4318 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4320 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4321 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4324 dcc->meta_pitch = afb->base.pitches[1];
4325 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4327 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4328 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4331 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
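/*
 * Translate an amdgpu framebuffer into the DC plane size, tiling, DCC and
 * address descriptors.  RGB surfaces use a single GRAPHICS address, while
 * video formats are programmed as separate luma/chroma planes.  On GFX9 and
 * newer the tiling comes from the modifier; older ASICs fall back to the
 * legacy tiling flags.
 */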
4339 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4340 const struct amdgpu_framebuffer *afb,
4341 const enum surface_pixel_format format,
4342 const enum dc_rotation_angle rotation,
4343 const uint64_t tiling_flags,
4344 union dc_tiling_info *tiling_info,
4345 struct plane_size *plane_size,
4346 struct dc_plane_dcc_param *dcc,
4347 struct dc_plane_address *address,
4349 bool force_disable_dcc)
4351 const struct drm_framebuffer *fb = &afb->base;
4354 memset(tiling_info, 0, sizeof(*tiling_info));
4355 memset(plane_size, 0, sizeof(*plane_size));
4356 memset(dcc, 0, sizeof(*dcc));
4357 memset(address, 0, sizeof(*address));
4359 address->tmz_surface = tmz_surface;
4361 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4362 uint64_t addr = afb->address + fb->offsets[0];
4364 plane_size->surface_size.x = 0;
4365 plane_size->surface_size.y = 0;
4366 plane_size->surface_size.width = fb->width;
4367 plane_size->surface_size.height = fb->height;
4368 plane_size->surface_pitch =
4369 fb->pitches[0] / fb->format->cpp[0];
4371 address->type = PLN_ADDR_TYPE_GRAPHICS;
4372 address->grph.addr.low_part = lower_32_bits(addr);
4373 address->grph.addr.high_part = upper_32_bits(addr);
4374 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4375 uint64_t luma_addr = afb->address + fb->offsets[0];
4376 uint64_t chroma_addr = afb->address + fb->offsets[1];
4378 plane_size->surface_size.x = 0;
4379 plane_size->surface_size.y = 0;
4380 plane_size->surface_size.width = fb->width;
4381 plane_size->surface_size.height = fb->height;
4382 plane_size->surface_pitch =
4383 fb->pitches[0] / fb->format->cpp[0];
4385 plane_size->chroma_size.x = 0;
4386 plane_size->chroma_size.y = 0;
4387 /* TODO: set these based on surface format */
4388 plane_size->chroma_size.width = fb->width / 2;
4389 plane_size->chroma_size.height = fb->height / 2;
4391 plane_size->chroma_pitch =
4392 fb->pitches[1] / fb->format->cpp[1];
4394 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4395 address->video_progressive.luma_addr.low_part =
4396 lower_32_bits(luma_addr);
4397 address->video_progressive.luma_addr.high_part =
4398 upper_32_bits(luma_addr);
4399 address->video_progressive.chroma_addr.low_part =
4400 lower_32_bits(chroma_addr);
4401 address->video_progressive.chroma_addr.high_part =
4402 upper_32_bits(chroma_addr);
4405 if (adev->family >= AMDGPU_FAMILY_AI) {
4406 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4407 rotation, plane_size,
4414 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
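/*
 * Work out the blending configuration for a plane.  Per-pixel alpha is only
 * honoured for overlay planes using the pre-multiplied blend mode and an
 * alpha-capable format; the 16-bit DRM plane alpha is scaled down to the
 * 8-bit global alpha DC expects (e.g. 0x8000 >> 8 == 0x80).
 */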
4421 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4422 bool *per_pixel_alpha, bool *global_alpha,
4423 int *global_alpha_value)
4425 *per_pixel_alpha = false;
4426 *global_alpha = false;
4427 *global_alpha_value = 0xff;
4429 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4432 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4433 static const uint32_t alpha_formats[] = {
4434 DRM_FORMAT_ARGB8888,
4435 DRM_FORMAT_RGBA8888,
4436 DRM_FORMAT_ABGR8888,
4438 uint32_t format = plane_state->fb->format->format;
4441 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4442 if (format == alpha_formats[i]) {
4443 *per_pixel_alpha = true;
4449 if (plane_state->alpha < 0xffff) {
4450 *global_alpha = true;
4451 *global_alpha_value = plane_state->alpha >> 8;
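/*
 * Map the DRM color encoding/range plane properties onto a DC color space.
 * RGB surfaces always stay sRGB; YUV surfaces pick a BT.601/709/2020 variant
 * based on the plane state's encoding and range.
 */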
4456 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4457 const enum surface_pixel_format format,
4458 enum dc_color_space *color_space)
4462 *color_space = COLOR_SPACE_SRGB;
4464 /* DRM color properties only affect non-RGB formats. */
4465 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4468 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4470 switch (plane_state->color_encoding) {
4471 case DRM_COLOR_YCBCR_BT601:
4473 *color_space = COLOR_SPACE_YCBCR601;
4475 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4478 case DRM_COLOR_YCBCR_BT709:
4480 *color_space = COLOR_SPACE_YCBCR709;
4482 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4485 case DRM_COLOR_YCBCR_BT2020:
4487 *color_space = COLOR_SPACE_2020_YCBCR;
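/*
 * Populate a dc_plane_info (format, rotation, tiling, DCC, blending, color
 * space) and the surface addresses for one plane from its DRM plane state
 * and framebuffer.
 */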
4500 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4501 const struct drm_plane_state *plane_state,
4502 const uint64_t tiling_flags,
4503 struct dc_plane_info *plane_info,
4504 struct dc_plane_address *address,
4506 bool force_disable_dcc)
4508 const struct drm_framebuffer *fb = plane_state->fb;
4509 const struct amdgpu_framebuffer *afb =
4510 to_amdgpu_framebuffer(plane_state->fb);
4511 struct drm_format_name_buf format_name;
4514 memset(plane_info, 0, sizeof(*plane_info));
4516 switch (fb->format->format) {
4518 plane_info->format =
4519 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4521 case DRM_FORMAT_RGB565:
4522 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4524 case DRM_FORMAT_XRGB8888:
4525 case DRM_FORMAT_ARGB8888:
4526 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4528 case DRM_FORMAT_XRGB2101010:
4529 case DRM_FORMAT_ARGB2101010:
4530 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4532 case DRM_FORMAT_XBGR2101010:
4533 case DRM_FORMAT_ABGR2101010:
4534 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4536 case DRM_FORMAT_XBGR8888:
4537 case DRM_FORMAT_ABGR8888:
4538 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4540 case DRM_FORMAT_NV21:
4541 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4543 case DRM_FORMAT_NV12:
4544 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4546 case DRM_FORMAT_P010:
4547 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4549 case DRM_FORMAT_XRGB16161616F:
4550 case DRM_FORMAT_ARGB16161616F:
4551 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4553 case DRM_FORMAT_XBGR16161616F:
4554 case DRM_FORMAT_ABGR16161616F:
4555 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4559 "Unsupported screen format %s\n",
4560 drm_get_format_name(fb->format->format, &format_name));
4564 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4565 case DRM_MODE_ROTATE_0:
4566 plane_info->rotation = ROTATION_ANGLE_0;
4568 case DRM_MODE_ROTATE_90:
4569 plane_info->rotation = ROTATION_ANGLE_90;
4571 case DRM_MODE_ROTATE_180:
4572 plane_info->rotation = ROTATION_ANGLE_180;
4574 case DRM_MODE_ROTATE_270:
4575 plane_info->rotation = ROTATION_ANGLE_270;
4578 plane_info->rotation = ROTATION_ANGLE_0;
4582 plane_info->visible = true;
4583 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4585 plane_info->layer_index = 0;
4587 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4588 &plane_info->color_space);
4592 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4593 plane_info->rotation, tiling_flags,
4594 &plane_info->tiling_info,
4595 &plane_info->plane_size,
4596 &plane_info->dcc, address, tmz_surface,
4601 fill_blending_from_plane_state(
4602 plane_state, &plane_info->per_pixel_alpha,
4603 &plane_info->global_alpha, &plane_info->global_alpha_value);
4608 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4609 struct dc_plane_state *dc_plane_state,
4610 struct drm_plane_state *plane_state,
4611 struct drm_crtc_state *crtc_state)
4613 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4614 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4615 struct dc_scaling_info scaling_info;
4616 struct dc_plane_info plane_info;
4618 bool force_disable_dcc = false;
4620 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4624 dc_plane_state->src_rect = scaling_info.src_rect;
4625 dc_plane_state->dst_rect = scaling_info.dst_rect;
4626 dc_plane_state->clip_rect = scaling_info.clip_rect;
4627 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4629 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4630 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4633 &dc_plane_state->address,
4639 dc_plane_state->format = plane_info.format;
4640 dc_plane_state->color_space = plane_info.color_space;
4642 dc_plane_state->plane_size = plane_info.plane_size;
4643 dc_plane_state->rotation = plane_info.rotation;
4644 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4645 dc_plane_state->stereo_format = plane_info.stereo_format;
4646 dc_plane_state->tiling_info = plane_info.tiling_info;
4647 dc_plane_state->visible = plane_info.visible;
4648 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4649 dc_plane_state->global_alpha = plane_info.global_alpha;
4650 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4651 dc_plane_state->dcc = plane_info.dcc;
4652 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4655 * Always set input transfer function, since plane state is refreshed
4658 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
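/*
 * Compute the viewport (src) and addressable destination (dst) rectangles
 * for the stream based on the connector's scaling property.  For RMX_ASPECT
 * the mode is scaled to the largest size that preserves aspect ratio and is
 * then centered; a rough worked example of the arithmetic below: a 1920x1080
 * mode on a 2560x1600 panel scales to 2560x1440 with dst.y = (1600-1440)/2
 * = 80.  RMX_CENTER keeps the mode at 1:1 and only centers it, and underscan
 * borders shrink the destination further.
 */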
4665 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4666 const struct dm_connector_state *dm_state,
4667 struct dc_stream_state *stream)
4669 enum amdgpu_rmx_type rmx_type;
4671 struct rect src = { 0 }; /* viewport in composition space */
4672 struct rect dst = { 0 }; /* stream addressable area */
4674 /* no mode. nothing to be done */
4678 /* Full screen scaling by default */
4679 src.width = mode->hdisplay;
4680 src.height = mode->vdisplay;
4681 dst.width = stream->timing.h_addressable;
4682 dst.height = stream->timing.v_addressable;
4685 rmx_type = dm_state->scaling;
4686 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4687 if (src.width * dst.height <
4688 src.height * dst.width) {
4689 /* height needs less upscaling/more downscaling */
4690 dst.width = src.width *
4691 dst.height / src.height;
4693 /* width needs less upscaling/more downscaling */
4694 dst.height = src.height *
4695 dst.width / src.width;
4697 } else if (rmx_type == RMX_CENTER) {
4701 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4702 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4704 if (dm_state->underscan_enable) {
4705 dst.x += dm_state->underscan_hborder / 2;
4706 dst.y += dm_state->underscan_vborder / 2;
4707 dst.width -= dm_state->underscan_hborder;
4708 dst.height -= dm_state->underscan_vborder;
4715 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4716 dst.x, dst.y, dst.width, dst.height);
4720 static enum dc_color_depth
4721 convert_color_depth_from_display_info(const struct drm_connector *connector,
4722 bool is_y420, int requested_bpc)
4729 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4730 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4732 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4734 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4737 bpc = (uint8_t)connector->display_info.bpc;
4738 /* Assume 8 bpc by default if no bpc is specified. */
4739 bpc = bpc ? bpc : 8;
4742 if (requested_bpc > 0) {
4744 * Cap display bpc based on the user requested value.
4746 * The value for state->max_bpc may not be correctly updated
4747 * depending on when the connector gets added to the state
4748 * or if this was called outside of atomic check, so it
4749 * can't be used directly.
4751 bpc = min_t(u8, bpc, requested_bpc);
4753 /* Round down to the nearest even number. */
4754 bpc = bpc - (bpc & 1);
4760 * Temporary workaround: DRM doesn't parse color depth for
4761 * EDID revisions before 1.4.
4762 * TODO: Fix EDID parsing
4764 return COLOR_DEPTH_888;
4766 return COLOR_DEPTH_666;
4768 return COLOR_DEPTH_888;
4770 return COLOR_DEPTH_101010;
4772 return COLOR_DEPTH_121212;
4774 return COLOR_DEPTH_141414;
4776 return COLOR_DEPTH_161616;
4778 return COLOR_DEPTH_UNDEFINED;
4782 static enum dc_aspect_ratio
4783 get_aspect_ratio(const struct drm_display_mode *mode_in)
4785 /* 1-1 mapping, since both enums follow the HDMI spec. */
4786 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4789 static enum dc_color_space
4790 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4792 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4794 switch (dc_crtc_timing->pixel_encoding) {
4795 case PIXEL_ENCODING_YCBCR422:
4796 case PIXEL_ENCODING_YCBCR444:
4797 case PIXEL_ENCODING_YCBCR420:
4800 * 27030 kHz is the separation point between HDTV and SDTV
4801 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4804 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4805 if (dc_crtc_timing->flags.Y_ONLY)
4807 COLOR_SPACE_YCBCR709_LIMITED;
4809 color_space = COLOR_SPACE_YCBCR709;
4811 if (dc_crtc_timing->flags.Y_ONLY)
4813 COLOR_SPACE_YCBCR601_LIMITED;
4815 color_space = COLOR_SPACE_YCBCR601;
4820 case PIXEL_ENCODING_RGB:
4821 color_space = COLOR_SPACE_SRGB;
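/*
 * HDMI deep colour increases the effective TMDS clock, so walk down from the
 * requested colour depth until the adjusted pixel clock fits within the
 * sink's max_tmds_clock.  Rough numbers for a 148500 kHz (1080p60) mode:
 * 12 bpc needs 148500 * 36 / 24 = 222750 kHz and 10 bpc needs 185625 kHz,
 * so a 225 MHz sink keeps 12 bpc while a 165 MHz sink drops down to 8 bpc.
 */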
4832 static bool adjust_colour_depth_from_display_info(
4833 struct dc_crtc_timing *timing_out,
4834 const struct drm_display_info *info)
4836 enum dc_color_depth depth = timing_out->display_color_depth;
4839 normalized_clk = timing_out->pix_clk_100hz / 10;
4840 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4841 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4842 normalized_clk /= 2;
4843 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
4845 case COLOR_DEPTH_888:
4847 case COLOR_DEPTH_101010:
4848 normalized_clk = (normalized_clk * 30) / 24;
4850 case COLOR_DEPTH_121212:
4851 normalized_clk = (normalized_clk * 36) / 24;
4853 case COLOR_DEPTH_161616:
4854 normalized_clk = (normalized_clk * 48) / 24;
4857 /* The above depths are the only ones valid for HDMI. */
4860 if (normalized_clk <= info->max_tmds_clock) {
4861 timing_out->display_color_depth = depth;
4864 } while (--depth > COLOR_DEPTH_666);
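/*
 * Fill the DC stream timing from a drm_display_mode: pixel encoding (forcing
 * YCbCr 4:2:0 where the mode or connector requires it), colour depth, VIC,
 * sync polarities, porches and pixel clock, plus the output colour space and
 * transfer function.  When an old stream is passed in, its VIC and sync
 * polarities are carried over instead of being re-derived.
 */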
4868 static void fill_stream_properties_from_drm_display_mode(
4869 struct dc_stream_state *stream,
4870 const struct drm_display_mode *mode_in,
4871 const struct drm_connector *connector,
4872 const struct drm_connector_state *connector_state,
4873 const struct dc_stream_state *old_stream,
4876 struct dc_crtc_timing *timing_out = &stream->timing;
4877 const struct drm_display_info *info = &connector->display_info;
4878 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4879 struct hdmi_vendor_infoframe hv_frame;
4880 struct hdmi_avi_infoframe avi_frame;
4882 memset(&hv_frame, 0, sizeof(hv_frame));
4883 memset(&avi_frame, 0, sizeof(avi_frame));
4885 timing_out->h_border_left = 0;
4886 timing_out->h_border_right = 0;
4887 timing_out->v_border_top = 0;
4888 timing_out->v_border_bottom = 0;
4889 /* TODO: un-hardcode */
4890 if (drm_mode_is_420_only(info, mode_in)
4891 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4892 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4893 else if (drm_mode_is_420_also(info, mode_in)
4894 && aconnector->force_yuv420_output)
4895 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4896 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4897 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4898 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4900 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4902 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4903 timing_out->display_color_depth = convert_color_depth_from_display_info(
4905 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4907 timing_out->scan_type = SCANNING_TYPE_NODATA;
4908 timing_out->hdmi_vic = 0;
4911 timing_out->vic = old_stream->timing.vic;
4912 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4913 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4915 timing_out->vic = drm_match_cea_mode(mode_in);
4916 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4917 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4918 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4919 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4922 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4923 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4924 timing_out->vic = avi_frame.video_code;
4925 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4926 timing_out->hdmi_vic = hv_frame.vic;
4929 timing_out->h_addressable = mode_in->crtc_hdisplay;
4930 timing_out->h_total = mode_in->crtc_htotal;
4931 timing_out->h_sync_width =
4932 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4933 timing_out->h_front_porch =
4934 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4935 timing_out->v_total = mode_in->crtc_vtotal;
4936 timing_out->v_addressable = mode_in->crtc_vdisplay;
4937 timing_out->v_front_porch =
4938 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4939 timing_out->v_sync_width =
4940 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4941 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4942 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4944 stream->output_color_space = get_output_color_space(timing_out);
4946 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4947 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4948 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4949 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4950 drm_mode_is_420_also(info, mode_in) &&
4951 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4952 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4953 adjust_colour_depth_from_display_info(timing_out, info);
4958 static void fill_audio_info(struct audio_info *audio_info,
4959 const struct drm_connector *drm_connector,
4960 const struct dc_sink *dc_sink)
4963 int cea_revision = 0;
4964 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4966 audio_info->manufacture_id = edid_caps->manufacturer_id;
4967 audio_info->product_id = edid_caps->product_id;
4969 cea_revision = drm_connector->display_info.cea_rev;
4971 strscpy(audio_info->display_name,
4972 edid_caps->display_name,
4973 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4975 if (cea_revision >= 3) {
4976 audio_info->mode_count = edid_caps->audio_mode_count;
4978 for (i = 0; i < audio_info->mode_count; ++i) {
4979 audio_info->modes[i].format_code =
4980 (enum audio_format_code)
4981 (edid_caps->audio_modes[i].format_code);
4982 audio_info->modes[i].channel_count =
4983 edid_caps->audio_modes[i].channel_count;
4984 audio_info->modes[i].sample_rates.all =
4985 edid_caps->audio_modes[i].sample_rate;
4986 audio_info->modes[i].sample_size =
4987 edid_caps->audio_modes[i].sample_size;
4991 audio_info->flags.all = edid_caps->speaker_flags;
4993 /* TODO: We only check for the progressive mode, check for interlace mode too */
4994 if (drm_connector->latency_present[0]) {
4995 audio_info->video_latency = drm_connector->video_latency[0];
4996 audio_info->audio_latency = drm_connector->audio_latency[0];
4999 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5004 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5005 struct drm_display_mode *dst_mode)
5007 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5008 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5009 dst_mode->crtc_clock = src_mode->crtc_clock;
5010 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5011 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5012 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5013 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5014 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5015 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5016 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5017 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5018 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5019 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5020 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5024 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5025 const struct drm_display_mode *native_mode,
5028 if (scale_enabled) {
5029 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5030 } else if (native_mode->clock == drm_mode->clock &&
5031 native_mode->htotal == drm_mode->htotal &&
5032 native_mode->vtotal == drm_mode->vtotal) {
5033 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5035 /* no scaling and no amdgpu-inserted mode, nothing to patch */
5039 static struct dc_sink *
5040 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5042 struct dc_sink_init_data sink_init_data = { 0 };
5043 struct dc_sink *sink = NULL;
5044 sink_init_data.link = aconnector->dc_link;
5045 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5047 sink = dc_sink_create(&sink_init_data);
5049 DRM_ERROR("Failed to create sink!\n");
5052 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5057 static void set_multisync_trigger_params(
5058 struct dc_stream_state *stream)
5060 if (stream->triggered_crtc_reset.enabled) {
5061 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5062 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5066 static void set_master_stream(struct dc_stream_state *stream_set[],
5069 int j, highest_rfr = 0, master_stream = 0;
5071 for (j = 0; j < stream_count; j++) {
5072 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5073 int refresh_rate = 0;
5075 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5076 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5077 if (refresh_rate > highest_rfr) {
5078 highest_rfr = refresh_rate;
5083 for (j = 0; j < stream_count; j++) {
5085 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5089 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5093 if (context->stream_count < 2)
5095 for (i = 0; i < context->stream_count ; i++) {
5096 if (!context->streams[i])
5099 * TODO: add a function to read AMD VSDB bits and set
5100 * crtc_sync_master.multi_sync_enabled flag
5101 * For now it's set to false
5103 set_multisync_trigger_params(context->streams[i]);
5105 set_master_stream(context->streams, context->stream_count);
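/*
 * Build a dc_stream_state for the given connector and mode.  If no sink is
 * currently attached, a fake virtual sink is created so headless/forced
 * configurations still get a stream.  For DP sinks this is also where DSC is
 * configured from the DPCD caps and the available link bandwidth (and can be
 * forced through debugfs).
 */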
5108 static struct dc_stream_state *
5109 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5110 const struct drm_display_mode *drm_mode,
5111 const struct dm_connector_state *dm_state,
5112 const struct dc_stream_state *old_stream,
5115 struct drm_display_mode *preferred_mode = NULL;
5116 struct drm_connector *drm_connector;
5117 const struct drm_connector_state *con_state =
5118 dm_state ? &dm_state->base : NULL;
5119 struct dc_stream_state *stream = NULL;
5120 struct drm_display_mode mode = *drm_mode;
5121 bool native_mode_found = false;
5122 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5124 int preferred_refresh = 0;
5125 #if defined(CONFIG_DRM_AMD_DC_DCN)
5126 struct dsc_dec_dpcd_caps dsc_caps;
5128 uint32_t link_bandwidth_kbps;
5130 struct dc_sink *sink = NULL;
5131 if (aconnector == NULL) {
5132 DRM_ERROR("aconnector is NULL!\n");
5136 drm_connector = &aconnector->base;
5138 if (!aconnector->dc_sink) {
5139 sink = create_fake_sink(aconnector);
5143 sink = aconnector->dc_sink;
5144 dc_sink_retain(sink);
5147 stream = dc_create_stream_for_sink(sink);
5149 if (stream == NULL) {
5150 DRM_ERROR("Failed to create stream for sink!\n");
5154 stream->dm_stream_context = aconnector;
5156 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5157 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5159 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5160 /* Search for preferred mode */
5161 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5162 native_mode_found = true;
5166 if (!native_mode_found)
5167 preferred_mode = list_first_entry_or_null(
5168 &aconnector->base.modes,
5169 struct drm_display_mode,
5172 mode_refresh = drm_mode_vrefresh(&mode);
5174 if (preferred_mode == NULL) {
5176 * This may not be an error: the use case is when we have no
5177 * usermode calls to reset and set mode upon hotplug. In this
5178 * case, we call set mode ourselves to restore the previous mode
5179 * and the mode list may not be populated in time.
5181 DRM_DEBUG_DRIVER("No preferred mode found\n");
5183 decide_crtc_timing_for_drm_display_mode(
5184 &mode, preferred_mode,
5185 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5186 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5190 drm_mode_set_crtcinfo(&mode, 0);
5193 * If scaling is enabled and refresh rate didn't change
5194 * we copy the vic and polarities of the old timings
5196 if (!scale || mode_refresh != preferred_refresh)
5197 fill_stream_properties_from_drm_display_mode(stream,
5198 &mode, &aconnector->base, con_state, NULL, requested_bpc);
5200 fill_stream_properties_from_drm_display_mode(stream,
5201 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5203 stream->timing.flags.DSC = 0;
5205 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5206 #if defined(CONFIG_DRM_AMD_DC_DCN)
5207 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5208 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5209 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5212 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5213 dc_link_get_link_cap(aconnector->dc_link));
5215 #if defined(CONFIG_DRM_AMD_DC_DCN)
5216 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5217 /* Set DSC policy according to dsc_clock_en */
5218 dc_dsc_policy_set_enable_dsc_when_not_needed(
5219 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5221 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5223 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5225 link_bandwidth_kbps,
5227 &stream->timing.dsc_cfg))
5228 stream->timing.flags.DSC = 1;
5229 /* Overwrite the stream flag if DSC is enabled through debugfs */
5230 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5231 stream->timing.flags.DSC = 1;
5233 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5234 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5236 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5237 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5239 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5240 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5245 update_stream_scaling_settings(&mode, dm_state, stream);
5248 &stream->audio_info,
5252 update_stream_signal(stream, sink);
5254 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5255 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5257 if (stream->link->psr_settings.psr_feature_enabled) {
5259 // Decide whether the stream supports VSC SDP colorimetry
5260 // before building the VSC info packet.
5262 stream->use_vsc_sdp_for_colorimetry = false;
5263 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5264 stream->use_vsc_sdp_for_colorimetry =
5265 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5267 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5268 stream->use_vsc_sdp_for_colorimetry = true;
5270 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5273 dc_sink_release(sink);
5278 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5280 drm_crtc_cleanup(crtc);
5284 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5285 struct drm_crtc_state *state)
5287 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5289 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5291 dc_stream_release(cur->stream);
5294 __drm_atomic_helper_crtc_destroy_state(state);
5300 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5302 struct dm_crtc_state *state;
5305 dm_crtc_destroy_state(crtc, crtc->state);
5307 state = kzalloc(sizeof(*state), GFP_KERNEL);
5308 if (WARN_ON(!state))
5311 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5314 static struct drm_crtc_state *
5315 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5317 struct dm_crtc_state *state, *cur;
5319 cur = to_dm_crtc_state(crtc->state);
5321 if (WARN_ON(!crtc->state))
5324 state = kzalloc(sizeof(*state), GFP_KERNEL);
5328 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5331 state->stream = cur->stream;
5332 dc_stream_retain(state->stream);
5335 state->active_planes = cur->active_planes;
5336 state->vrr_infopacket = cur->vrr_infopacket;
5337 state->abm_level = cur->abm_level;
5338 state->vrr_supported = cur->vrr_supported;
5339 state->freesync_config = cur->freesync_config;
5340 state->crc_src = cur->crc_src;
5341 state->cm_has_degamma = cur->cm_has_degamma;
5342 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5343 #ifdef CONFIG_DEBUG_FS
5344 state->crc_window = cur->crc_window;
5346 /* TODO: Duplicate dc_stream once the stream object is flattened */
5348 return &state->base;
5351 #ifdef CONFIG_DEBUG_FS
5352 int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5353 struct drm_crtc_state *crtc_state,
5354 struct drm_property *property,
5357 struct drm_device *dev = crtc->dev;
5358 struct amdgpu_device *adev = drm_to_adev(dev);
5359 struct dm_crtc_state *dm_new_state =
5360 to_dm_crtc_state(crtc_state);
5362 if (property == adev->dm.crc_win_x_start_property)
5363 dm_new_state->crc_window.x_start = val;
5364 else if (property == adev->dm.crc_win_y_start_property)
5365 dm_new_state->crc_window.y_start = val;
5366 else if (property == adev->dm.crc_win_x_end_property)
5367 dm_new_state->crc_window.x_end = val;
5368 else if (property == adev->dm.crc_win_y_end_property)
5369 dm_new_state->crc_window.y_end = val;
5376 int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5377 const struct drm_crtc_state *state,
5378 struct drm_property *property,
5381 struct drm_device *dev = crtc->dev;
5382 struct amdgpu_device *adev = drm_to_adev(dev);
5383 struct dm_crtc_state *dm_state =
5384 to_dm_crtc_state(state);
5386 if (property == adev->dm.crc_win_x_start_property)
5387 *val = dm_state->crc_window.x_start;
5388 else if (property == adev->dm.crc_win_y_start_property)
5389 *val = dm_state->crc_window.y_start;
5390 else if (property == adev->dm.crc_win_x_end_property)
5391 *val = dm_state->crc_window.x_end;
5392 else if (property == adev->dm.crc_win_y_end_property)
5393 *val = dm_state->crc_window.y_end;
5401 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5403 enum dc_irq_source irq_source;
5404 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5405 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5408 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5410 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5412 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5413 acrtc->crtc_id, enable ? "en" : "dis", rc);
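/*
 * dm_set_vblank() below keeps the VUPDATE interrupt in sync with vblank when
 * variable refresh is active: with VRR the front porch is stretched, so core
 * vblank handling (timestamps, flip completion) is driven from VUPDATE
 * rather than from the vblank interrupt alone.
 */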
5417 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5419 enum dc_irq_source irq_source;
5420 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5421 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5422 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5426 /* vblank irq on -> Only need vupdate irq in vrr mode */
5427 if (amdgpu_dm_vrr_active(acrtc_state))
5428 rc = dm_set_vupdate_irq(crtc, true);
5430 /* vblank irq off -> vupdate irq off */
5431 rc = dm_set_vupdate_irq(crtc, false);
5437 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5438 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5441 static int dm_enable_vblank(struct drm_crtc *crtc)
5443 return dm_set_vblank(crtc, true);
5446 static void dm_disable_vblank(struct drm_crtc *crtc)
5448 dm_set_vblank(crtc, false);
5451 /* Only the options currently available to the driver are implemented */
5452 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5453 .reset = dm_crtc_reset_state,
5454 .destroy = amdgpu_dm_crtc_destroy,
5455 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5456 .set_config = drm_atomic_helper_set_config,
5457 .page_flip = drm_atomic_helper_page_flip,
5458 .atomic_duplicate_state = dm_crtc_duplicate_state,
5459 .atomic_destroy_state = dm_crtc_destroy_state,
5460 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5461 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5462 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5463 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5464 .enable_vblank = dm_enable_vblank,
5465 .disable_vblank = dm_disable_vblank,
5466 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5467 #ifdef CONFIG_DEBUG_FS
5468 .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5469 .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5473 static enum drm_connector_status
5474 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5477 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5481 * 1. This interface is NOT called in context of HPD irq.
5482 * 2. This interface *is called* in the context of a user-mode ioctl, which
5483 * makes it a bad place for *any* MST-related activity.
5486 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5487 !aconnector->fake_enable)
5488 connected = (aconnector->dc_sink != NULL);
5490 connected = (aconnector->base.force == DRM_FORCE_ON);
5492 update_subconnector_property(aconnector);
5494 return (connected ? connector_status_connected :
5495 connector_status_disconnected);
5498 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5499 struct drm_connector_state *connector_state,
5500 struct drm_property *property,
5503 struct drm_device *dev = connector->dev;
5504 struct amdgpu_device *adev = drm_to_adev(dev);
5505 struct dm_connector_state *dm_old_state =
5506 to_dm_connector_state(connector->state);
5507 struct dm_connector_state *dm_new_state =
5508 to_dm_connector_state(connector_state);
5512 if (property == dev->mode_config.scaling_mode_property) {
5513 enum amdgpu_rmx_type rmx_type;
5516 case DRM_MODE_SCALE_CENTER:
5517 rmx_type = RMX_CENTER;
5519 case DRM_MODE_SCALE_ASPECT:
5520 rmx_type = RMX_ASPECT;
5522 case DRM_MODE_SCALE_FULLSCREEN:
5523 rmx_type = RMX_FULL;
5525 case DRM_MODE_SCALE_NONE:
5531 if (dm_old_state->scaling == rmx_type)
5534 dm_new_state->scaling = rmx_type;
5536 } else if (property == adev->mode_info.underscan_hborder_property) {
5537 dm_new_state->underscan_hborder = val;
5539 } else if (property == adev->mode_info.underscan_vborder_property) {
5540 dm_new_state->underscan_vborder = val;
5542 } else if (property == adev->mode_info.underscan_property) {
5543 dm_new_state->underscan_enable = val;
5545 } else if (property == adev->mode_info.abm_level_property) {
5546 dm_new_state->abm_level = val;
5553 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5554 const struct drm_connector_state *state,
5555 struct drm_property *property,
5558 struct drm_device *dev = connector->dev;
5559 struct amdgpu_device *adev = drm_to_adev(dev);
5560 struct dm_connector_state *dm_state =
5561 to_dm_connector_state(state);
5564 if (property == dev->mode_config.scaling_mode_property) {
5565 switch (dm_state->scaling) {
5567 *val = DRM_MODE_SCALE_CENTER;
5570 *val = DRM_MODE_SCALE_ASPECT;
5573 *val = DRM_MODE_SCALE_FULLSCREEN;
5577 *val = DRM_MODE_SCALE_NONE;
5581 } else if (property == adev->mode_info.underscan_hborder_property) {
5582 *val = dm_state->underscan_hborder;
5584 } else if (property == adev->mode_info.underscan_vborder_property) {
5585 *val = dm_state->underscan_vborder;
5587 } else if (property == adev->mode_info.underscan_property) {
5588 *val = dm_state->underscan_enable;
5590 } else if (property == adev->mode_info.abm_level_property) {
5591 *val = dm_state->abm_level;
5598 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5600 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5602 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5605 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5607 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5608 const struct dc_link *link = aconnector->dc_link;
5609 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5610 struct amdgpu_display_manager *dm = &adev->dm;
5613 * Call only if mst_mgr was initialized before, since it's not done
5614 * for all connector types.
5616 if (aconnector->mst_mgr.dev)
5617 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5619 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5620 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5622 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5623 link->type != dc_connection_none &&
5624 dm->backlight_dev) {
5625 backlight_device_unregister(dm->backlight_dev);
5626 dm->backlight_dev = NULL;
5630 if (aconnector->dc_em_sink)
5631 dc_sink_release(aconnector->dc_em_sink);
5632 aconnector->dc_em_sink = NULL;
5633 if (aconnector->dc_sink)
5634 dc_sink_release(aconnector->dc_sink);
5635 aconnector->dc_sink = NULL;
5637 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5638 drm_connector_unregister(connector);
5639 drm_connector_cleanup(connector);
5640 if (aconnector->i2c) {
5641 i2c_del_adapter(&aconnector->i2c->base);
5642 kfree(aconnector->i2c);
5644 kfree(aconnector->dm_dp_aux.aux.name);
5649 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5651 struct dm_connector_state *state =
5652 to_dm_connector_state(connector->state);
5654 if (connector->state)
5655 __drm_atomic_helper_connector_destroy_state(connector->state);
5659 state = kzalloc(sizeof(*state), GFP_KERNEL);
5662 state->scaling = RMX_OFF;
5663 state->underscan_enable = false;
5664 state->underscan_hborder = 0;
5665 state->underscan_vborder = 0;
5666 state->base.max_requested_bpc = 8;
5667 state->vcpi_slots = 0;
5669 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5670 state->abm_level = amdgpu_dm_abm_level;
5672 __drm_atomic_helper_connector_reset(connector, &state->base);
5676 struct drm_connector_state *
5677 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5679 struct dm_connector_state *state =
5680 to_dm_connector_state(connector->state);
5682 struct dm_connector_state *new_state =
5683 kmemdup(state, sizeof(*state), GFP_KERNEL);
5688 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5690 new_state->freesync_capable = state->freesync_capable;
5691 new_state->abm_level = state->abm_level;
5692 new_state->scaling = state->scaling;
5693 new_state->underscan_enable = state->underscan_enable;
5694 new_state->underscan_hborder = state->underscan_hborder;
5695 new_state->underscan_vborder = state->underscan_vborder;
5696 new_state->vcpi_slots = state->vcpi_slots;
5697 new_state->pbn = state->pbn;
5698 return &new_state->base;
5702 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5704 struct amdgpu_dm_connector *amdgpu_dm_connector =
5705 to_amdgpu_dm_connector(connector);
5708 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5709 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5710 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5711 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5716 #if defined(CONFIG_DEBUG_FS)
5717 connector_debugfs_init(amdgpu_dm_connector);
5723 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5724 .reset = amdgpu_dm_connector_funcs_reset,
5725 .detect = amdgpu_dm_connector_detect,
5726 .fill_modes = drm_helper_probe_single_connector_modes,
5727 .destroy = amdgpu_dm_connector_destroy,
5728 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5729 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5730 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5731 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5732 .late_register = amdgpu_dm_connector_late_register,
5733 .early_unregister = amdgpu_dm_connector_unregister
5736 static int get_modes(struct drm_connector *connector)
5738 return amdgpu_dm_connector_get_modes(connector);
5741 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5743 struct dc_sink_init_data init_params = {
5744 .link = aconnector->dc_link,
5745 .sink_signal = SIGNAL_TYPE_VIRTUAL
5749 if (!aconnector->base.edid_blob_ptr) {
5750 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5751 aconnector->base.name);
5753 aconnector->base.force = DRM_FORCE_OFF;
5754 aconnector->base.override_edid = false;
5758 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5760 aconnector->edid = edid;
5762 aconnector->dc_em_sink = dc_link_add_remote_sink(
5763 aconnector->dc_link,
5765 (edid->extensions + 1) * EDID_LENGTH,
5768 if (aconnector->base.force == DRM_FORCE_ON) {
5769 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5770 aconnector->dc_link->local_sink :
5771 aconnector->dc_em_sink;
5772 dc_sink_retain(aconnector->dc_sink);
5776 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5778 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5781 * In case of a headless boot with force on for a DP managed connector,
5782 * those settings have to be != 0 to get an initial modeset
5784 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5785 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5786 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5790 aconnector->base.override_edid = true;
5791 create_eml_sink(aconnector);
5794 static struct dc_stream_state *
5795 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5796 const struct drm_display_mode *drm_mode,
5797 const struct dm_connector_state *dm_state,
5798 const struct dc_stream_state *old_stream)
5800 struct drm_connector *connector = &aconnector->base;
5801 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5802 struct dc_stream_state *stream;
5803 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5804 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5805 enum dc_status dc_result = DC_OK;
5808 stream = create_stream_for_sink(aconnector, drm_mode,
5809 dm_state, old_stream,
5811 if (stream == NULL) {
5812 DRM_ERROR("Failed to create stream for sink!\n");
5816 dc_result = dc_validate_stream(adev->dm.dc, stream);
5818 if (dc_result != DC_OK) {
5819 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5824 dc_status_to_str(dc_result));
5826 dc_stream_release(stream);
5828 requested_bpc -= 2; /* lower bpc to retry validation */
5831 } while (stream == NULL && requested_bpc >= 6);
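/*
 * Note on the loop above: when DC rejects the stream, the requested bpc is
 * lowered in steps of 2 (e.g. 10 -> 8 -> 6) and the stream is rebuilt, so a
 * mode that only fails at high colour depth is still exposed at a lower one.
 */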
5836 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5837 struct drm_display_mode *mode)
5839 int result = MODE_ERROR;
5840 struct dc_sink *dc_sink;
5841 /* TODO: Unhardcode stream count */
5842 struct dc_stream_state *stream;
5843 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5845 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5846 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5850 * Only run this the first time mode_valid is called to initialize
5853 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5854 !aconnector->dc_em_sink)
5855 handle_edid_mgmt(aconnector);
5857 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5859 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5860 aconnector->base.force != DRM_FORCE_ON) {
5861 DRM_ERROR("dc_sink is NULL!\n");
5865 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5867 dc_stream_release(stream);
5872 /* TODO: error handling */
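/*
 * Pack the connector's HDR output metadata into a DC info packet.  The
 * payload is the CTA-861 Dynamic Range and Mastering (DRM) infoframe built
 * by drm_hdmi_infoframe_set_hdr_metadata(); HDMI sends it as infoframe type
 * 0x87, while DP/eDP wrap the same 26-byte payload in an SDP header.
 */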
5876 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5877 struct dc_info_packet *out)
5879 struct hdmi_drm_infoframe frame;
5880 unsigned char buf[30]; /* 26 + 4 */
5884 memset(out, 0, sizeof(*out));
5886 if (!state->hdr_output_metadata)
5889 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5893 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5897 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5901 /* Prepare the infopacket for DC. */
5902 switch (state->connector->connector_type) {
5903 case DRM_MODE_CONNECTOR_HDMIA:
5904 out->hb0 = 0x87; /* type */
5905 out->hb1 = 0x01; /* version */
5906 out->hb2 = 0x1A; /* length */
5907 out->sb[0] = buf[3]; /* checksum */
5911 case DRM_MODE_CONNECTOR_DisplayPort:
5912 case DRM_MODE_CONNECTOR_eDP:
5913 out->hb0 = 0x00; /* sdp id, zero */
5914 out->hb1 = 0x87; /* type */
5915 out->hb2 = 0x1D; /* payload len - 1 */
5916 out->hb3 = (0x13 << 2); /* sdp version */
5917 out->sb[0] = 0x01; /* version */
5918 out->sb[1] = 0x1A; /* length */
5926 memcpy(&out->sb[i], &buf[4], 26);
5929 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5930 sizeof(out->sb), false);
5936 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5937 const struct drm_connector_state *new_state)
5939 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5940 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5942 if (old_blob != new_blob) {
5943 if (old_blob && new_blob &&
5944 old_blob->length == new_blob->length)
5945 return memcmp(old_blob->data, new_blob->data,
5955 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5956 struct drm_atomic_state *state)
5958 struct drm_connector_state *new_con_state =
5959 drm_atomic_get_new_connector_state(state, conn);
5960 struct drm_connector_state *old_con_state =
5961 drm_atomic_get_old_connector_state(state, conn);
5962 struct drm_crtc *crtc = new_con_state->crtc;
5963 struct drm_crtc_state *new_crtc_state;
5966 trace_amdgpu_dm_connector_atomic_check(new_con_state);
5971 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5972 struct dc_info_packet hdr_infopacket;
5974 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5978 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5979 if (IS_ERR(new_crtc_state))
5980 return PTR_ERR(new_crtc_state);
5983 * DC considers the stream backends changed if the
5984 * static metadata changes. Forcing the modeset also
5985 * gives a simple way for userspace to switch from
5986 * 8bpc to 10bpc when setting the metadata to enter
5989 * Changing the static metadata after it's been
5990 * set is permissible, however. So only force a
5991 * modeset if we're entering or exiting HDR.
5993 new_crtc_state->mode_changed =
5994 !old_con_state->hdr_output_metadata ||
5995 !new_con_state->hdr_output_metadata;
6001 static const struct drm_connector_helper_funcs
6002 amdgpu_dm_connector_helper_funcs = {
6004 * If hotplugging a second, bigger display in FB Con mode, bigger resolution
6005 * modes will be filtered by drm_mode_validate_size(), and those modes
6006 * are missing after the user starts lightdm. So we need to renew the modes
6007 * list in the get_modes callback, not just return the modes count.
6009 .get_modes = get_modes,
6010 .mode_valid = amdgpu_dm_connector_mode_valid,
6011 .atomic_check = amdgpu_dm_connector_atomic_check,
6014 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6018 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6020 struct drm_atomic_state *state = new_crtc_state->state;
6021 struct drm_plane *plane;
6024 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6025 struct drm_plane_state *new_plane_state;
6027 /* Cursor planes are "fake". */
6028 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6031 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6033 if (!new_plane_state) {
6035 * The plane is enabled on the CRTC and hasn't changed
6036 * state. This means that it previously passed
6037 * validation and is therefore enabled.
6043 /* We need a framebuffer to be considered enabled. */
6044 num_active += (new_plane_state->fb != NULL);
6050 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6051 struct drm_crtc_state *new_crtc_state)
6053 struct dm_crtc_state *dm_new_crtc_state =
6054 to_dm_crtc_state(new_crtc_state);
6056 dm_new_crtc_state->active_planes = 0;
6058 if (!dm_new_crtc_state->stream)
6061 dm_new_crtc_state->active_planes =
6062 count_crtc_active_planes(new_crtc_state);
6065 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6066 struct drm_atomic_state *state)
6068 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6070 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6071 struct dc *dc = adev->dm.dc;
6072 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6075 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6077 dm_update_crtc_active_planes(crtc, crtc_state);
6079 if (unlikely(!dm_crtc_state->stream &&
6080 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6086 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6087 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6088 * planes are disabled, which is not supported by the hardware. And there is legacy
6089 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6091 if (crtc_state->enable &&
6092 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6093 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6097 /* In some use cases, like reset, no stream is attached */
6098 if (!dm_crtc_state->stream)
6101 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6104 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6108 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6109 const struct drm_display_mode *mode,
6110 struct drm_display_mode *adjusted_mode)
6115 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6116 .disable = dm_crtc_helper_disable,
6117 .atomic_check = dm_crtc_helper_atomic_check,
6118 .mode_fixup = dm_crtc_helper_mode_fixup,
6119 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6122 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6127 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6129 switch (display_color_depth) {
6130 case COLOR_DEPTH_666:
6132 case COLOR_DEPTH_888:
6134 case COLOR_DEPTH_101010:
6136 case COLOR_DEPTH_121212:
6138 case COLOR_DEPTH_141414:
6140 case COLOR_DEPTH_161616:
6148 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6149 struct drm_crtc_state *crtc_state,
6150 struct drm_connector_state *conn_state)
6152 struct drm_atomic_state *state = crtc_state->state;
6153 struct drm_connector *connector = conn_state->connector;
6154 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6155 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6156 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6157 struct drm_dp_mst_topology_mgr *mst_mgr;
6158 struct drm_dp_mst_port *mst_port;
6159 enum dc_color_depth color_depth;
6161 bool is_y420 = false;
6163 if (!aconnector->port || !aconnector->dc_sink)
6166 mst_port = aconnector->port;
6167 mst_mgr = &aconnector->mst_port->mst_mgr;
6169 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6172 if (!state->duplicated) {
6173 int max_bpc = conn_state->max_requested_bpc;
6174 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6175 aconnector->force_yuv420_output;
6176 color_depth = convert_color_depth_from_display_info(connector,
6179 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6180 clock = adjusted_mode->clock;
6181 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6183 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6186 dm_new_connector_state->pbn,
6187 dm_mst_get_pbn_divider(aconnector->dc_link));
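/*
 * Illustrative numbers for the PBN/slot math above (an example, not values
 * taken from this driver): with 8 bpc RGB, bpp = 8 * 3 = 24, and a
 * 1920x1080@60 mode (pixel clock ~148500 kHz) works out to roughly
 * 148500 * 24 / 8 / 54 / 1000 * 64 * 1.006 ~= 531 PBN, where a PBN is the
 * MST bandwidth unit of 54/64 MB/s. The VCPI slot count is then roughly
 * DIV_ROUND_UP(pbn, pbn_div), with pbn_div being the per-timeslot PBN
 * capacity of this particular link.
 */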
6188 if (dm_new_connector_state->vcpi_slots < 0) {
6189 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6190 return dm_new_connector_state->vcpi_slots;
6195 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6196 .disable = dm_encoder_helper_disable,
6197 .atomic_check = dm_encoder_helper_atomic_check
6200 #if defined(CONFIG_DRM_AMD_DC_DCN)
6201 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6202 struct dc_state *dc_state)
6204 struct dc_stream_state *stream = NULL;
6205 struct drm_connector *connector;
6206 struct drm_connector_state *new_con_state, *old_con_state;
6207 struct amdgpu_dm_connector *aconnector;
6208 struct dm_connector_state *dm_conn_state;
6209 int i, j, clock, bpp;
6210 int vcpi, pbn_div, pbn = 0;
6212 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6214 aconnector = to_amdgpu_dm_connector(connector);
6216 if (!aconnector->port)
6219 if (!new_con_state || !new_con_state->crtc)
6222 dm_conn_state = to_dm_connector_state(new_con_state);
6224 for (j = 0; j < dc_state->stream_count; j++) {
6225 stream = dc_state->streams[j];
6229 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6238 if (stream->timing.flags.DSC != 1) {
6239 drm_dp_mst_atomic_enable_dsc(state,
6247 pbn_div = dm_mst_get_pbn_divider(stream->link);
6248 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6249 clock = stream->timing.pix_clk_100hz / 10;
6250 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6251 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6258 dm_conn_state->pbn = pbn;
6259 dm_conn_state->vcpi_slots = vcpi;
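/*
 * Note on the DSC path above: dsc_cfg.bits_per_pixel is a fixed-point value
 * in units of 1/16 of a bit per pixel (e.g. 128 means 8 bpp), and passing
 * true as the DSC flag to drm_dp_calc_pbn_mode() is what accounts for that
 * scaling when converting to PBN.
 */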
6265 static void dm_drm_plane_reset(struct drm_plane *plane)
6267 struct dm_plane_state *amdgpu_state = NULL;
6270 plane->funcs->atomic_destroy_state(plane, plane->state);
6272 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6273 WARN_ON(amdgpu_state == NULL);
6276 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6279 static struct drm_plane_state *
6280 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6282 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6284 old_dm_plane_state = to_dm_plane_state(plane->state);
6285 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6286 if (!dm_plane_state)
6289 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6291 if (old_dm_plane_state->dc_state) {
6292 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6293 dc_plane_state_retain(dm_plane_state->dc_state);
6296 return &dm_plane_state->base;
6299 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6300 struct drm_plane_state *state)
6302 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6304 if (dm_plane_state->dc_state)
6305 dc_plane_state_release(dm_plane_state->dc_state);
6307 drm_atomic_helper_plane_destroy_state(plane, state);
6310 static const struct drm_plane_funcs dm_plane_funcs = {
6311 .update_plane = drm_atomic_helper_update_plane,
6312 .disable_plane = drm_atomic_helper_disable_plane,
6313 .destroy = drm_primary_helper_destroy,
6314 .reset = dm_drm_plane_reset,
6315 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6316 .atomic_destroy_state = dm_drm_plane_destroy_state,
6317 .format_mod_supported = dm_plane_format_mod_supported,
6320 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6321 struct drm_plane_state *new_state)
6323 struct amdgpu_framebuffer *afb;
6324 struct drm_gem_object *obj;
6325 struct amdgpu_device *adev;
6326 struct amdgpu_bo *rbo;
6327 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6328 struct list_head list;
6329 struct ttm_validate_buffer tv;
6330 struct ww_acquire_ctx ticket;
6334 if (!new_state->fb) {
6335 DRM_DEBUG_DRIVER("No FB bound\n");
6339 afb = to_amdgpu_framebuffer(new_state->fb);
6340 obj = new_state->fb->obj[0];
6341 rbo = gem_to_amdgpu_bo(obj);
6342 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6343 INIT_LIST_HEAD(&list);
6347 list_add(&tv.head, &list);
6349 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6351 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6355 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6356 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6358 domain = AMDGPU_GEM_DOMAIN_VRAM;
6360 r = amdgpu_bo_pin(rbo, domain);
6361 if (unlikely(r != 0)) {
6362 if (r != -ERESTARTSYS)
6363 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6364 ttm_eu_backoff_reservation(&ticket, &list);
6368 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6369 if (unlikely(r != 0)) {
6370 amdgpu_bo_unpin(rbo);
6371 ttm_eu_backoff_reservation(&ticket, &list);
6372 DRM_ERROR("%p bind failed\n", rbo);
6376 ttm_eu_backoff_reservation(&ticket, &list);
6378 afb->address = amdgpu_bo_gpu_offset(rbo);
6383 * We don't do surface updates on planes that have been newly created,
6384 * but we also don't have the afb->address during atomic check.
6386 * Fill in buffer attributes depending on the address here, but only on
6387 * newly created planes since they're not being used by DC yet and this
6388 * won't modify global state.
6390 dm_plane_state_old = to_dm_plane_state(plane->state);
6391 dm_plane_state_new = to_dm_plane_state(new_state);
6393 if (dm_plane_state_new->dc_state &&
6394 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6395 struct dc_plane_state *plane_state =
6396 dm_plane_state_new->dc_state;
6397 bool force_disable_dcc = !plane_state->dcc.enable;
6399 fill_plane_buffer_attributes(
6400 adev, afb, plane_state->format, plane_state->rotation,
6402 &plane_state->tiling_info, &plane_state->plane_size,
6403 &plane_state->dcc, &plane_state->address,
6404 afb->tmz_surface, force_disable_dcc);
6410 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6411 struct drm_plane_state *old_state)
6413 struct amdgpu_bo *rbo;
6419 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6420 r = amdgpu_bo_reserve(rbo, false);
6422 DRM_ERROR("failed to reserve rbo before unpin\n");
6426 amdgpu_bo_unpin(rbo);
6427 amdgpu_bo_unreserve(rbo);
6428 amdgpu_bo_unref(&rbo);
6431 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6432 struct drm_crtc_state *new_crtc_state)
6434 int max_downscale = 0;
6435 int max_upscale = INT_MAX;
6437 /* TODO: These should be checked against DC plane caps */
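/*
 * max_downscale/max_upscale above are handed to
 * drm_atomic_helper_check_plane_state() as min_scale/max_scale in 16.16
 * fixed point, so 0 and INT_MAX effectively allow unrestricted scaling.
 * One possible shape for the TODO (an assumption, not current behaviour)
 * would be to derive these limits from plane_cap->max_downscale_factor /
 * max_upscale_factor for the framebuffer's pixel format class.
 */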
6438 return drm_atomic_helper_check_plane_state(
6439 state, new_crtc_state, max_downscale, max_upscale, true, true);
6442 static int dm_plane_atomic_check(struct drm_plane *plane,
6443 struct drm_plane_state *state)
6445 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6446 struct dc *dc = adev->dm.dc;
6447 struct dm_plane_state *dm_plane_state;
6448 struct dc_scaling_info scaling_info;
6449 struct drm_crtc_state *new_crtc_state;
6452 trace_amdgpu_dm_plane_atomic_check(state);
6454 dm_plane_state = to_dm_plane_state(state);
6456 if (!dm_plane_state->dc_state)
6460 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6461 if (!new_crtc_state)
6464 ret = dm_plane_helper_check_state(state, new_crtc_state);
6468 ret = fill_dc_scaling_info(state, &scaling_info);
6472 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6478 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6479 struct drm_plane_state *new_plane_state)
6481 /* Only support async updates on cursor planes. */
6482 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6488 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6489 struct drm_plane_state *new_state)
6491 struct drm_plane_state *old_state =
6492 drm_atomic_get_old_plane_state(new_state->state, plane);
6494 trace_amdgpu_dm_atomic_update_cursor(new_state);
6496 swap(plane->state->fb, new_state->fb);
6498 plane->state->src_x = new_state->src_x;
6499 plane->state->src_y = new_state->src_y;
6500 plane->state->src_w = new_state->src_w;
6501 plane->state->src_h = new_state->src_h;
6502 plane->state->crtc_x = new_state->crtc_x;
6503 plane->state->crtc_y = new_state->crtc_y;
6504 plane->state->crtc_w = new_state->crtc_w;
6505 plane->state->crtc_h = new_state->crtc_h;
6507 handle_cursor_update(plane, old_state);
6510 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6511 .prepare_fb = dm_plane_helper_prepare_fb,
6512 .cleanup_fb = dm_plane_helper_cleanup_fb,
6513 .atomic_check = dm_plane_atomic_check,
6514 .atomic_async_check = dm_plane_atomic_async_check,
6515 .atomic_async_update = dm_plane_atomic_async_update
6519 * TODO: these are currently initialized to RGB formats only.
6520 * For future use cases we should either initialize them dynamically based on
6521 * plane capabilities, or initialize this array to all formats, so the internal DRM
6522 * check will succeed, and let DC implement the proper check
6524 static const uint32_t rgb_formats[] = {
6525 DRM_FORMAT_XRGB8888,
6526 DRM_FORMAT_ARGB8888,
6527 DRM_FORMAT_RGBA8888,
6528 DRM_FORMAT_XRGB2101010,
6529 DRM_FORMAT_XBGR2101010,
6530 DRM_FORMAT_ARGB2101010,
6531 DRM_FORMAT_ABGR2101010,
6532 DRM_FORMAT_XBGR8888,
6533 DRM_FORMAT_ABGR8888,
6537 static const uint32_t overlay_formats[] = {
6538 DRM_FORMAT_XRGB8888,
6539 DRM_FORMAT_ARGB8888,
6540 DRM_FORMAT_RGBA8888,
6541 DRM_FORMAT_XBGR8888,
6542 DRM_FORMAT_ABGR8888,
6546 static const u32 cursor_formats[] = {
6550 static int get_plane_formats(const struct drm_plane *plane,
6551 const struct dc_plane_cap *plane_cap,
6552 uint32_t *formats, int max_formats)
6554 int i, num_formats = 0;
6557 * TODO: Query support for each group of formats directly from
6558 * DC plane caps. This will require adding more formats to the caps list.
6562 switch (plane->type) {
6563 case DRM_PLANE_TYPE_PRIMARY:
6564 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6565 if (num_formats >= max_formats)
6568 formats[num_formats++] = rgb_formats[i];
6571 if (plane_cap && plane_cap->pixel_format_support.nv12)
6572 formats[num_formats++] = DRM_FORMAT_NV12;
6573 if (plane_cap && plane_cap->pixel_format_support.p010)
6574 formats[num_formats++] = DRM_FORMAT_P010;
6575 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6576 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6577 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6578 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6579 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6583 case DRM_PLANE_TYPE_OVERLAY:
6584 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6585 if (num_formats >= max_formats)
6588 formats[num_formats++] = overlay_formats[i];
6592 case DRM_PLANE_TYPE_CURSOR:
6593 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6594 if (num_formats >= max_formats)
6597 formats[num_formats++] = cursor_formats[i];
6605 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6606 struct drm_plane *plane,
6607 unsigned long possible_crtcs,
6608 const struct dc_plane_cap *plane_cap)
6610 uint32_t formats[32];
6613 unsigned int supported_rotations;
6614 uint64_t *modifiers = NULL;
6616 num_formats = get_plane_formats(plane, plane_cap, formats,
6617 ARRAY_SIZE(formats));
6619 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6623 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6624 &dm_plane_funcs, formats, num_formats,
6625 modifiers, plane->type, NULL);
6630 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6631 plane_cap && plane_cap->per_pixel_alpha) {
6632 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6633 BIT(DRM_MODE_BLEND_PREMULTI);
6635 drm_plane_create_alpha_property(plane);
6636 drm_plane_create_blend_mode_property(plane, blend_caps);
6639 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6641 (plane_cap->pixel_format_support.nv12 ||
6642 plane_cap->pixel_format_support.p010)) {
6643 /* This only affects YUV formats. */
6644 drm_plane_create_color_properties(
6646 BIT(DRM_COLOR_YCBCR_BT601) |
6647 BIT(DRM_COLOR_YCBCR_BT709) |
6648 BIT(DRM_COLOR_YCBCR_BT2020),
6649 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6650 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6651 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6654 supported_rotations =
6655 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6656 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6658 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6659 plane->type != DRM_PLANE_TYPE_CURSOR)
6660 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6661 supported_rotations);
6663 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6665 /* Create (reset) the plane state */
6666 if (plane->funcs->reset)
6667 plane->funcs->reset(plane);
6672 #ifdef CONFIG_DEBUG_FS
6673 static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
6674 struct amdgpu_crtc *acrtc)
6676 drm_object_attach_property(&acrtc->base.base,
6677 dm->crc_win_x_start_property,
6679 drm_object_attach_property(&acrtc->base.base,
6680 dm->crc_win_y_start_property,
6682 drm_object_attach_property(&acrtc->base.base,
6683 dm->crc_win_x_end_property,
6685 drm_object_attach_property(&acrtc->base.base,
6686 dm->crc_win_y_end_property,
6691 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6692 struct drm_plane *plane,
6693 uint32_t crtc_index)
6695 struct amdgpu_crtc *acrtc = NULL;
6696 struct drm_plane *cursor_plane;
6700 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6704 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6705 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6707 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6711 res = drm_crtc_init_with_planes(
6716 &amdgpu_dm_crtc_funcs, NULL);
6721 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6723 /* Create (reset) the CRTC state */
6724 if (acrtc->base.funcs->reset)
6725 acrtc->base.funcs->reset(&acrtc->base);
6727 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6728 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6730 acrtc->crtc_id = crtc_index;
6731 acrtc->base.enabled = false;
6732 acrtc->otg_inst = -1;
6734 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6735 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6736 true, MAX_COLOR_LUT_ENTRIES);
6737 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
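/*
 * The two calls above expose the non-legacy color management properties
 * (degamma LUT, CTM, gamma LUT) with MAX_COLOR_LUT_ENTRIES entries each,
 * plus the legacy gamma ramp of MAX_COLOR_LEGACY_LUT_ENTRIES entries; the
 * actual sizes are defined in amdgpu_dm's color code (assumed here to be
 * 4096 and 256 respectively).
 */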
6738 #ifdef CONFIG_DEBUG_FS
6739 attach_crtc_crc_properties(dm, acrtc);
6745 kfree(cursor_plane);
6750 static int to_drm_connector_type(enum signal_type st)
6753 case SIGNAL_TYPE_HDMI_TYPE_A:
6754 return DRM_MODE_CONNECTOR_HDMIA;
6755 case SIGNAL_TYPE_EDP:
6756 return DRM_MODE_CONNECTOR_eDP;
6757 case SIGNAL_TYPE_LVDS:
6758 return DRM_MODE_CONNECTOR_LVDS;
6759 case SIGNAL_TYPE_RGB:
6760 return DRM_MODE_CONNECTOR_VGA;
6761 case SIGNAL_TYPE_DISPLAY_PORT:
6762 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6763 return DRM_MODE_CONNECTOR_DisplayPort;
6764 case SIGNAL_TYPE_DVI_DUAL_LINK:
6765 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6766 return DRM_MODE_CONNECTOR_DVID;
6767 case SIGNAL_TYPE_VIRTUAL:
6768 return DRM_MODE_CONNECTOR_VIRTUAL;
6771 return DRM_MODE_CONNECTOR_Unknown;
6775 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6777 struct drm_encoder *encoder;
6779 /* There is only one encoder per connector */
6780 drm_connector_for_each_possible_encoder(connector, encoder)
6786 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6788 struct drm_encoder *encoder;
6789 struct amdgpu_encoder *amdgpu_encoder;
6791 encoder = amdgpu_dm_connector_to_encoder(connector);
6793 if (encoder == NULL)
6796 amdgpu_encoder = to_amdgpu_encoder(encoder);
6798 amdgpu_encoder->native_mode.clock = 0;
6800 if (!list_empty(&connector->probed_modes)) {
6801 struct drm_display_mode *preferred_mode = NULL;
6803 list_for_each_entry(preferred_mode,
6804 &connector->probed_modes,
6806 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6807 amdgpu_encoder->native_mode = *preferred_mode;
6815 static struct drm_display_mode *
6816 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6818 int hdisplay, int vdisplay)
6820 struct drm_device *dev = encoder->dev;
6821 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6822 struct drm_display_mode *mode = NULL;
6823 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6825 mode = drm_mode_duplicate(dev, native_mode);
6830 mode->hdisplay = hdisplay;
6831 mode->vdisplay = vdisplay;
6832 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6833 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6839 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6840 struct drm_connector *connector)
6842 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6843 struct drm_display_mode *mode = NULL;
6844 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6845 struct amdgpu_dm_connector *amdgpu_dm_connector =
6846 to_amdgpu_dm_connector(connector);
6850 char name[DRM_DISPLAY_MODE_LEN];
6853 } common_modes[] = {
6854 { "640x480", 640, 480},
6855 { "800x600", 800, 600},
6856 { "1024x768", 1024, 768},
6857 { "1280x720", 1280, 720},
6858 { "1280x800", 1280, 800},
6859 {"1280x1024", 1280, 1024},
6860 { "1440x900", 1440, 900},
6861 {"1680x1050", 1680, 1050},
6862 {"1600x1200", 1600, 1200},
6863 {"1920x1080", 1920, 1080},
6864 {"1920x1200", 1920, 1200}
6867 n = ARRAY_SIZE(common_modes);
6869 for (i = 0; i < n; i++) {
6870 struct drm_display_mode *curmode = NULL;
6871 bool mode_existed = false;
6873 if (common_modes[i].w > native_mode->hdisplay ||
6874 common_modes[i].h > native_mode->vdisplay ||
6875 (common_modes[i].w == native_mode->hdisplay &&
6876 common_modes[i].h == native_mode->vdisplay))
6879 list_for_each_entry(curmode, &connector->probed_modes, head) {
6880 if (common_modes[i].w == curmode->hdisplay &&
6881 common_modes[i].h == curmode->vdisplay) {
6882 mode_existed = true;
6890 mode = amdgpu_dm_create_common_mode(encoder,
6891 common_modes[i].name, common_modes[i].w,
6893 drm_mode_probed_add(connector, mode);
6894 amdgpu_dm_connector->num_modes++;
6898 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6901 struct amdgpu_dm_connector *amdgpu_dm_connector =
6902 to_amdgpu_dm_connector(connector);
6905 /* empty probed_modes */
6906 INIT_LIST_HEAD(&connector->probed_modes);
6907 amdgpu_dm_connector->num_modes =
6908 drm_add_edid_modes(connector, edid);
6910 /* Sort the probed modes before calling
6911 * amdgpu_dm_get_native_mode(), since an EDID can have
6912 * more than one preferred mode. Modes that appear
6913 * later in the probed-mode list may be of a higher,
6914 * preferred resolution: for example, 3840x2160 in the
6915 * base EDID preferred timing and 4096x2160 as the
6916 * preferred resolution in a DisplayID extension block.
6918 drm_mode_sort(&connector->probed_modes);
6919 amdgpu_dm_get_native_mode(connector);
6921 amdgpu_dm_connector->num_modes = 0;
6925 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6927 struct amdgpu_dm_connector *amdgpu_dm_connector =
6928 to_amdgpu_dm_connector(connector);
6929 struct drm_encoder *encoder;
6930 struct edid *edid = amdgpu_dm_connector->edid;
6932 encoder = amdgpu_dm_connector_to_encoder(connector);
6934 if (!drm_edid_is_valid(edid)) {
6935 amdgpu_dm_connector->num_modes =
6936 drm_add_modes_noedid(connector, 640, 480);
6938 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6939 amdgpu_dm_connector_add_common_modes(encoder, connector);
6941 amdgpu_dm_fbc_init(connector);
6943 return amdgpu_dm_connector->num_modes;
6946 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6947 struct amdgpu_dm_connector *aconnector,
6949 struct dc_link *link,
6952 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6955 * Some of the properties below require access to state, like bpc.
6956 * Allocate some default initial connector state with our reset helper.
6958 if (aconnector->base.funcs->reset)
6959 aconnector->base.funcs->reset(&aconnector->base);
6961 aconnector->connector_id = link_index;
6962 aconnector->dc_link = link;
6963 aconnector->base.interlace_allowed = false;
6964 aconnector->base.doublescan_allowed = false;
6965 aconnector->base.stereo_allowed = false;
6966 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6967 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6968 aconnector->audio_inst = -1;
6969 mutex_init(&aconnector->hpd_lock);
6972 * Configure HPD (hot plug detect) support. connector->polled defaults to 0,
6973 * which means HPD hot plug is not supported.
6975 switch (connector_type) {
6976 case DRM_MODE_CONNECTOR_HDMIA:
6977 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6978 aconnector->base.ycbcr_420_allowed =
6979 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6981 case DRM_MODE_CONNECTOR_DisplayPort:
6982 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6983 aconnector->base.ycbcr_420_allowed =
6984 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6986 case DRM_MODE_CONNECTOR_DVID:
6987 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6993 drm_object_attach_property(&aconnector->base.base,
6994 dm->ddev->mode_config.scaling_mode_property,
6995 DRM_MODE_SCALE_NONE);
6997 drm_object_attach_property(&aconnector->base.base,
6998 adev->mode_info.underscan_property,
7000 drm_object_attach_property(&aconnector->base.base,
7001 adev->mode_info.underscan_hborder_property,
7003 drm_object_attach_property(&aconnector->base.base,
7004 adev->mode_info.underscan_vborder_property,
7007 if (!aconnector->mst_port)
7008 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7010 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7011 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7012 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7014 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7015 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7016 drm_object_attach_property(&aconnector->base.base,
7017 adev->mode_info.abm_level_property, 0);
7020 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7021 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7022 connector_type == DRM_MODE_CONNECTOR_eDP) {
7023 drm_object_attach_property(
7024 &aconnector->base.base,
7025 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7027 if (!aconnector->mst_port)
7028 drm_connector_attach_vrr_capable_property(&aconnector->base);
7030 #ifdef CONFIG_DRM_AMD_DC_HDCP
7031 if (adev->dm.hdcp_workqueue)
7032 drm_connector_attach_content_protection_property(&aconnector->base, true);
7037 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7038 struct i2c_msg *msgs, int num)
7040 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7041 struct ddc_service *ddc_service = i2c->ddc_service;
7042 struct i2c_command cmd;
7046 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7051 cmd.number_of_payloads = num;
7052 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7055 for (i = 0; i < num; i++) {
7056 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7057 cmd.payloads[i].address = msgs[i].addr;
7058 cmd.payloads[i].length = msgs[i].len;
7059 cmd.payloads[i].data = msgs[i].buf;
7063 ddc_service->ctx->dc,
7064 ddc_service->ddc_pin->hw_info.ddc_channel,
7068 kfree(cmd.payloads);
7072 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7074 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
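/*
 * The functionality flags above advertise plain I2C transfers plus SMBus
 * emulation, which is sufficient for DDC traffic such as EDID reads; the
 * actual transfers are forwarded to DC's DDC service by
 * amdgpu_dm_i2c_xfer() above.
 */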
7077 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7078 .master_xfer = amdgpu_dm_i2c_xfer,
7079 .functionality = amdgpu_dm_i2c_func,
7082 static struct amdgpu_i2c_adapter *
7083 create_i2c(struct ddc_service *ddc_service,
7087 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7088 struct amdgpu_i2c_adapter *i2c;
7090 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7093 i2c->base.owner = THIS_MODULE;
7094 i2c->base.class = I2C_CLASS_DDC;
7095 i2c->base.dev.parent = &adev->pdev->dev;
7096 i2c->base.algo = &amdgpu_dm_i2c_algo;
7097 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7098 i2c_set_adapdata(&i2c->base, i2c);
7099 i2c->ddc_service = ddc_service;
7100 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7107 * Note: this function assumes that dc_link_detect() was called for the
7108 * dc_link which will be represented by this aconnector.
7110 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7111 struct amdgpu_dm_connector *aconnector,
7112 uint32_t link_index,
7113 struct amdgpu_encoder *aencoder)
7117 struct dc *dc = dm->dc;
7118 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7119 struct amdgpu_i2c_adapter *i2c;
7121 link->priv = aconnector;
7123 DRM_DEBUG_DRIVER("%s()\n", __func__);
7125 i2c = create_i2c(link->ddc, link->link_index, &res);
7127 DRM_ERROR("Failed to create i2c adapter data\n");
7131 aconnector->i2c = i2c;
7132 res = i2c_add_adapter(&i2c->base);
7135 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7139 connector_type = to_drm_connector_type(link->connector_signal);
7141 res = drm_connector_init_with_ddc(
7144 &amdgpu_dm_connector_funcs,
7149 DRM_ERROR("connector_init failed\n");
7150 aconnector->connector_id = -1;
7154 drm_connector_helper_add(
7156 &amdgpu_dm_connector_helper_funcs);
7158 amdgpu_dm_connector_init_helper(
7165 drm_connector_attach_encoder(
7166 &aconnector->base, &aencoder->base);
7168 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7169 || connector_type == DRM_MODE_CONNECTOR_eDP)
7170 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7175 aconnector->i2c = NULL;
7180 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7182 switch (adev->mode_info.num_crtc) {
7199 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7200 struct amdgpu_encoder *aencoder,
7201 uint32_t link_index)
7203 struct amdgpu_device *adev = drm_to_adev(dev);
7205 int res = drm_encoder_init(dev,
7207 &amdgpu_dm_encoder_funcs,
7208 DRM_MODE_ENCODER_TMDS,
7211 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7214 aencoder->encoder_id = link_index;
7216 aencoder->encoder_id = -1;
7218 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7223 static void manage_dm_interrupts(struct amdgpu_device *adev,
7224 struct amdgpu_crtc *acrtc,
7228 * We have no guarantee that the frontend index maps to the same
7229 * backend index - some even map to more than one.
7231 * TODO: Use a different interrupt or check DC itself for the mapping.
7234 amdgpu_display_crtc_idx_to_irq_type(
7239 drm_crtc_vblank_on(&acrtc->base);
7242 &adev->pageflip_irq,
7248 &adev->pageflip_irq,
7250 drm_crtc_vblank_off(&acrtc->base);
7254 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7255 struct amdgpu_crtc *acrtc)
7258 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7261 * This reads the current IRQ state and forcibly reapplies
7262 * the setting to hardware.
7264 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7268 is_scaling_state_different(const struct dm_connector_state *dm_state,
7269 const struct dm_connector_state *old_dm_state)
7271 if (dm_state->scaling != old_dm_state->scaling)
7273 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7274 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7276 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7277 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7279 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7280 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7285 #ifdef CONFIG_DRM_AMD_DC_HDCP
7286 static bool is_content_protection_different(struct drm_connector_state *state,
7287 const struct drm_connector_state *old_state,
7288 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7290 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7291 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7293 /* Handle: Type0/1 change */
7294 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7295 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7296 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7300 /* CP is being re-enabled; ignore this.
7302 * Handles: ENABLED -> DESIRED
7304 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7305 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7306 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7310 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7312 * Handles: UNDESIRED -> ENABLED
7314 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7315 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7316 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7318 /* Check if something is connected or enabled; otherwise we would start HDCP with
7319 * nothing connected/enabled (hot-plug, headless S3, DPMS).
7321 * Handles: DESIRED -> DESIRED (Special case)
7323 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7324 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7325 dm_con_state->update_hdcp = false;
7330 * Handles: UNDESIRED -> UNDESIRED
7331 * DESIRED -> DESIRED
7332 * ENABLED -> ENABLED
7334 if (old_state->content_protection == state->content_protection)
7338 * Handles: UNDESIRED -> DESIRED
7339 * DESIRED -> UNDESIRED
7340 * ENABLED -> UNDESIRED
7342 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7346 * Handles: DESIRED -> ENABLED
7352 static void remove_stream(struct amdgpu_device *adev,
7353 struct amdgpu_crtc *acrtc,
7354 struct dc_stream_state *stream)
7356 /* this is the update mode case */
7358 acrtc->otg_inst = -1;
7359 acrtc->enabled = false;
7362 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7363 struct dc_cursor_position *position)
7365 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7367 int xorigin = 0, yorigin = 0;
7369 position->enable = false;
7373 if (!crtc || !plane->state->fb)
7376 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7377 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7378 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7380 plane->state->crtc_w,
7381 plane->state->crtc_h);
7385 x = plane->state->crtc_x;
7386 y = plane->state->crtc_y;
7388 if (x <= -amdgpu_crtc->max_cursor_width ||
7389 y <= -amdgpu_crtc->max_cursor_height)
7393 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7397 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7400 position->enable = true;
7401 position->translate_by_source = true;
7404 position->x_hotspot = xorigin;
7405 position->y_hotspot = yorigin;
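/*
 * The min() clamps above handle a cursor hanging off the top/left edge of
 * the screen: the negative part of crtc_x/crtc_y is folded into the hotspot
 * (bounded by the max cursor size) so the position programmed into DC stays
 * non-negative.
 */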
7410 static void handle_cursor_update(struct drm_plane *plane,
7411 struct drm_plane_state *old_plane_state)
7413 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7414 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7415 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7416 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7417 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7418 uint64_t address = afb ? afb->address : 0;
7419 struct dc_cursor_position position;
7420 struct dc_cursor_attributes attributes;
7423 if (!plane->state->fb && !old_plane_state->fb)
7426 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7428 amdgpu_crtc->crtc_id,
7429 plane->state->crtc_w,
7430 plane->state->crtc_h);
7432 ret = get_cursor_position(plane, crtc, &position);
7436 if (!position.enable) {
7437 /* turn off cursor */
7438 if (crtc_state && crtc_state->stream) {
7439 mutex_lock(&adev->dm.dc_lock);
7440 dc_stream_set_cursor_position(crtc_state->stream,
7442 mutex_unlock(&adev->dm.dc_lock);
7447 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7448 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7450 memset(&attributes, 0, sizeof(attributes));
7451 attributes.address.high_part = upper_32_bits(address);
7452 attributes.address.low_part = lower_32_bits(address);
7453 attributes.width = plane->state->crtc_w;
7454 attributes.height = plane->state->crtc_h;
7455 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7456 attributes.rotation_angle = 0;
7457 attributes.attribute_flags.value = 0;
7459 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
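/*
 * drm_framebuffer pitches[] are in bytes, while DC expects the cursor pitch
 * in pixels, hence the division by cpp[0] (bytes per pixel) above; e.g. a
 * 64-wide ARGB8888 cursor with a 256-byte pitch yields a pitch of 64 pixels.
 */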
7461 if (crtc_state->stream) {
7462 mutex_lock(&adev->dm.dc_lock);
7463 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7465 DRM_ERROR("DC failed to set cursor attributes\n");
7467 if (!dc_stream_set_cursor_position(crtc_state->stream,
7469 DRM_ERROR("DC failed to set cursor position\n");
7470 mutex_unlock(&adev->dm.dc_lock);
7474 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7477 assert_spin_locked(&acrtc->base.dev->event_lock);
7478 WARN_ON(acrtc->event);
7480 acrtc->event = acrtc->base.state->event;
7482 /* Set the flip status */
7483 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7485 /* Mark this event as consumed */
7486 acrtc->base.state->event = NULL;
7488 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7492 static void update_freesync_state_on_stream(
7493 struct amdgpu_display_manager *dm,
7494 struct dm_crtc_state *new_crtc_state,
7495 struct dc_stream_state *new_stream,
7496 struct dc_plane_state *surface,
7497 u32 flip_timestamp_in_us)
7499 struct mod_vrr_params vrr_params;
7500 struct dc_info_packet vrr_infopacket = {0};
7501 struct amdgpu_device *adev = dm->adev;
7502 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7503 unsigned long flags;
7509 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7510 * For now it's sufficient to just guard against these conditions.
7513 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7516 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7517 vrr_params = acrtc->dm_irq_params.vrr_params;
7520 mod_freesync_handle_preflip(
7521 dm->freesync_module,
7524 flip_timestamp_in_us,
7527 if (adev->family < AMDGPU_FAMILY_AI &&
7528 amdgpu_dm_vrr_active(new_crtc_state)) {
7529 mod_freesync_handle_v_update(dm->freesync_module,
7530 new_stream, &vrr_params);
7532 /* Need to call this before the frame ends. */
7533 dc_stream_adjust_vmin_vmax(dm->dc,
7534 new_crtc_state->stream,
7535 &vrr_params.adjust);
7539 mod_freesync_build_vrr_infopacket(
7540 dm->freesync_module,
7544 TRANSFER_FUNC_UNKNOWN,
7547 new_crtc_state->freesync_timing_changed |=
7548 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7550 sizeof(vrr_params.adjust)) != 0);
7552 new_crtc_state->freesync_vrr_info_changed |=
7553 (memcmp(&new_crtc_state->vrr_infopacket,
7555 sizeof(vrr_infopacket)) != 0);
7557 acrtc->dm_irq_params.vrr_params = vrr_params;
7558 new_crtc_state->vrr_infopacket = vrr_infopacket;
7560 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7561 new_stream->vrr_infopacket = vrr_infopacket;
7563 if (new_crtc_state->freesync_vrr_info_changed)
7564 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7565 new_crtc_state->base.crtc->base.id,
7566 (int)new_crtc_state->base.vrr_enabled,
7567 (int)vrr_params.state);
7569 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7572 static void update_stream_irq_parameters(
7573 struct amdgpu_display_manager *dm,
7574 struct dm_crtc_state *new_crtc_state)
7576 struct dc_stream_state *new_stream = new_crtc_state->stream;
7577 struct mod_vrr_params vrr_params;
7578 struct mod_freesync_config config = new_crtc_state->freesync_config;
7579 struct amdgpu_device *adev = dm->adev;
7580 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7581 unsigned long flags;
7587 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7588 * For now it's sufficient to just guard against these conditions.
7590 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7593 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7594 vrr_params = acrtc->dm_irq_params.vrr_params;
7596 if (new_crtc_state->vrr_supported &&
7597 config.min_refresh_in_uhz &&
7598 config.max_refresh_in_uhz) {
7599 config.state = new_crtc_state->base.vrr_enabled ?
7600 VRR_STATE_ACTIVE_VARIABLE :
7603 config.state = VRR_STATE_UNSUPPORTED;
7606 mod_freesync_build_vrr_params(dm->freesync_module,
7608 &config, &vrr_params);
7610 new_crtc_state->freesync_timing_changed |=
7611 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7612 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7614 new_crtc_state->freesync_config = config;
7615 /* Copy state for access from DM IRQ handler */
7616 acrtc->dm_irq_params.freesync_config = config;
7617 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7618 acrtc->dm_irq_params.vrr_params = vrr_params;
7619 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7622 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7623 struct dm_crtc_state *new_state)
7625 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7626 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7628 if (!old_vrr_active && new_vrr_active) {
7629 /* Transition VRR inactive -> active:
7630 * While VRR is active, we must not disable the vblank irq, as a
7631 * re-enable after a disable would compute bogus vblank/pflip
7632 * timestamps if the disable happened inside the display front porch.
7634 * We also need vupdate irq for the actual core vblank handling
7637 dm_set_vupdate_irq(new_state->base.crtc, true);
7638 drm_crtc_vblank_get(new_state->base.crtc);
7639 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7640 __func__, new_state->base.crtc->base.id);
7641 } else if (old_vrr_active && !new_vrr_active) {
7642 /* Transition VRR active -> inactive:
7643 * Allow vblank irq disable again for fixed refresh rate.
7645 dm_set_vupdate_irq(new_state->base.crtc, false);
7646 drm_crtc_vblank_put(new_state->base.crtc);
7647 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7648 __func__, new_state->base.crtc->base.id);
7652 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7654 struct drm_plane *plane;
7655 struct drm_plane_state *old_plane_state, *new_plane_state;
7659 * TODO: Make this per-stream so we don't issue redundant updates for
7660 * commits with multiple streams.
7662 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7664 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7665 handle_cursor_update(plane, old_plane_state);
7668 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7669 struct dc_state *dc_state,
7670 struct drm_device *dev,
7671 struct amdgpu_display_manager *dm,
7672 struct drm_crtc *pcrtc,
7673 bool wait_for_vblank)
7676 uint64_t timestamp_ns;
7677 struct drm_plane *plane;
7678 struct drm_plane_state *old_plane_state, *new_plane_state;
7679 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7680 struct drm_crtc_state *new_pcrtc_state =
7681 drm_atomic_get_new_crtc_state(state, pcrtc);
7682 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7683 struct dm_crtc_state *dm_old_crtc_state =
7684 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7685 int planes_count = 0, vpos, hpos;
7687 unsigned long flags;
7688 struct amdgpu_bo *abo;
7689 uint32_t target_vblank, last_flip_vblank;
7690 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7691 bool pflip_present = false;
7693 struct dc_surface_update surface_updates[MAX_SURFACES];
7694 struct dc_plane_info plane_infos[MAX_SURFACES];
7695 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7696 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7697 struct dc_stream_update stream_update;
7700 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7703 dm_error("Failed to allocate update bundle\n");
7708 * Disable the cursor first if we're disabling all the planes.
7709 * It'll remain on the screen after the planes are re-enabled if we don't.
7712 if (acrtc_state->active_planes == 0)
7713 amdgpu_dm_commit_cursors(state);
7715 /* update planes when needed */
7716 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7717 struct drm_crtc *crtc = new_plane_state->crtc;
7718 struct drm_crtc_state *new_crtc_state;
7719 struct drm_framebuffer *fb = new_plane_state->fb;
7720 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7721 bool plane_needs_flip;
7722 struct dc_plane_state *dc_plane;
7723 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7725 /* Cursor plane is handled after stream updates */
7726 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7729 if (!fb || !crtc || pcrtc != crtc)
7732 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7733 if (!new_crtc_state->active)
7736 dc_plane = dm_new_plane_state->dc_state;
7738 bundle->surface_updates[planes_count].surface = dc_plane;
7739 if (new_pcrtc_state->color_mgmt_changed) {
7740 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7741 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7742 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7745 fill_dc_scaling_info(new_plane_state,
7746 &bundle->scaling_infos[planes_count]);
7748 bundle->surface_updates[planes_count].scaling_info =
7749 &bundle->scaling_infos[planes_count];
7751 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7753 pflip_present = pflip_present || plane_needs_flip;
7755 if (!plane_needs_flip) {
7760 abo = gem_to_amdgpu_bo(fb->obj[0]);
7763 * Wait for all fences on this FB. Do a limited wait to avoid
7764 * deadlock during GPU reset, when this fence will not signal
7765 * but we hold the reservation lock for the BO.
7767 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7769 msecs_to_jiffies(5000));
7770 if (unlikely(r <= 0))
7771 DRM_ERROR("Waiting for fences timed out!");
7773 fill_dc_plane_info_and_addr(
7774 dm->adev, new_plane_state,
7776 &bundle->plane_infos[planes_count],
7777 &bundle->flip_addrs[planes_count].address,
7778 afb->tmz_surface, false);
7780 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7781 new_plane_state->plane->index,
7782 bundle->plane_infos[planes_count].dcc.enable);
7784 bundle->surface_updates[planes_count].plane_info =
7785 &bundle->plane_infos[planes_count];
7788 * Only allow immediate flips for fast updates that don't
7789 * change FB pitch, DCC state, rotation or mirroring.
7791 bundle->flip_addrs[planes_count].flip_immediate =
7792 crtc->state->async_flip &&
7793 acrtc_state->update_type == UPDATE_TYPE_FAST;
7795 timestamp_ns = ktime_get_ns();
7796 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7797 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7798 bundle->surface_updates[planes_count].surface = dc_plane;
7800 if (!bundle->surface_updates[planes_count].surface) {
7801 DRM_ERROR("No surface for CRTC: id=%d\n",
7802 acrtc_attach->crtc_id);
7806 if (plane == pcrtc->primary)
7807 update_freesync_state_on_stream(
7810 acrtc_state->stream,
7812 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7814 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7816 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7817 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7823 if (pflip_present) {
7825 /* Use old throttling in non-vrr fixed refresh rate mode
7826 * to keep flip scheduling based on target vblank counts
7827 * working in a backwards compatible way, e.g., for
7828 * clients using the GLX_OML_sync_control extension or
7829 * DRI3/Present extension with defined target_msc.
7831 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7834 /* For variable refresh rate mode only:
7835 * Get vblank of last completed flip to avoid > 1 vrr
7836 * flips per video frame by use of throttling, but allow
7837 * flip programming anywhere in the possibly large
7838 * variable vrr vblank interval for fine-grained flip
7839 * timing control and more opportunity to avoid stutter
7840 * on late submission of flips.
7842 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7843 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7844 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7847 target_vblank = last_flip_vblank + wait_for_vblank;
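/*
 * target_vblank is the vblank count we wait to reach before programming the
 * flip: the last completed flip's vblank, plus one more when wait_for_vblank
 * is set, which throttles us to at most one flip per refresh cycle in the
 * non-VRR case.
 */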
7850 * Wait until we're out of the vertical blank period before the one
7851 * targeted by the flip
7853 while ((acrtc_attach->enabled &&
7854 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7855 0, &vpos, &hpos, NULL,
7856 NULL, &pcrtc->hwmode)
7857 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7858 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7859 (int)(target_vblank -
7860 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7861 usleep_range(1000, 1100);
7865 * Prepare the flip event for the pageflip interrupt to handle.
7867 * This only works in the case where we've already turned on the
7868 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7869 * from 0 -> n planes we have to skip a hardware generated event
7870 * and rely on sending it from software.
7872 if (acrtc_attach->base.state->event &&
7873 acrtc_state->active_planes > 0) {
7874 drm_crtc_vblank_get(pcrtc);
7876 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7878 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7879 prepare_flip_isr(acrtc_attach);
7881 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7884 if (acrtc_state->stream) {
7885 if (acrtc_state->freesync_vrr_info_changed)
7886 bundle->stream_update.vrr_infopacket =
7887 &acrtc_state->stream->vrr_infopacket;
7891 /* Update the planes if changed or disable if we don't have any. */
7892 if ((planes_count || acrtc_state->active_planes == 0) &&
7893 acrtc_state->stream) {
7894 bundle->stream_update.stream = acrtc_state->stream;
7895 if (new_pcrtc_state->mode_changed) {
7896 bundle->stream_update.src = acrtc_state->stream->src;
7897 bundle->stream_update.dst = acrtc_state->stream->dst;
7900 if (new_pcrtc_state->color_mgmt_changed) {
7902 * TODO: This isn't fully correct since we've actually
7903 * already modified the stream in place.
7905 bundle->stream_update.gamut_remap =
7906 &acrtc_state->stream->gamut_remap_matrix;
7907 bundle->stream_update.output_csc_transform =
7908 &acrtc_state->stream->csc_color_matrix;
7909 bundle->stream_update.out_transfer_func =
7910 acrtc_state->stream->out_transfer_func;
7913 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7914 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7915 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7918 * If FreeSync state on the stream has changed then we need to
7919 * re-adjust the min/max bounds now that DC doesn't handle this
7920 * as part of commit.
7922 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7923 amdgpu_dm_vrr_active(acrtc_state)) {
7924 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7925 dc_stream_adjust_vmin_vmax(
7926 dm->dc, acrtc_state->stream,
7927 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7928 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7930 mutex_lock(&dm->dc_lock);
7931 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7932 acrtc_state->stream->link->psr_settings.psr_allow_active)
7933 amdgpu_dm_psr_disable(acrtc_state->stream);
7935 dc_commit_updates_for_stream(dm->dc,
7936 bundle->surface_updates,
7938 acrtc_state->stream,
7939 &bundle->stream_update,
7943 * Enable or disable the interrupts on the backend.
7945 * Most pipes are put into power gating when unused.
7947 * When a pipe is power gated we lose its interrupt enablement
7948 * state, so it must be reapplied once power gating is disabled.
7950 * So we need to update the IRQ control state in hardware
7951 * whenever the pipe turns on (since it could be previously
7952 * power gated) or off (since some pipes can't be power gated
7955 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7956 dm_update_pflip_irq_state(drm_to_adev(dev),
7959 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7960 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7961 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7962 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7963 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7964 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7965 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7966 amdgpu_dm_psr_enable(acrtc_state->stream);
7969 mutex_unlock(&dm->dc_lock);
7973 * Update cursor state *after* programming all the planes.
7974 * This avoids redundant programming in the case where we're going
7975 * to be disabling a single plane - those pipes are being disabled.
7977 if (acrtc_state->active_planes)
7978 amdgpu_dm_commit_cursors(state);
7984 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7985 struct drm_atomic_state *state)
7987 struct amdgpu_device *adev = drm_to_adev(dev);
7988 struct amdgpu_dm_connector *aconnector;
7989 struct drm_connector *connector;
7990 struct drm_connector_state *old_con_state, *new_con_state;
7991 struct drm_crtc_state *new_crtc_state;
7992 struct dm_crtc_state *new_dm_crtc_state;
7993 const struct dc_stream_status *status;
7996 /* Notify audio device removals. */
7997 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7998 if (old_con_state->crtc != new_con_state->crtc) {
7999 /* CRTC changes require notification. */
8003 if (!new_con_state->crtc)
8006 new_crtc_state = drm_atomic_get_new_crtc_state(
8007 state, new_con_state->crtc);
8009 if (!new_crtc_state)
8012 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8016 aconnector = to_amdgpu_dm_connector(connector);
8018 mutex_lock(&adev->dm.audio_lock);
8019 inst = aconnector->audio_inst;
8020 aconnector->audio_inst = -1;
8021 mutex_unlock(&adev->dm.audio_lock);
8023 amdgpu_dm_audio_eld_notify(adev, inst);
8026 /* Notify audio device additions. */
8027 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8028 if (!new_con_state->crtc)
8031 new_crtc_state = drm_atomic_get_new_crtc_state(
8032 state, new_con_state->crtc);
8034 if (!new_crtc_state)
8037 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8040 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8041 if (!new_dm_crtc_state->stream)
8044 status = dc_stream_get_status(new_dm_crtc_state->stream);
8048 aconnector = to_amdgpu_dm_connector(connector);
8050 mutex_lock(&adev->dm.audio_lock);
8051 inst = status->audio_inst;
8052 aconnector->audio_inst = inst;
8053 mutex_unlock(&adev->dm.audio_lock);
8055 amdgpu_dm_audio_eld_notify(adev, inst);
8060 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8061 * @crtc_state: the DRM CRTC state
8062 * @stream_state: the DC stream state.
8064 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8065 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8067 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8068 struct dc_stream_state *stream_state)
8070 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8073 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
8074 struct drm_atomic_state *state,
8078 * Add a check here for SoCs that support a hardware cursor plane, to
8079 * unset legacy_cursor_update
8082 return drm_atomic_helper_commit(dev, state, nonblock);
8084 /* TODO: Handle EINTR, re-enable IRQ */
8088 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
8089 * @state: The atomic state to commit
8091 * This will tell DC to commit the constructed DC state from atomic_check,
8092 * programming the hardware. Any failure here implies a hardware failure, since
8093 * atomic check should have filtered anything non-kosher.
8095 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8097 struct drm_device *dev = state->dev;
8098 struct amdgpu_device *adev = drm_to_adev(dev);
8099 struct amdgpu_display_manager *dm = &adev->dm;
8100 struct dm_atomic_state *dm_state;
8101 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8103 struct drm_crtc *crtc;
8104 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8105 unsigned long flags;
8106 bool wait_for_vblank = true;
8107 struct drm_connector *connector;
8108 struct drm_connector_state *old_con_state, *new_con_state;
8109 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8110 int crtc_disable_count = 0;
8111 bool mode_set_reset_required = false;
8113 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8115 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8117 dm_state = dm_atomic_get_new_state(state);
8118 if (dm_state && dm_state->context) {
8119 dc_state = dm_state->context;
8121 /* No state changes, retain current state. */
8122 dc_state_temp = dc_create_state(dm->dc);
8123 ASSERT(dc_state_temp);
8124 dc_state = dc_state_temp;
8125 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8128 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8129 new_crtc_state, i) {
8130 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8132 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8134 if (old_crtc_state->active &&
8135 (!new_crtc_state->active ||
8136 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8137 manage_dm_interrupts(adev, acrtc, false);
8138 dc_stream_release(dm_old_crtc_state->stream);
8142 drm_atomic_helper_calc_timestamping_constants(state);
8144 /* update changed items */
8145 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8146 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8148 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8149 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8152 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8153 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8154 "connectors_changed:%d\n",
8156 new_crtc_state->enable,
8157 new_crtc_state->active,
8158 new_crtc_state->planes_changed,
8159 new_crtc_state->mode_changed,
8160 new_crtc_state->active_changed,
8161 new_crtc_state->connectors_changed);
8163 /* Disable cursor if disabling crtc */
8164 if (old_crtc_state->active && !new_crtc_state->active) {
8165 struct dc_cursor_position position;
8167 memset(&position, 0, sizeof(position));
8168 mutex_lock(&dm->dc_lock);
8169 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8170 mutex_unlock(&dm->dc_lock);
8173 /* Copy all transient state flags into dc state */
8174 if (dm_new_crtc_state->stream) {
8175 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8176 dm_new_crtc_state->stream);
8179 /* handles headless hotplug case, updating new_state and
8180 * aconnector as needed
8183 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8185 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8187 if (!dm_new_crtc_state->stream) {
8189 * This can happen because of issues with
8190 * userspace notification delivery.
8191 * In that case userspace tries to set a mode on
8192 * a display which is in fact disconnected
8193 * (dc_sink is NULL on the aconnector), and we
8194 * expect a reset mode to come soon.
8196 * It can also happen when an unplug occurs while
8197 * a resume sequence is still in progress.
8199 * In this case, we want to pretend we still
8200 * have a sink to keep the pipe running so that
8201 * the hw state stays consistent with the sw state.
8203 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8204 __func__, acrtc->base.base.id);
8208 if (dm_old_crtc_state->stream)
8209 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8211 pm_runtime_get_noresume(dev->dev);
8213 acrtc->enabled = true;
8214 acrtc->hw_mode = new_crtc_state->mode;
8215 crtc->hwmode = new_crtc_state->mode;
8216 mode_set_reset_required = true;
8217 } else if (modereset_required(new_crtc_state)) {
8218 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8219 /* i.e. reset mode */
8220 if (dm_old_crtc_state->stream)
8221 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8222 mode_set_reset_required = true;
8224 } /* for_each_crtc_in_state() */
8227 /* If there was a mode set or reset, disable eDP PSR. */
8228 if (mode_set_reset_required)
8229 amdgpu_dm_psr_disable_all(dm);
8231 dm_enable_per_frame_crtc_master_sync(dc_state);
8232 mutex_lock(&dm->dc_lock);
8233 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8234 mutex_unlock(&dm->dc_lock);
8237 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8238 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8240 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8242 if (dm_new_crtc_state->stream != NULL) {
8243 const struct dc_stream_status *status =
8244 dc_stream_get_status(dm_new_crtc_state->stream);
8247 status = dc_stream_get_status_from_state(dc_state,
8248 dm_new_crtc_state->stream);
8250 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8252 acrtc->otg_inst = status->primary_otg_inst;
8255 #ifdef CONFIG_DRM_AMD_DC_HDCP
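/*
 * Re-evaluate HDCP for each connector: reset content protection on
 * connectors whose stream is being torn down, and (re)program HDCP where
 * the desired content protection state or type has changed.
 */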
8256 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8257 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8258 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8259 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8261 new_crtc_state = NULL;
8264 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8266 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8268 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8269 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8270 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8271 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8272 dm_new_con_state->update_hdcp = true;
8276 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8277 hdcp_update_display(
8278 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8279 new_con_state->hdcp_content_type,
8280 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8285 /* Handle connector state changes */
8286 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8287 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8288 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8289 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8290 struct dc_surface_update dummy_updates[MAX_SURFACES];
8291 struct dc_stream_update stream_update;
8292 struct dc_info_packet hdr_packet;
8293 struct dc_stream_status *status = NULL;
8294 bool abm_changed, hdr_changed, scaling_changed;
8296 memset(&dummy_updates, 0, sizeof(dummy_updates));
8297 memset(&stream_update, 0, sizeof(stream_update));
8300 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8301 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8304 /* Skip any modesets/resets */
8305 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8308 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8309 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8311 scaling_changed = is_scaling_state_different(dm_new_con_state,
8314 abm_changed = dm_new_crtc_state->abm_level !=
8315 dm_old_crtc_state->abm_level;
8318 is_hdr_metadata_different(old_con_state, new_con_state);
8320 if (!scaling_changed && !abm_changed && !hdr_changed)
8323 stream_update.stream = dm_new_crtc_state->stream;
8324 if (scaling_changed) {
8325 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8326 dm_new_con_state, dm_new_crtc_state->stream);
8328 stream_update.src = dm_new_crtc_state->stream->src;
8329 stream_update.dst = dm_new_crtc_state->stream->dst;
8333 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8335 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8339 fill_hdr_info_packet(new_con_state, &hdr_packet);
8340 stream_update.hdr_static_metadata = &hdr_packet;
8343 status = dc_stream_get_status(dm_new_crtc_state->stream);
8345 WARN_ON(!status->plane_count);
8348 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8349 * Here we create an empty update on each plane.
8350 * To fix this, DC should permit updating only stream properties.
8352 for (j = 0; j < status->plane_count; j++)
8353 dummy_updates[j].surface = status->plane_states[0];
8356 mutex_lock(&dm->dc_lock);
8357 dc_commit_updates_for_stream(dm->dc,
8359 status->plane_count,
8360 dm_new_crtc_state->stream,
8363 mutex_unlock(&dm->dc_lock);
8366 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8367 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8368 new_crtc_state, i) {
8369 if (old_crtc_state->active && !new_crtc_state->active)
8370 crtc_disable_count++;
8372 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8373 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8375 /* For freesync config update on crtc state and params for irq */
8376 update_stream_irq_parameters(dm, dm_new_crtc_state);
8378 /* Handle vrr on->off / off->on transitions */
8379 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8384 * Enable interrupts for CRTCs that are newly enabled or went through
8385 * a modeset. It was intentionally deferred until after the front end
8386 * state was modified to wait until the OTG was on and so the IRQ
8387 * handlers didn't access stale or invalid state.
8389 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8390 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8391 bool configure_crc = false;
8393 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8395 if (new_crtc_state->active &&
8396 (!old_crtc_state->active ||
8397 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8398 dc_stream_retain(dm_new_crtc_state->stream);
8399 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8400 manage_dm_interrupts(adev, acrtc, true);
8402 #ifdef CONFIG_DEBUG_FS
8403 if (new_crtc_state->active &&
8404 amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8406 * Frontend may have changed so reapply the CRC capture
8407 * settings for the stream.
8409 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8410 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8412 if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8413 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8414 configure_crc = true;
8416 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8417 configure_crc = true;
8421 amdgpu_dm_crtc_configure_crc_source(
8422 crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8427 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8428 if (new_crtc_state->async_flip)
8429 wait_for_vblank = false;
8431 /* update planes when needed per crtc*/
8432 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8433 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8435 if (dm_new_crtc_state->stream)
8436 amdgpu_dm_commit_planes(state, dc_state, dev,
8437 dm, crtc, wait_for_vblank);
8440 /* Update audio instances for each connector. */
8441 amdgpu_dm_commit_audio(dev, state);
8444 * Send a vblank event for each CRTC event not handled in the flip path
8445 * and mark the event as consumed for drm_atomic_helper_commit_hw_done().
8447 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8448 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8450 if (new_crtc_state->event)
8451 drm_send_event_locked(dev, &new_crtc_state->event->base);
8453 new_crtc_state->event = NULL;
8455 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8457 /* Signal HW programming completion */
8458 drm_atomic_helper_commit_hw_done(state);
8460 if (wait_for_vblank)
8461 drm_atomic_helper_wait_for_flip_done(dev, state);
8463 drm_atomic_helper_cleanup_planes(dev, state);
8465 /* return the stolen vga memory back to VRAM */
8466 if (!adev->mman.keep_stolen_vga_memory)
8467 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8468 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8471 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8472 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
8475 for (i = 0; i < crtc_disable_count; i++)
8476 pm_runtime_put_autosuspend(dev->dev);
8477 pm_runtime_mark_last_busy(dev->dev);
8480 dc_release_state(dc_state_temp);
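/*
 * Build a minimal atomic state that marks the connector's CRTC as needing a
 * modeset and commit it, forcing the previous display configuration to be
 * reprogrammed.
 */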
8484 static int dm_force_atomic_commit(struct drm_connector *connector)
8487 struct drm_device *ddev = connector->dev;
8488 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8489 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8490 struct drm_plane *plane = disconnected_acrtc->base.primary;
8491 struct drm_connector_state *conn_state;
8492 struct drm_crtc_state *crtc_state;
8493 struct drm_plane_state *plane_state;
8498 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8500 /* Construct an atomic state to restore previous display setting */
8503 * Attach connectors to drm_atomic_state
8505 conn_state = drm_atomic_get_connector_state(state, connector);
8507 ret = PTR_ERR_OR_ZERO(conn_state);
8511 /* Attach crtc to drm_atomic_state*/
8512 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8514 ret = PTR_ERR_OR_ZERO(crtc_state);
8518 /* force a restore */
8519 crtc_state->mode_changed = true;
8521 /* Attach plane to drm_atomic_state */
8522 plane_state = drm_atomic_get_plane_state(state, plane);
8524 ret = PTR_ERR_OR_ZERO(plane_state);
8529 /* Call commit internally with the state we just constructed */
8530 ret = drm_atomic_commit(state);
8535 DRM_ERROR("Restoring old state failed with %i\n", ret);
8536 drm_atomic_state_put(state);
8542 * This function handles all cases when set mode does not come upon hotplug.
8543 * This includes when a display is unplugged then plugged back into the
8544 * same port and when running without usermode desktop manager support.
8546 void dm_restore_drm_connector_state(struct drm_device *dev,
8547 struct drm_connector *connector)
8549 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8550 struct amdgpu_crtc *disconnected_acrtc;
8551 struct dm_crtc_state *acrtc_state;
8553 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8556 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8557 if (!disconnected_acrtc)
8560 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8561 if (!acrtc_state->stream)
8565 * If the previous sink is not released and different from the current,
8566 * we deduce we are in a state where we cannot rely on a usermode call
8567 * to turn on the display, so we do it here.
8569 if (acrtc_state->stream->sink != aconnector->dc_sink)
8570 dm_force_atomic_commit(&aconnector->base);
8574 * Grabs all modesetting locks to serialize against any blocking commits
8575 * and waits for completion of all non-blocking commits.
8577 static int do_aquire_global_lock(struct drm_device *dev,
8578 struct drm_atomic_state *state)
8580 struct drm_crtc *crtc;
8581 struct drm_crtc_commit *commit;
8585 * Adding all modeset locks to acquire_ctx will
8586 * ensure that when the framework releases it, the
8587 * extra locks we are taking here will get released too.
8589 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8593 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8594 spin_lock(&crtc->commit_lock);
8595 commit = list_first_entry_or_null(&crtc->commit_list,
8596 struct drm_crtc_commit, commit_entry);
8598 drm_crtc_commit_get(commit);
8599 spin_unlock(&crtc->commit_lock);
8605 * Make sure all pending HW programming has completed and page flips are done.
8608 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8611 ret = wait_for_completion_interruptible_timeout(
8612 &commit->flip_done, 10*HZ);
8615 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8616 "timed out\n", crtc->base.id, crtc->name);
8618 drm_crtc_commit_put(commit);
8621 return ret < 0 ? ret : 0;
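/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector's
 * reported min/max refresh range and the refresh rate of the requested mode.
 */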
8624 static void get_freesync_config_for_crtc(
8625 struct dm_crtc_state *new_crtc_state,
8626 struct dm_connector_state *new_con_state)
8628 struct mod_freesync_config config = {0};
8629 struct amdgpu_dm_connector *aconnector =
8630 to_amdgpu_dm_connector(new_con_state->base.connector);
8631 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8632 int vrefresh = drm_mode_vrefresh(mode);
8634 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8635 vrefresh >= aconnector->min_vfreq &&
8636 vrefresh <= aconnector->max_vfreq;
8638 if (new_crtc_state->vrr_supported) {
8639 new_crtc_state->stream->ignore_msa_timing_param = true;
8640 config.state = new_crtc_state->base.vrr_enabled ?
8641 VRR_STATE_ACTIVE_VARIABLE :
8643 config.min_refresh_in_uhz =
8644 aconnector->min_vfreq * 1000000;
8645 config.max_refresh_in_uhz =
8646 aconnector->max_vfreq * 1000000;
8647 config.vsif_supported = true;
8651 new_crtc_state->freesync_config = config;
8654 static void reset_freesync_config_for_crtc(
8655 struct dm_crtc_state *new_crtc_state)
8657 new_crtc_state->vrr_supported = false;
8659 memset(&new_crtc_state->vrr_infopacket, 0,
8660 sizeof(new_crtc_state->vrr_infopacket));
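/*
 * Create, validate or remove the dc_stream_state backing @crtc for this
 * atomic state. Sets *lock_and_validation_needed when the change requires
 * global DC validation in the caller.
 */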
8663 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8664 struct drm_atomic_state *state,
8665 struct drm_crtc *crtc,
8666 struct drm_crtc_state *old_crtc_state,
8667 struct drm_crtc_state *new_crtc_state,
8669 bool *lock_and_validation_needed)
8671 struct dm_atomic_state *dm_state = NULL;
8672 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8673 struct dc_stream_state *new_stream;
8677 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8678 * update changed items
8680 struct amdgpu_crtc *acrtc = NULL;
8681 struct amdgpu_dm_connector *aconnector = NULL;
8682 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8683 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8687 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8688 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8689 acrtc = to_amdgpu_crtc(crtc);
8690 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8692 /* TODO This hack should go away */
8693 if (aconnector && enable) {
8694 /* Make sure fake sink is created in plug-in scenario */
8695 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8697 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8700 if (IS_ERR(drm_new_conn_state)) {
8701 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8705 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8706 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8708 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8711 new_stream = create_validate_stream_for_sink(aconnector,
8712 &new_crtc_state->mode,
8714 dm_old_crtc_state->stream);
8717 * We can have no stream on ACTION_SET if a display
8718 * was disconnected during S3; in this case it is not an
8719 * error, the OS will be updated after detection and
8720 * will do the right thing on the next atomic commit.
8724 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8725 __func__, acrtc->base.base.id);
8731 * TODO: Check VSDB bits to decide whether this should
8732 * be enabled or not.
8734 new_stream->triggered_crtc_reset.enabled =
8735 dm->force_timing_sync;
8737 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8739 ret = fill_hdr_info_packet(drm_new_conn_state,
8740 &new_stream->hdr_static_metadata);
8745 * If we already removed the old stream from the context
8746 * (and set the new stream to NULL) then we can't reuse
8747 * the old stream even if the stream and scaling are unchanged.
8748 * We'll hit the BUG_ON and black screen.
8750 * TODO: Refactor this function to allow this check to work
8751 * in all conditions.
8753 if (dm_new_crtc_state->stream &&
8754 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8755 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8756 new_crtc_state->mode_changed = false;
8757 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8758 new_crtc_state->mode_changed);
8762 /* mode_changed flag may get updated above, need to check again */
8763 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8767 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8768 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8769 "connectors_changed:%d\n",
8771 new_crtc_state->enable,
8772 new_crtc_state->active,
8773 new_crtc_state->planes_changed,
8774 new_crtc_state->mode_changed,
8775 new_crtc_state->active_changed,
8776 new_crtc_state->connectors_changed);
8778 /* Remove stream for any changed/disabled CRTC */
8781 if (!dm_old_crtc_state->stream)
8784 ret = dm_atomic_get_state(state, &dm_state);
8788 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8791 /* i.e. reset mode */
8792 if (dc_remove_stream_from_ctx(
8795 dm_old_crtc_state->stream) != DC_OK) {
8800 dc_stream_release(dm_old_crtc_state->stream);
8801 dm_new_crtc_state->stream = NULL;
8803 reset_freesync_config_for_crtc(dm_new_crtc_state);
8805 *lock_and_validation_needed = true;
8807 } else {/* Add stream for any updated/enabled CRTC */
8809 * Quick fix to prevent a NULL pointer dereference on new_stream when an
8810 * added MST connector is not found in the existing crtc_state (chained mode).
8811 * TODO: need to dig out the root cause of that.
8813 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8816 if (modereset_required(new_crtc_state))
8819 if (modeset_required(new_crtc_state, new_stream,
8820 dm_old_crtc_state->stream)) {
8822 WARN_ON(dm_new_crtc_state->stream);
8824 ret = dm_atomic_get_state(state, &dm_state);
8828 dm_new_crtc_state->stream = new_stream;
8830 dc_stream_retain(new_stream);
8832 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8835 if (dc_add_stream_to_ctx(
8838 dm_new_crtc_state->stream) != DC_OK) {
8843 *lock_and_validation_needed = true;
8848 /* Release extra reference */
8850 dc_stream_release(new_stream);
8853 * We want to do dc stream updates that do not require a
8854 * full modeset below.
8856 if (!(enable && aconnector && new_crtc_state->active))
8859 * Given the above conditions, the dc state cannot be NULL because:
8860 * 1. The CRTC is being enabled (it has just been added to the
8861 * dc context, or is already on the context),
8862 * 2. it has a valid connector attached, and
8863 * 3. it is currently active and enabled.
8864 * => The dc stream state currently exists.
8866 BUG_ON(dm_new_crtc_state->stream == NULL);
8868 /* Scaling or underscan settings */
8869 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8870 update_stream_scaling_settings(
8871 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8874 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8877 * Color management settings. We also update color properties
8878 * when a modeset is needed, to ensure it gets reprogrammed.
8880 if (dm_new_crtc_state->base.color_mgmt_changed ||
8881 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8882 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8887 /* Update Freesync settings. */
8888 get_freesync_config_for_crtc(dm_new_crtc_state,
8895 dc_stream_release(new_stream);
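/*
 * Decide whether DC needs a plane to be removed and re-added. Returns true
 * for changes that may affect bandwidth, z-order or pipe acquisition:
 * modesets, color management, scaling, rotation, blending, format, tiling
 * or DCC changes on any plane sharing the CRTC.
 */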
8899 static bool should_reset_plane(struct drm_atomic_state *state,
8900 struct drm_plane *plane,
8901 struct drm_plane_state *old_plane_state,
8902 struct drm_plane_state *new_plane_state)
8904 struct drm_plane *other;
8905 struct drm_plane_state *old_other_state, *new_other_state;
8906 struct drm_crtc_state *new_crtc_state;
8910 * TODO: Remove this hack once the checks below are sufficient
8911 * to determine when we need to reset all the planes on the stream.
8914 if (state->allow_modeset)
8917 /* Exit early if we know that we're adding or removing the plane. */
8918 if (old_plane_state->crtc != new_plane_state->crtc)
8921 /* old crtc == new_crtc == NULL, plane not in context. */
8922 if (!new_plane_state->crtc)
8926 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8928 if (!new_crtc_state)
8931 /* CRTC Degamma changes currently require us to recreate planes. */
8932 if (new_crtc_state->color_mgmt_changed)
8935 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8939 * If there are any new primary or overlay planes being added or
8940 * removed then the z-order can potentially change. To ensure
8941 * correct z-order and pipe acquisition the current DC architecture
8942 * requires us to remove and recreate all existing planes.
8944 * TODO: Come up with a more elegant solution for this.
8946 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8947 struct amdgpu_framebuffer *old_afb, *new_afb;
8948 if (other->type == DRM_PLANE_TYPE_CURSOR)
8951 if (old_other_state->crtc != new_plane_state->crtc &&
8952 new_other_state->crtc != new_plane_state->crtc)
8955 if (old_other_state->crtc != new_other_state->crtc)
8958 /* Src/dst size and scaling updates. */
8959 if (old_other_state->src_w != new_other_state->src_w ||
8960 old_other_state->src_h != new_other_state->src_h ||
8961 old_other_state->crtc_w != new_other_state->crtc_w ||
8962 old_other_state->crtc_h != new_other_state->crtc_h)
8965 /* Rotation / mirroring updates. */
8966 if (old_other_state->rotation != new_other_state->rotation)
8969 /* Blending updates. */
8970 if (old_other_state->pixel_blend_mode !=
8971 new_other_state->pixel_blend_mode)
8974 /* Alpha updates. */
8975 if (old_other_state->alpha != new_other_state->alpha)
8978 /* Colorspace changes. */
8979 if (old_other_state->color_range != new_other_state->color_range ||
8980 old_other_state->color_encoding != new_other_state->color_encoding)
8983 /* Framebuffer checks fall at the end. */
8984 if (!old_other_state->fb || !new_other_state->fb)
8987 /* Pixel format changes can require bandwidth updates. */
8988 if (old_other_state->fb->format != new_other_state->fb->format)
8991 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8992 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8994 /* Tiling and DCC changes also require bandwidth updates. */
8995 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8996 old_afb->base.modifier != new_afb->base.modifier)
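/*
 * Validate a framebuffer bound to the cursor plane: dimensions against the
 * hardware cursor limits, no cropping, a pitch matching the width, and a
 * linear (untiled) layout when no modifier is supplied.
 */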
9003 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9004 struct drm_plane_state *new_plane_state,
9005 struct drm_framebuffer *fb)
9007 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9008 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9012 if (fb->width > new_acrtc->max_cursor_width ||
9013 fb->height > new_acrtc->max_cursor_height) {
9014 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9015 new_plane_state->fb->width,
9016 new_plane_state->fb->height);
9019 if (new_plane_state->src_w != fb->width << 16 ||
9020 new_plane_state->src_h != fb->height << 16) {
9021 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9025 /* Pitch in pixels */
9026 pitch = fb->pitches[0] / fb->format->cpp[0];
9028 if (fb->width != pitch) {
9029 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9038 /* FB pitch is supported by cursor plane */
9041 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9045 /* Core DRM takes care of checking FB modifiers, so we only need to
9046 * check tiling flags when the FB doesn't have a modifier. */
9047 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9048 if (adev->family < AMDGPU_FAMILY_AI) {
9049 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9050 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9051 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9053 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9056 DRM_DEBUG_ATOMIC("Cursor FB not linear");
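/*
 * Add or remove the dc_plane_state backing a DRM plane in the DC context
 * for this atomic state, mirroring dm_update_crtc_state() for planes. Sets
 * *lock_and_validation_needed when full DC validation is required.
 */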
9064 static int dm_update_plane_state(struct dc *dc,
9065 struct drm_atomic_state *state,
9066 struct drm_plane *plane,
9067 struct drm_plane_state *old_plane_state,
9068 struct drm_plane_state *new_plane_state,
9070 bool *lock_and_validation_needed)
9073 struct dm_atomic_state *dm_state = NULL;
9074 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9075 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9076 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9077 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9078 struct amdgpu_crtc *new_acrtc;
9083 new_plane_crtc = new_plane_state->crtc;
9084 old_plane_crtc = old_plane_state->crtc;
9085 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9086 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9088 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9089 if (!enable || !new_plane_crtc ||
9090 drm_atomic_plane_disabling(plane->state, new_plane_state))
9093 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9095 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9096 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9100 if (new_plane_state->fb) {
9101 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9102 new_plane_state->fb);
9110 needs_reset = should_reset_plane(state, plane, old_plane_state,
9113 /* Remove any changed/removed planes */
9118 if (!old_plane_crtc)
9121 old_crtc_state = drm_atomic_get_old_crtc_state(
9122 state, old_plane_crtc);
9123 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9125 if (!dm_old_crtc_state->stream)
9128 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9129 plane->base.id, old_plane_crtc->base.id);
9131 ret = dm_atomic_get_state(state, &dm_state);
9135 if (!dc_remove_plane_from_context(
9137 dm_old_crtc_state->stream,
9138 dm_old_plane_state->dc_state,
9139 dm_state->context)) {
9145 dc_plane_state_release(dm_old_plane_state->dc_state);
9146 dm_new_plane_state->dc_state = NULL;
9148 *lock_and_validation_needed = true;
9150 } else { /* Add new planes */
9151 struct dc_plane_state *dc_new_plane_state;
9153 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9156 if (!new_plane_crtc)
9159 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9160 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9162 if (!dm_new_crtc_state->stream)
9168 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9172 WARN_ON(dm_new_plane_state->dc_state);
9174 dc_new_plane_state = dc_create_plane_state(dc);
9175 if (!dc_new_plane_state)
9178 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9179 plane->base.id, new_plane_crtc->base.id);
9181 ret = fill_dc_plane_attributes(
9182 drm_to_adev(new_plane_crtc->dev),
9187 dc_plane_state_release(dc_new_plane_state);
9191 ret = dm_atomic_get_state(state, &dm_state);
9193 dc_plane_state_release(dc_new_plane_state);
9198 * Any atomic check errors that occur after this will
9199 * not need a release. The plane state will be attached
9200 * to the stream, and therefore part of the atomic
9201 * state. It'll be released when the atomic state is
9204 if (!dc_add_plane_to_context(
9206 dm_new_crtc_state->stream,
9208 dm_state->context)) {
9210 dc_plane_state_release(dc_new_plane_state);
9214 dm_new_plane_state->dc_state = dc_new_plane_state;
9216 /* Tell DC to do a full surface update every time there
9217 * is a plane change. Inefficient, but works for now.
9219 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9221 *lock_and_validation_needed = true;
9228 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9229 struct drm_crtc *crtc,
9230 struct drm_crtc_state *new_crtc_state)
9232 struct drm_plane_state *new_cursor_state, *new_primary_state;
9233 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9235 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9236 * cursor per pipe but it's going to inherit the scaling and
9237 * positioning from the underlying pipe. Check that the cursor plane's
9238 * scaling matches the primary plane's. */
9240 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9241 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9242 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9246 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9247 (new_cursor_state->src_w >> 16);
9248 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9249 (new_cursor_state->src_h >> 16);
9251 primary_scale_w = new_primary_state->crtc_w * 1000 /
9252 (new_primary_state->src_w >> 16);
9253 primary_scale_h = new_primary_state->crtc_h * 1000 /
9254 (new_primary_state->src_h >> 16);
9256 if (cursor_scale_w != primary_scale_w ||
9257 cursor_scale_h != primary_scale_h) {
9258 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9265 #if defined(CONFIG_DRM_AMD_DC_DCN)
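/*
 * When a CRTC driving an MST connector needs a modeset, pull the other CRTCs
 * on the same MST topology into the state so their DSC configuration can be
 * recomputed as well.
 */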
9266 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9268 struct drm_connector *connector;
9269 struct drm_connector_state *conn_state;
9270 struct amdgpu_dm_connector *aconnector = NULL;
9272 for_each_new_connector_in_state(state, connector, conn_state, i) {
9273 if (conn_state->crtc != crtc)
9276 aconnector = to_amdgpu_dm_connector(connector);
9277 if (!aconnector->port || !aconnector->mst_port)
9286 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9291 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9292 * @dev: The DRM device
9293 * @state: The atomic state to commit
9295 * Validate that the given atomic state is programmable by DC into hardware.
9296 * This involves constructing a &struct dc_state reflecting the new hardware
9297 * state we wish to commit, then querying DC to see if it is programmable. It's
9298 * important not to modify the existing DC state. Otherwise, atomic_check
9299 * may unexpectedly commit hardware changes.
9301 * When validating the DC state, it's important that the right locks are
9302 * acquired. For full updates case which removes/adds/updates streams on one
9303 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9304 * that any such full update commit will wait for completion of any outstanding
9305 * flip using DRMs synchronization events.
9307 * Note that DM adds the affected connectors for all CRTCs in state, when that
9308 * might not seem necessary. This is because DC stream creation requires the
9309 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9310 * be possible but non-trivial - a possible TODO item.
9312 * Return: 0 on success, or a negative error code if validation failed.
9314 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9315 struct drm_atomic_state *state)
9317 struct amdgpu_device *adev = drm_to_adev(dev);
9318 struct dm_atomic_state *dm_state = NULL;
9319 struct dc *dc = adev->dm.dc;
9320 struct drm_connector *connector;
9321 struct drm_connector_state *old_con_state, *new_con_state;
9322 struct drm_crtc *crtc;
9323 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9324 struct drm_plane *plane;
9325 struct drm_plane_state *old_plane_state, *new_plane_state;
9326 enum dc_status status;
9328 bool lock_and_validation_needed = false;
9329 struct dm_crtc_state *dm_old_crtc_state;
9331 trace_amdgpu_dm_atomic_check_begin(state);
9333 ret = drm_atomic_helper_check_modeset(dev, state);
9337 /* Check connector changes */
9338 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9339 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9340 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9342 /* Skip connectors that are disabled or part of modeset already. */
9343 if (!old_con_state->crtc && !new_con_state->crtc)
9346 if (!new_con_state->crtc)
9349 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9350 if (IS_ERR(new_crtc_state)) {
9351 ret = PTR_ERR(new_crtc_state);
9355 if (dm_old_con_state->abm_level !=
9356 dm_new_con_state->abm_level)
9357 new_crtc_state->connectors_changed = true;
9360 #if defined(CONFIG_DRM_AMD_DC_DCN)
9361 if (adev->asic_type >= CHIP_NAVI10) {
9362 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9363 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9364 ret = add_affected_mst_dsc_crtcs(state, crtc);
9371 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9372 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9374 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9375 !new_crtc_state->color_mgmt_changed &&
9376 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9377 dm_old_crtc_state->dsc_force_changed == false)
9380 if (!new_crtc_state->enable)
9383 ret = drm_atomic_add_affected_connectors(state, crtc);
9387 ret = drm_atomic_add_affected_planes(state, crtc);
9391 if (dm_old_crtc_state->dsc_force_changed && new_crtc_state)
9392 new_crtc_state->mode_changed = true;
9396 * Add all primary and overlay planes on the CRTC to the state
9397 * whenever a plane is enabled to maintain correct z-ordering
9398 * and to enable fast surface updates.
9400 drm_for_each_crtc(crtc, dev) {
9401 bool modified = false;
9403 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9404 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9407 if (new_plane_state->crtc == crtc ||
9408 old_plane_state->crtc == crtc) {
9417 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9418 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9422 drm_atomic_get_plane_state(state, plane);
9424 if (IS_ERR(new_plane_state)) {
9425 ret = PTR_ERR(new_plane_state);
9431 /* Remove existing planes if they are modified */
9432 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9433 ret = dm_update_plane_state(dc, state, plane,
9437 &lock_and_validation_needed);
9442 /* Disable all crtcs which require disable */
9443 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9444 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9448 &lock_and_validation_needed);
9453 /* Enable all crtcs which require enable */
9454 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9455 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9459 &lock_and_validation_needed);
9464 /* Add new/modified planes */
9465 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9466 ret = dm_update_plane_state(dc, state, plane,
9470 &lock_and_validation_needed);
9475 /* Run this here since we want to validate the streams we created */
9476 ret = drm_atomic_helper_check_planes(dev, state);
9480 /* Check cursor planes scaling */
9481 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9482 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9487 if (state->legacy_cursor_update) {
9489 * This is a fast cursor update coming from the plane update
9490 * helper, check if it can be done asynchronously for better
9493 state->async_update =
9494 !drm_atomic_helper_async_check(dev, state);
9497 * Skip the remaining global validation if this is an async
9498 * update. Cursor updates can be done without affecting
9499 * state or bandwidth calcs and this avoids the performance
9500 * penalty of locking the private state object and
9501 * allocating a new dc_state.
9503 if (state->async_update)
9507 /* Check scaling and underscan changes */
9508 /* TODO Removed scaling changes validation due to inability to commit a
9509 * new stream into context without causing a full reset. Need to
9510 * decide how to handle.
9512 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9513 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9514 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9515 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9517 /* Skip any modesets/resets */
9518 if (!acrtc || drm_atomic_crtc_needs_modeset(
9519 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9522 /* Skip anything that is not a scaling or underscan change */
9523 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9526 lock_and_validation_needed = true;
9530 * Streams and planes are reset when there are changes that affect
9531 * bandwidth. Anything that affects bandwidth needs to go through
9532 * DC global validation to ensure that the configuration can be applied
9535 * We have to currently stall out here in atomic_check for outstanding
9536 * commits to finish in this case because our IRQ handlers reference
9537 * DRM state directly - we can end up disabling interrupts too early
9540 * TODO: Remove this stall and drop DM state private objects.
9542 if (lock_and_validation_needed) {
9543 ret = dm_atomic_get_state(state, &dm_state);
9547 ret = do_aquire_global_lock(dev, state);
9551 #if defined(CONFIG_DRM_AMD_DC_DCN)
9552 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9555 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9561 * Perform validation of MST topology in the state:
9562 * We need to perform MST atomic check before calling
9563 * dc_validate_global_state(), or there is a chance
9564 * to get stuck in an infinite loop and hang eventually.
9566 ret = drm_dp_mst_atomic_check(state);
9569 status = dc_validate_global_state(dc, dm_state->context, false);
9570 if (status != DC_OK) {
9571 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9572 dc_status_to_str(status), status);
9578 * The commit is a fast update. Fast updates shouldn't change
9579 * the DC context, affect global validation, and can have their
9580 * commit work done in parallel with other commits not touching
9581 * the same resource. If we have a new DC context as part of
9582 * the DM atomic state from validation we need to free it and
9583 * retain the existing one instead.
9585 * Furthermore, since the DM atomic state only contains the DC
9586 * context and can safely be annulled, we can free the state
9587 * and clear the associated private object now to free
9588 * some memory and avoid a possible use-after-free later.
9591 for (i = 0; i < state->num_private_objs; i++) {
9592 struct drm_private_obj *obj = state->private_objs[i].ptr;
9594 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9595 int j = state->num_private_objs-1;
9597 dm_atomic_destroy_state(obj,
9598 state->private_objs[i].state);
9600 /* If i is not at the end of the array then the
9601 * last element needs to be moved to where i was
9602 * before the array can safely be truncated.
9605 state->private_objs[i] =
9606 state->private_objs[j];
9608 state->private_objs[j].ptr = NULL;
9609 state->private_objs[j].state = NULL;
9610 state->private_objs[j].old_state = NULL;
9611 state->private_objs[j].new_state = NULL;
9613 state->num_private_objs = j;
9619 /* Store the overall update type for use later in atomic check. */
9620 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9621 struct dm_crtc_state *dm_new_crtc_state =
9622 to_dm_crtc_state(new_crtc_state);
9624 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9629 /* Must be success */
9632 trace_amdgpu_dm_atomic_check_finish(state, ret);
9637 if (ret == -EDEADLK)
9638 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9639 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9640 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9642 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9644 trace_amdgpu_dm_atomic_check_finish(state, ret);
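/*
 * Read DP_DOWN_STREAM_PORT_COUNT over DPCD and report whether the sink sets
 * DP_MSA_TIMING_PAR_IGNORED, i.e. whether it can operate without the MSA
 * timing parameters; used below to decide if EDID-based FreeSync is usable.
 */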
9649 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9650 struct amdgpu_dm_connector *amdgpu_dm_connector)
9653 bool capable = false;
9655 if (amdgpu_dm_connector->dc_link &&
9656 dm_helpers_dp_read_dpcd(
9658 amdgpu_dm_connector->dc_link,
9659 DP_DOWN_STREAM_PORT_COUNT,
9661 sizeof(dpcd_data))) {
9662 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9667 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9671 bool edid_check_required;
9672 struct detailed_timing *timing;
9673 struct detailed_non_pixel *data;
9674 struct detailed_data_monitor_range *range;
9675 struct amdgpu_dm_connector *amdgpu_dm_connector =
9676 to_amdgpu_dm_connector(connector);
9677 struct dm_connector_state *dm_con_state = NULL;
9679 struct drm_device *dev = connector->dev;
9680 struct amdgpu_device *adev = drm_to_adev(dev);
9681 bool freesync_capable = false;
9683 if (!connector->state) {
9684 DRM_ERROR("%s - Connector has no state", __func__);
9689 dm_con_state = to_dm_connector_state(connector->state);
9691 amdgpu_dm_connector->min_vfreq = 0;
9692 amdgpu_dm_connector->max_vfreq = 0;
9693 amdgpu_dm_connector->pixel_clock_mhz = 0;
9698 dm_con_state = to_dm_connector_state(connector->state);
9700 edid_check_required = false;
9701 if (!amdgpu_dm_connector->dc_sink) {
9702 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9705 if (!adev->dm.freesync_module)
9708 * If the EDID is non-zero, restrict FreeSync support to DP and eDP only
9711 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9712 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9713 edid_check_required = is_dp_capable_without_timing_msa(
9715 amdgpu_dm_connector);
9718 if (edid_check_required == true && (edid->version > 1 ||
9719 (edid->version == 1 && edid->revision > 1))) {
9720 for (i = 0; i < 4; i++) {
9722 timing = &edid->detailed_timings[i];
9723 data = &timing->data.other_data;
9724 range = &data->data.range;
9726 * Check if monitor has continuous frequency mode
9728 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9731 * Check for flag range limits only. If flag == 1 then
9732 * no additional timing information provided.
9733 * Default GTF, GTF Secondary curve and CVT are not supported.
9736 if (range->flags != 1)
9739 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9740 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9741 amdgpu_dm_connector->pixel_clock_mhz =
9742 range->pixel_clock_mhz * 10;
9746 if (amdgpu_dm_connector->max_vfreq -
9747 amdgpu_dm_connector->min_vfreq > 10) {
9749 freesync_capable = true;
9755 dm_con_state->freesync_capable = freesync_capable;
9757 if (connector->vrr_capable_property)
9758 drm_connector_set_vrr_capable_property(connector, freesync_capable);
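/*
 * Query the sink's PSR capability via the DP_PSR_SUPPORT DPCD register and
 * cache the supported PSR version, if any, on the dc_link.
 */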
9762 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9764 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9766 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9768 if (link->type == dc_connection_none)
9770 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9771 dpcd_data, sizeof(dpcd_data))) {
9772 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9774 if (dpcd_data[0] == 0) {
9775 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9776 link->psr_settings.psr_feature_enabled = false;
9778 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9779 link->psr_settings.psr_feature_enabled = true;
9782 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9787 * amdgpu_dm_link_setup_psr() - configure psr link
9788 * @stream: stream state
9790 * Return: true if success
9792 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9794 struct dc_link *link = NULL;
9795 struct psr_config psr_config = {0};
9796 struct psr_context psr_context = {0};
9802 link = stream->link;
9804 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9806 if (psr_config.psr_version > 0) {
9807 psr_config.psr_exit_link_training_required = 0x1;
9808 psr_config.psr_frame_capture_indication_req = 0;
9809 psr_config.psr_rfb_setup_time = 0x37;
9810 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9811 psr_config.allow_smu_optimizations = 0x0;
9813 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9816 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9822 * amdgpu_dm_psr_enable() - enable psr f/w
9823 * @stream: stream state
9825 * Return: true if success
9827 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9829 struct dc_link *link = stream->link;
9830 unsigned int vsync_rate_hz = 0;
9831 struct dc_static_screen_params params = {0};
9832 /* Calculate number of static frames before generating interrupt to FW. */
9835 // Init fail safe of 2 frames static
9836 unsigned int num_frames_static = 2;
9838 DRM_DEBUG_DRIVER("Enabling psr...\n");
9840 vsync_rate_hz = div64_u64(div64_u64((
9841 stream->timing.pix_clk_100hz * 100),
9842 stream->timing.v_total),
9843 stream->timing.h_total);
9846 * Calculate number of frames such that at least 30 ms of time has passed.
9849 if (vsync_rate_hz != 0) {
9850 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9851 num_frames_static = (30000 / frame_time_microsec) + 1;
9854 params.triggers.cursor_update = true;
9855 params.triggers.overlay_update = true;
9856 params.triggers.surface_update = true;
9857 params.num_frames = num_frames_static;
9859 dc_stream_set_static_screen_params(link->ctx->dc,
9863 return dc_link_set_psr_allow_active(link, true, false, false);
9867 * amdgpu_dm_psr_disable() - disable psr f/w
9868 * @stream: stream state
9870 * Return: true if success
9872 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9875 DRM_DEBUG_DRIVER("Disabling psr...\n");
9877 return dc_link_set_psr_allow_active(stream->link, false, true, false);
9881 * amdgpu_dm_psr_disable_all() - disable psr f/w
9882 * if psr is enabled on any stream
9884 * Return: true if success
9886 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9888 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9889 return dc_set_psr_allow_active(dm->dc, false);
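/*
 * Propagate the force_timing_sync setting to every stream in the current DC
 * state and retrigger OTG synchronization across the enabled CRTCs.
 */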
9892 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9894 struct amdgpu_device *adev = drm_to_adev(dev);
9895 struct dc *dc = adev->dm.dc;
9898 mutex_lock(&adev->dm.dc_lock);
9899 if (dc->current_state) {
9900 for (i = 0; i < dc->current_state->stream_count; ++i)
9901 dc->current_state->streams[i]
9902 ->triggered_crtc_reset.enabled =
9903 adev->dm.force_timing_sync;
9905 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9906 dc_trigger_sync(dc, dc->current_state);
9908 mutex_unlock(&adev->dm.dc_lock);
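/*
 * Register read/write helpers handed to DC; accesses go through CGS and are
 * traced via the amdgpu_dc_wreg/amdgpu_dc_rreg tracepoints.
 */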
9911 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9912 uint32_t value, const char *func_name)
9914 #ifdef DM_CHECK_ADDR_0
9916 DC_ERR("invalid register write. address = 0");
9920 cgs_write_register(ctx->cgs_device, address, value);
9921 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9924 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9925 const char *func_name)
9928 #ifdef DM_CHECK_ADDR_0
9930 DC_ERR("invalid register read; address = 0\n");
9935 if (ctx->dmub_srv &&
9936 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9937 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9942 value = cgs_read_register(ctx->cgs_device, address);
9944 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);